diff --git a/.gitattributes b/.gitattributes index c7a3f7a86..aa9391657 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,3 @@ /**/*.pb.go -diff -merge /**/*.pb.go linguist-generated=true +/go.sum -diff diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index db9930e91..3d9b2b8f2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -31,7 +31,7 @@ repos: - id: shellcheck - repo: https://github.com/golangci/golangci-lint - rev: v1.51.2 + rev: v1.52.2 hooks: - id: golangci-lint @@ -48,3 +48,4 @@ repos: rev: v1.0.0-rc.1 hooks: - id: go-staticcheck-repo-mod + - id: go-mod-tidy diff --git a/CHANGELOG.md b/CHANGELOG.md index b4d6e7ca0..97feb2245 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,10 +4,32 @@ Changelog for FrostFS Node ## [Unreleased] ### Added +- Support impersonate bearer token (#229) +- Change log level on SIGHUP for ir (#125) +- Reload pprof and metrics on SIGHUP for ir (#125) +- Support copies number parameter in `frostfs-cli object put` (#351) +- Set extra wallets on SIGHUP for ir (#125) + ### Changed +- `frostfs-cli util locode generate` is now much faster (#309) ### Fixed +- Take network settings into account during netmap contract update (#100) +- Read config files from dir even if config file not provided via `--config` for node (#238) +- Notary requests parsing according to `neo-go`'s updates (#268) +- Tree service panic in its internal client cache (#322) +- Iterate over endpoints when create ws client in morph's constructor (#304) +- Delete complex objects with GC (#332) + ### Removed ### Updated +- `paulmach/orb` to v0.9.1 +- `github.com/nats-io/nats.go` to `v1.25.0` +- `golang.org/x/sync` to `v0.2.0` +- `golang.org/x/term` to `v0.8.0` +- `github.com/spf13/cobra` to `v1.7.0` +- `github.com/multiformats/go-multiaddr` to `v0.9.0` +- `go.uber.org/atomic` to `v1.11.0` + ### Updating from v0.36.0 ## [v0.36.0] - 2023-04-12 - Furtwängler @@ -25,6 +47,7 @@ Changelog for FrostFS Node - Parameters `nns-name` and 
`nns-zone` for command `frostfs-cli container create` (#37) - Tree service now saves the last synchronization height which persists across restarts (#82) - Add tracing support (#135) +- Multiple (and a fix for single) copies number support for `PUT` requests (#221) ### Changed - Change `frostfs_node_engine_container_size` to counting sizes of logical objects @@ -64,6 +87,9 @@ Changelog for FrostFS Node - Iterating over just removed files by FSTree (#98) - Parts of a locked object could not be removed anymore (#141) - Non-alphabet nodes do not try to handle alphabet events (#181) +- Failing SN and IR transactions because of incorrect scopes (#2230, #2263) +- Global scope used for some transactions (#2230, #2263) +- Concurrent morph cache misses (#30) ### Removed ### Updated @@ -84,7 +110,7 @@ You need to change configuration environment variables to `FROSTFS_*` if you use New config field `object.delete.tombstone_lifetime` allows to set tombstone lifetime more appropriate for a specific deployment. -Use `__SYSTEM__` prefix for system attributes instead of `__NEOFS__` +Use `__SYSTEM__` prefix for system attributes instead of `__NEOFS__` (existed objects with old attributes will be treated as before, but for new objects new attributes will be used). 
## Older versions diff --git a/Makefile b/Makefile index 7d0f8d9e2..29625e657 100755 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ HUB_IMAGE ?= truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" GO_VERSION ?= 1.19 -LINT_VERSION ?= 1.50.0 +LINT_VERSION ?= 1.52.2 ARCH = amd64 BIN = bin diff --git a/cmd/frostfs-adm/docs/deploy.md b/cmd/frostfs-adm/docs/deploy.md index a1923bb2c..aead65fe0 100644 --- a/cmd/frostfs-adm/docs/deploy.md +++ b/cmd/frostfs-adm/docs/deploy.md @@ -147,7 +147,6 @@ NNS: Set container.frostfs -> cae60bdd689d185901e495352d0247752ce50846 NNS: Set frostfsid.frostfs -> c421fb60a3895865a8f24d197d6a80ef686041d2 NNS: Set netmap.frostfs -> 894eb854632f50fb124412ce7951ebc00763525e NNS: Set proxy.frostfs -> ac6e6fe4b373d0ca0ca4969d1e58fa0988724e7d -NNS: Set reputation.frostfs -> 6eda57c9d93d990573646762d1fea327ce41191f Waiting for transactions to persist... ``` diff --git a/cmd/frostfs-adm/docs/subnetwork-creation.md b/cmd/frostfs-adm/docs/subnetwork-creation.md deleted file mode 100644 index 5ada94387..000000000 --- a/cmd/frostfs-adm/docs/subnetwork-creation.md +++ /dev/null @@ -1,39 +0,0 @@ -# FrostFS subnetwork creation - -This is a short guide on how to create FrostFS subnetworks. This guide -considers that the sidechain and the inner ring (alphabet nodes) have already been -deployed and the sidechain contains a deployed `subnet` contract. - -## Prerequisites - -To follow this guide, you need: -- neo-go sidechain RPC endpoint; -- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases); -- wallet with FrostFS account. - -## Creation - -```shell -$ frostfs-adm morph subnet create \ - -r \ - -w \ - --notary -Create subnet request sent successfully. ID: 4223489767. -``` - -**NOTE:** in notary-enabled environment you should have a sufficient -notary deposit (not expired, with enough GAS balance). Your subnet ID -will differ from the example. 
- -The default account in the wallet that has been passed with `-w` flag is the owner -of the just created subnetwork. - -You can check if your subnetwork was created successfully: - -```shell -$ frostfs-adm morph subnet get \ - -r \ - --subnet -Owner: NUc734PMJXiqa2J9jRtvskU3kCdyyuSN8Q -``` -Your owner will differ from the example. diff --git a/cmd/frostfs-adm/docs/subnetwork-usage.md b/cmd/frostfs-adm/docs/subnetwork-usage.md deleted file mode 100644 index 0d505b3a4..000000000 --- a/cmd/frostfs-adm/docs/subnetwork-usage.md +++ /dev/null @@ -1,137 +0,0 @@ -# Managing Subnetworks - -This is a short guide on how to manage FrostFS subnetworks. This guide -considers that the sidechain and the inner ring (alphabet nodes) have already been -deployed, and the sidechain contains a deployed `subnet` contract. - -## Prerequisites - -- neo-go sidechain RPC endpoint; -- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases); -- [created](subnetwork-creation.md) subnetwork; -- wallet with the account that owns the subnetwork; -- public key of the Storage Node; -- public keys of the node and client administrators; -- owner IDs of the FrostFS users. - -## Add node administrator - -Node administrators are accounts that can manage (add and delete nodes) -the whitelist of the nodes which can be included to a subnetwork. Only the subnet -owner is allowed to add and remove node administrators from the subnetwork. - -```shell -$ frostfs-adm morph subnet admin add \ - -r \ - -w \ - --admin \ - --subnet -Add admin request sent successfully. -``` - -## Add node - -Adding a node to a subnetwork means that the node becomes able to service -containers that have been created in that subnetwork. Addition only changes -the list of the allowed nodes. Node is not required to be bootstrapped at the -moment of its inclusion. - -```shell -$ frostfs-adm morph subnet node add \ - -r \ - -w \ - --node \ - --subnet -Add node request sent successfully. 
-``` - -**NOTE:** the owner of the subnetwork is also allowed to add nodes. - -## Add client administrator - -Client administrators are accounts that can manage (add and delete -nodes) the whitelist of the clients that can create containers in the -subnetwork. Only the subnet owner is allowed to add and remove client -administrators from the subnetwork. - -```shell -$ frostfs-adm morph subnet admin add \ - -r \ - -w \ - --admin \ - --subnet \ - --client \ - --group -Add admin request sent successfully. -``` - -**NOTE:** you do not need to create a group explicitly, it will be created -right after the first client admin is added. Group ID is a 4-byte -positive integer number. - -## Add client - -```shell -$ frostfs-adm morph subnet client add \ - -r \ - -w \ - --client \ - --subnet \ - --group -Add client request sent successfully. -``` - -**NOTE:** the owner of the subnetwork is also allowed to add clients. This is -the only one command that accepts `ownerID`, not the public key. -Administrator can manage only their group (a group where that administrator -has been added by the subnet owner). - -# Bootstrapping Storage Node - -After a subnetwork [is created](subnetwork-creation.md) and a node is included into it, the -node could be bootstrapped and service subnetwork containers. - -For bootstrapping, you need to specify the ID of the subnetwork in the node's -configuration: - -```yaml -... -node: - ... - subnet: - entries: # list of IDs of subnets to enter in a text format of FrostFS API protocol (overrides corresponding attributes) - - - ... -... -``` - -**NOTE:** specifying subnetwork that is denied for the node is not an error: -that configuration value would be ignored. You do not need to specify zero -(with 0 ID) subnetwork: its inclusion is implicit. On the contrary, to exclude -a node from the default zero subnetwork, you need to specify it explicitly: - -```yaml -... -node: - ... 
- subnet: - exit_zero: true # toggle entrance to zero subnet (overrides corresponding attribute and occurrence in `entries`) - ... -... -``` - -# Creating container in non-zero subnetwork - -Creating containers without using `--subnet` flag is equivalent to -creating container in the zero subnetwork. - -To create a container in a private network, your wallet must be added to -the client whitelist by the client admins or the subnet owners: - -```shell -$ frostfs-cli container create \ - --policy 'REP 1' \ - -w \ - -r s01.frostfs.devenv:8080 \ - --subnet -``` diff --git a/cmd/frostfs-adm/internal/modules/config/config.go b/cmd/frostfs-adm/internal/modules/config/config.go index 13e575933..ebefe1d2c 100644 --- a/cmd/frostfs-adm/internal/modules/config/config.go +++ b/cmd/frostfs-adm/internal/modules/config/config.go @@ -47,7 +47,7 @@ credentials: {{.}}: password{{end}} ` -func initConfig(cmd *cobra.Command, args []string) error { +func initConfig(cmd *cobra.Command, _ []string) error { configPath, err := readConfigPathFromArgs(cmd) if err != nil { return nil diff --git a/cmd/frostfs-adm/internal/modules/morph/config.go b/cmd/frostfs-adm/internal/modules/morph/config.go index 8a888ab2c..09d071b53 100644 --- a/cmd/frostfs-adm/internal/modules/morph/config.go +++ b/cmd/frostfs-adm/internal/modules/morph/config.go @@ -10,12 +10,12 @@ import ( "strings" "text/tabwriter" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/vm/emit" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -48,41 +48,25 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { buf := bytes.NewBuffer(nil) tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) - for _, param := range arr { - tuple, ok := 
param.Value().([]stackitem.Item) - if !ok || len(tuple) != 2 { - return errors.New("invalid ListConfig response from netmap contract") - } - - k, err := tuple[0].TryBytes() - if err != nil { - return errors.New("invalid config key from netmap contract") - } - - v, err := tuple[1].TryBytes() - if err != nil { - return invalidConfigValueErr(k) - } - - switch string(k) { - case netmapAuditFeeKey, netmapBasicIncomeRateKey, - netmapContainerFeeKey, netmapContainerAliasFeeKey, - netmapEigenTrustIterationsKey, - netmapEpochKey, netmapInnerRingCandidateFeeKey, - netmapMaxObjectSizeKey, netmapWithdrawFeeKey: + m, err := parseConfigFromNetmapContract(arr) + if err != nil { + return err + } + for k, v := range m { + switch k { + case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig, + netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig, + netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig, + netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig: nbuf := make([]byte, 8) copy(nbuf[:], v) n := binary.LittleEndian.Uint64(nbuf) _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n))) - case netmapEigenTrustAlphaKey: - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (str)\n", k, v))) - case netmapHomomorphicHashDisabledKey, netmapMaintenanceAllowedKey: - vBool, err := tuple[1].TryBool() - if err != nil { + case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig: + if len(v) == 0 || len(v) > 1 { return invalidConfigValueErr(k) } - - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, vBool))) + _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1))) default: _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v)))) } @@ -150,25 +134,15 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error) valRaw := v switch key { - case netmapAuditFeeKey, netmapBasicIncomeRateKey, - netmapContainerFeeKey, netmapContainerAliasFeeKey, - netmapEigenTrustIterationsKey, - netmapEpochKey, 
netmapInnerRingCandidateFeeKey, - netmapMaxObjectSizeKey, netmapWithdrawFeeKey: + case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig, + netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig, + netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig, + netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig: val, err = strconv.ParseInt(valRaw, 10, 64) if err != nil { err = fmt.Errorf("could not parse %s's value '%s' as int: %w", key, valRaw, err) } - case netmapEigenTrustAlphaKey: - // just check that it could - // be parsed correctly - _, err = strconv.ParseFloat(v, 64) - if err != nil { - err = fmt.Errorf("could not parse %s's value '%s' as float: %w", key, valRaw, err) - } - - val = valRaw - case netmapHomomorphicHashDisabledKey, netmapMaintenanceAllowedKey: + case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig: val, err = strconv.ParseBool(valRaw) if err != nil { err = fmt.Errorf("could not parse %s's value '%s' as bool: %w", key, valRaw, err) @@ -187,6 +161,6 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error) return } -func invalidConfigValueErr(key []byte) error { +func invalidConfigValueErr(key string) error { return fmt.Errorf("invalid %s config value from netmap contract", key) } diff --git a/cmd/frostfs-adm/internal/modules/morph/download.go b/cmd/frostfs-adm/internal/modules/morph/download.go deleted file mode 100644 index 3c50c0b34..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/download.go +++ /dev/null @@ -1,40 +0,0 @@ -package morph - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "strings" - - "github.com/google/go-github/v39/github" - "github.com/spf13/cobra" -) - -func downloadContractsFromGithub(cmd *cobra.Command) (io.ReadCloser, error) { - gcl := github.NewClient(nil) - release, _, err := gcl.Repositories.GetLatestRelease(context.Background(), "nspcc-dev", "frostfs-contract") - if err != nil { - return nil, fmt.Errorf("can't fetch release info: %w", err) - 
} - - cmd.Printf("Found %s (%s), downloading...\n", release.GetTagName(), release.GetName()) - - var url string - for _, a := range release.Assets { - if strings.HasPrefix(a.GetName(), "frostfs-contract") { - url = a.GetBrowserDownloadURL() - break - } - } - if url == "" { - return nil, errors.New("can't find contracts archive in release assets") - } - - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("can't fetch contracts archive: %w", err) - } - return resp.Body, nil -} diff --git a/cmd/frostfs-adm/internal/modules/morph/epoch.go b/cmd/frostfs-adm/internal/modules/morph/epoch.go index 608ff6b43..1f622fa89 100644 --- a/cmd/frostfs-adm/internal/modules/morph/epoch.go +++ b/cmd/frostfs-adm/internal/modules/morph/epoch.go @@ -13,7 +13,7 @@ import ( "github.com/spf13/viper" ) -func forceNewEpochCmd(cmd *cobra.Command, args []string) error { +func forceNewEpochCmd(cmd *cobra.Command, _ []string) error { wCtx, err := newInitializeContext(cmd, viper.GetViper()) if err != nil { return fmt.Errorf("can't to initialize context: %w", err) diff --git a/cmd/frostfs-adm/internal/modules/morph/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate.go index 23637fed3..dd327a4b1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate.go @@ -21,6 +21,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" + "golang.org/x/sync/errgroup" ) const ( @@ -29,7 +30,7 @@ const ( consensusAccountName = "consensus" ) -func generateAlphabetCreds(cmd *cobra.Command, args []string) error { +func generateAlphabetCreds(cmd *cobra.Command, _ []string) error { // alphabet size is not part of the config size, err := cmd.Flags().GetUint(alphabetSizeFlag) if err != nil { @@ -92,28 +93,32 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er pubs[i] = w.Accounts[0].PrivateKey().PublicKey() } + var errG errgroup.Group + // Create 
committee account with N/2+1 multi-signature. majCount := smartcontract.GetMajorityHonestNodeCount(size) - for i, w := range wallets { - if err := addMultisigAccount(w, majCount, committeeAccountName, passwords[i], pubs); err != nil { - return nil, fmt.Errorf("can't create committee account: %w", err) - } - } - // Create consensus account with 2*N/3+1 multi-signature. bftCount := smartcontract.GetDefaultHonestNodeCount(size) - for i, w := range wallets { - if err := addMultisigAccount(w, bftCount, consensusAccountName, passwords[i], pubs); err != nil { - return nil, fmt.Errorf("can't create consensus account: %w", err) - } + for i := range wallets { + i := i + ps := make(keys.PublicKeys, len(pubs)) + copy(ps, pubs) + errG.Go(func() error { + if err := addMultisigAccount(wallets[i], majCount, committeeAccountName, passwords[i], ps); err != nil { + return fmt.Errorf("can't create committee account: %w", err) + } + if err := addMultisigAccount(wallets[i], bftCount, consensusAccountName, passwords[i], ps); err != nil { + return fmt.Errorf("can't create consensus account: %w", err) + } + if err := wallets[i].SavePretty(); err != nil { + return fmt.Errorf("can't save wallet: %w", err) + } + return nil + }) } - - for _, w := range wallets { - if err := w.SavePretty(); err != nil { - return nil, fmt.Errorf("can't save wallet: %w", err) - } + if err := errG.Wait(); err != nil { + return nil, err } - return passwords, nil } diff --git a/cmd/frostfs-adm/internal/modules/morph/generate_test.go b/cmd/frostfs-adm/internal/modules/morph/generate_test.go index 39cfc5718..457813df0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate_test.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "strconv" + "sync" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" @@ -71,24 +72,32 @@ func TestGenerateAlphabet(t *testing.T) { buf.WriteString(testContractPassword + "\r") require.NoError(t, 
generateAlphabetCreds(generateAlphabetCmd, nil)) + var wg sync.WaitGroup for i := uint64(0); i < size; i++ { - p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json") - w, err := wallet.NewWalletFromFile(p) - require.NoError(t, err, "wallet doesn't exist") - require.Equal(t, 3, len(w.Accounts), "not all accounts were created") - for _, a := range w.Accounts { - err := a.Decrypt(strconv.FormatUint(i, 10), keys.NEP2ScryptParams()) - require.NoError(t, err, "can't decrypt account") - switch a.Label { - case consensusAccountName: - require.Equal(t, smartcontract.GetDefaultHonestNodeCount(size), len(a.Contract.Parameters)) - case committeeAccountName: - require.Equal(t, smartcontract.GetMajorityHonestNodeCount(size), len(a.Contract.Parameters)) - default: - require.Equal(t, singleAccountName, a.Label) + i := i + wg.Add(1) + go func() { + defer wg.Done() + p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json") + w, err := wallet.NewWalletFromFile(p) + require.NoError(t, err, "wallet doesn't exist") + require.Equal(t, 3, len(w.Accounts), "not all accounts were created") + + for _, a := range w.Accounts { + err := a.Decrypt(strconv.FormatUint(i, 10), keys.NEP2ScryptParams()) + require.NoError(t, err, "can't decrypt account") + switch a.Label { + case consensusAccountName: + require.Equal(t, smartcontract.GetDefaultHonestNodeCount(size), len(a.Contract.Parameters)) + case committeeAccountName: + require.Equal(t, smartcontract.GetMajorityHonestNodeCount(size), len(a.Contract.Parameters)) + default: + require.Equal(t, singleAccountName, a.Label) + } } - } + }() } + wg.Wait() t.Run("check contract group wallet", func(t *testing.T) { p := filepath.Join(walletDir, contractWalletFilename) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize.go b/cmd/frostfs-adm/internal/modules/morph/initialize.go index a99a5faff..494ad5296 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize.go +++ 
b/cmd/frostfs-adm/internal/modules/morph/initialize.go @@ -45,7 +45,7 @@ type initializeContext struct { ContractPath string } -func initializeSideChainCmd(cmd *cobra.Command, args []string) error { +func initializeSideChainCmd(cmd *cobra.Command, _ []string) error { initCtx, err := newInitializeContext(cmd, viper.GetViper()) if err != nil { return fmt.Errorf("initialization error: %w", err) @@ -91,11 +91,7 @@ func initializeSideChainCmd(cmd *cobra.Command, args []string) error { } cmd.Println("Stage 7: set addresses in NNS.") - if err := initCtx.setNNS(); err != nil { - return err - } - - return nil + return initCtx.setNNS() } func (c *initializeContext) close() { @@ -142,7 +138,7 @@ func newInitializeContext(cmd *cobra.Command, v *viper.Viper) (*initializeContex return nil, err } - ctrPath, err := getContractsPath(cmd, v, needContracts) + ctrPath, err := getContractsPath(cmd, needContracts) if err != nil { return nil, err } @@ -222,7 +218,7 @@ func getWallet(cmd *cobra.Command, v *viper.Viper, needContracts bool, walletDir return openContractWallet(v, cmd, walletDir) } -func getContractsPath(cmd *cobra.Command, v *viper.Viper, needContracts bool) (string, error) { +func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) { if !needContracts { return "", nil } diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go index ae80c2ffd..8a0a88899 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go @@ -16,6 +16,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" 
"github.com/nspcc-dev/neo-go/pkg/encoding/address" @@ -23,6 +24,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" @@ -30,8 +32,8 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/emit" "github.com/nspcc-dev/neo-go/pkg/vm/opcode" + "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" - "github.com/spf13/viper" ) const ( @@ -45,26 +47,6 @@ const ( frostfsIDContract = "frostfsid" netmapContract = "netmap" proxyContract = "proxy" - reputationContract = "reputation" - subnetContract = "subnet" -) - -const ( - netmapEpochKey = "EpochDuration" - netmapMaxObjectSizeKey = "MaxObjectSize" - netmapAuditFeeKey = "AuditFee" - netmapContainerFeeKey = "ContainerFee" - netmapContainerAliasFeeKey = "ContainerAliasFee" - netmapEigenTrustIterationsKey = "EigenTrustIterations" - netmapEigenTrustAlphaKey = "EigenTrustAlpha" - netmapBasicIncomeRateKey = "BasicIncomeRate" - netmapInnerRingCandidateFeeKey = "InnerRingCandidateFee" - netmapWithdrawFeeKey = "WithdrawFee" - netmapHomomorphicHashDisabledKey = "HomomorphicHashingDisabled" - netmapMaintenanceAllowedKey = "MaintenanceModeAllowed" - - defaultEigenTrustIterations = 4 - defaultEigenTrustAlpha = "0.1" ) var ( @@ -75,8 +57,6 @@ var ( frostfsIDContract, netmapContract, proxyContract, - reputationContract, - subnetContract, } fullContractList = append([]string{ @@ -85,6 +65,19 @@ var ( nnsContract, alphabetContract, }, contractList...) 
+ + netmapConfigKeys = []string{ + netmap.EpochDurationConfig, + netmap.MaxObjectSizeConfig, + netmap.AuditFeeConfig, + netmap.ContainerFeeConfig, + netmap.ContainerAliasFeeConfig, + netmap.BasicIncomeRateConfig, + netmap.IrCandidateFeeConfig, + netmap.WithdrawFeeConfig, + netmap.HomomorphicHashingDisabledKey, + netmap.MaintenanceModeAllowedConfig, + } ) type contractState struct { @@ -239,7 +232,7 @@ func (c *initializeContext) deployOrUpdateContracts(w *io2.BufBinWriter, nnsHash invokeHash = ctrHash } - params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam)) + params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam, updateMethodName)) res, err := c.CommitteeAct.MakeCall(invokeHash, method, params...) if err != nil { if method != updateMethodName || !strings.Contains(err.Error(), common.ErrAlreadyUpdated) { @@ -362,7 +355,7 @@ func (c *initializeContext) deployContracts() error { return fmt.Errorf("can't sign manifest group: %v", err) } - params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam)) + params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam, deployMethodName)) res, err := c.CommitteeAct.MakeCall(management.Hash, deployMethodName, params...) 
if err != nil { return fmt.Errorf("can't deploy %s contract: %w", ctrName, err) @@ -408,11 +401,9 @@ func (c *initializeContext) readContracts(names []string) error { } else { var r io.ReadCloser if c.ContractPath == "" { - c.Command.Println("Contracts flag is missing, latest release will be fetched from Github.") - r, err = downloadContractsFromGithub(c.Command) - } else { - r, err = os.Open(c.ContractPath) + return errors.New("contracts flag is missing") } + r, err = os.Open(c.ContractPath) if err != nil { return fmt.Errorf("can't open contracts archive: %w", err) } @@ -529,7 +520,7 @@ func getContractDeployParameters(cs *contractState, deployData []any) []any { return []any{cs.RawNEF, cs.RawManifest, deployData} } -func (c *initializeContext) getContractDeployData(ctrName string, keysParam []any) []any { +func (c *initializeContext) getContractDeployData(ctrName string, keysParam []any, method string) []any { items := make([]any, 1, 6) items[0] = false // notaryDisabled is false @@ -566,20 +557,31 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an c.Contracts[netmapContract].Hash, c.Contracts[containerContract].Hash) case netmapContract: - configParam := []any{ - netmapEpochKey, viper.GetInt64(epochDurationInitFlag), - netmapMaxObjectSizeKey, viper.GetInt64(maxObjectSizeInitFlag), - netmapAuditFeeKey, viper.GetInt64(auditFeeInitFlag), - netmapContainerFeeKey, viper.GetInt64(containerFeeInitFlag), - netmapContainerAliasFeeKey, viper.GetInt64(containerAliasFeeInitFlag), - netmapEigenTrustIterationsKey, int64(defaultEigenTrustIterations), - netmapEigenTrustAlphaKey, defaultEigenTrustAlpha, - netmapBasicIncomeRateKey, viper.GetInt64(incomeRateInitFlag), - netmapInnerRingCandidateFeeKey, viper.GetInt64(candidateFeeInitFlag), - netmapWithdrawFeeKey, viper.GetInt64(withdrawFeeInitFlag), - netmapHomomorphicHashDisabledKey, viper.GetBool(homomorphicHashDisabledInitFlag), - netmapMaintenanceAllowedKey, 
viper.GetBool(maintenanceModeAllowedInitFlag), + md := getDefaultNetmapContractConfigMap() + if method == updateMethodName { + arr, err := c.getNetConfigFromNetmapContract() + if err != nil { + panic(err) + } + m, err := parseConfigFromNetmapContract(arr) + if err != nil { + panic(err) + } + for k, v := range m { + for _, key := range netmapConfigKeys { + if k == key { + md[k] = v + break + } + } + } } + + var configParam []any + for k, v := range md { + configParam = append(configParam, k, v) + } + items = append(items, c.Contracts[balanceContract].Hash, c.Contracts[containerContract].Hash, @@ -587,14 +589,28 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an configParam) case proxyContract: items = nil - case reputationContract: - case subnetContract: default: panic(fmt.Sprintf("invalid contract name: %s", ctrName)) } return items } +func (c *initializeContext) getNetConfigFromNetmapContract() ([]stackitem.Item, error) { + cs, err := c.Client.GetContractStateByID(1) + if err != nil { + return nil, fmt.Errorf("NNS is not yet deployed: %w", err) + } + nmHash, err := nnsResolveHash(c.ReadOnlyInvoker, cs.Hash, netmapContract+".frostfs") + if err != nil { + return nil, fmt.Errorf("can't get netmap contract hash: %w", err) + } + arr, err := unwrap.Array(c.ReadOnlyInvoker.Call(nmHash, "listConfig")) + if err != nil { + return nil, fmt.Errorf("can't fetch list of network config keys from the netmap contract") + } + return arr, err +} + func (c *initializeContext) getAlphabetDeployItems(i, n int) []any { items := make([]any, 6) items[0] = false diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go index edb7d6de5..15657a6d9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go @@ -15,6 +15,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient" 
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" + nnsClient "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/util" @@ -284,10 +285,11 @@ func parseNNSResolveResult(res stackitem.Item) (util.Uint160, error) { } func nnsIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) { - switch ct := c.(type) { + switch c.(type) { case *rpcclient.Client: - //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202 - return ct.NNSIsAvailable(nnsHash, name) + inv := invoker.New(c, nil) + reader := nnsClient.NewReader(inv, nnsHash) + return reader.IsAvailable(name) default: b, err := unwrap.Bool(invokeFunction(c, nnsHash, "isAvailable", []any{name}, nil)) if err != nil { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize_register.go index 27e1590cf..469b269de 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize_register.go @@ -9,6 +9,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" @@ -18,33 +19,24 @@ import ( ) // initialAlphabetNEOAmount represents the total amount of GAS distributed between alphabet nodes. 
-const initialAlphabetNEOAmount = native.NEOTotalSupply - -func (c *initializeContext) registerCandidates() error { - neoHash := neo.Hash - - cc, err := unwrap.Array(c.ReadOnlyInvoker.Call(neoHash, "getCandidates")) - if err != nil { - return fmt.Errorf("`getCandidates`: %w", err) - } - - if len(cc) > 0 { - c.Command.Println("Candidates are already registered.") - return nil - } +const ( + initialAlphabetNEOAmount = native.NEOTotalSupply + registerBatchSize = transaction.MaxAttributes - 1 +) +func (c *initializeContext) registerCandidateRange(start, end int) error { regPrice, err := c.getCandidateRegisterPrice() if err != nil { return fmt.Errorf("can't fetch registration price: %w", err) } w := io.NewBufBinWriter() - emit.AppCall(w.BinWriter, neoHash, "setRegisterPrice", callflag.States, 1) - for _, acc := range c.Accounts { - emit.AppCall(w.BinWriter, neoHash, "registerCandidate", callflag.States, acc.PrivateKey().PublicKey().Bytes()) + emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, 1) + for _, acc := range c.Accounts[start:end] { + emit.AppCall(w.BinWriter, neo.Hash, "registerCandidate", callflag.States, acc.PrivateKey().PublicKey().Bytes()) emit.Opcodes(w.BinWriter, opcode.ASSERT) } - emit.AppCall(w.BinWriter, neoHash, "setRegisterPrice", callflag.States, regPrice) + emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice) if w.Err != nil { panic(fmt.Sprintf("BUG: %v", w.Err)) } @@ -53,14 +45,14 @@ func (c *initializeContext) registerCandidates() error { Signer: c.getSigner(false, c.CommitteeAcc), Account: c.CommitteeAcc, }} - for i := range c.Accounts { + for _, acc := range c.Accounts[start:end] { signers = append(signers, rpcclient.SignerAccount{ Signer: transaction.Signer{ - Account: c.Accounts[i].Contract.ScriptHash(), + Account: acc.Contract.ScriptHash(), Scopes: transaction.CustomContracts, - AllowedContracts: []util.Uint160{neoHash}, + AllowedContracts: []util.Uint160{neo.Hash}, }, - Account: 
c.Accounts[i], + Account: acc, }) } @@ -73,8 +65,8 @@ func (c *initializeContext) registerCandidates() error { } network := c.CommitteeAct.GetNetwork() - for i := range c.Accounts { - if err := c.Accounts[i].SignTx(network, tx); err != nil { + for _, acc := range c.Accounts[start:end] { + if err := acc.SignTx(network, tx); err != nil { return fmt.Errorf("can't sign a transaction: %w", err) } } @@ -82,6 +74,39 @@ func (c *initializeContext) registerCandidates() error { return c.sendTx(tx, c.Command, true) } +func (c *initializeContext) registerCandidates() error { + cc, err := unwrap.Array(c.ReadOnlyInvoker.Call(neo.Hash, "getCandidates")) + if err != nil { + return fmt.Errorf("`getCandidates`: %w", err) + } + + need := len(c.Accounts) + have := len(cc) + + if need == have { + c.Command.Println("Candidates are already registered.") + return nil + } + + // Register candidates in batches in order to overcome the signers amount limit. + // See: https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/transaction/transaction.go#L27 + for i := 0; i < need; i += registerBatchSize { + start, end := i, i+registerBatchSize + if end > need { + end = need + } + // This check is sound because transactions are accepted/rejected atomically. 
+ if have >= end { + continue + } + if err := c.registerCandidateRange(start, end); err != nil { + return fmt.Errorf("registering candidates %d..%d: %q", start, end-1, err) + } + } + + return nil +} + func (c *initializeContext) transferNEOToAlphabetContracts() error { neoHash := neo.Hash @@ -116,10 +141,11 @@ func (c *initializeContext) transferNEOFinished(neoHash util.Uint160) (bool, err var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response") func (c *initializeContext) getCandidateRegisterPrice() (int64, error) { - switch ct := c.Client.(type) { + switch c.Client.(type) { case *rpcclient.Client: - //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202 - return ct.GetCandidateRegisterPrice() + inv := invoker.New(c.Client, nil) + reader := neo.NewReader(inv) + return reader.GetRegisterPrice() default: neoHash := neo.Hash res, err := invokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize_test.go index 39a35b12e..e2e5aa0ad 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize_test.go @@ -6,6 +6,7 @@ import ( "path/filepath" "strconv" "testing" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "github.com/nspcc-dev/neo-go/pkg/config" @@ -36,6 +37,12 @@ func TestInitialize(t *testing.T) { t.Run("7 nodes", func(t *testing.T) { testInitialize(t, 7) }) + t.Run("16 nodes", func(t *testing.T) { + testInitialize(t, 16) + }) + t.Run("22 nodes", func(t *testing.T) { + testInitialize(t, 22) + }) } func testInitialize(t *testing.T, committeeSize int) { @@ -101,11 +108,10 @@ func generateTestData(t *testing.T, dir string, size int) { cfg := config.Config{} cfg.ProtocolConfiguration.Magic = 12345 cfg.ProtocolConfiguration.ValidatorsCount = size - cfg.ProtocolConfiguration.SecondsPerBlock = 1 //lint:ignore SA1019 
https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202 + cfg.ProtocolConfiguration.TimePerBlock = time.Second cfg.ProtocolConfiguration.StandbyCommittee = pubs // sorted by glagolic letters cfg.ProtocolConfiguration.P2PSigExtensions = true cfg.ProtocolConfiguration.VerifyTransactions = true - cfg.ProtocolConfiguration.VerifyBlocks = true //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202 data, err := yaml.Marshal(cfg) require.NoError(t, err) diff --git a/cmd/frostfs-adm/internal/modules/morph/internal/types.go b/cmd/frostfs-adm/internal/modules/morph/internal/types.go deleted file mode 100644 index 93d2cb00e..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/internal/types.go +++ /dev/null @@ -1,65 +0,0 @@ -package internal - -import ( - "fmt" - "strconv" - - "google.golang.org/protobuf/proto" -) - -// StringifySubnetClientGroupID returns string representation of SubnetClientGroupID using MarshalText. -// Returns a string with a message on error. -func StringifySubnetClientGroupID(id *SubnetClientGroupID) string { - text, err := id.MarshalText() - if err != nil { - return fmt.Sprintf(" %v", err) - } - - return string(text) -} - -// MarshalText encodes SubnetClientGroupID into text format according to FrostFS API V2 protocol: -// value in base-10 integer string format. -// -// It implements encoding.TextMarshaler. -func (x *SubnetClientGroupID) MarshalText() ([]byte, error) { - num := x.GetValue() // NPE safe, returns zero on nil - - return []byte(strconv.FormatUint(uint64(num), 10)), nil -} - -// UnmarshalText decodes the SubnetID from the text according to FrostFS API V2 protocol: -// should be base-10 integer string format with bitsize = 32. -// -// Returns strconv.ErrRange if integer overflows uint32. -// -// Must not be called on nil. -// -// Implements encoding.TextUnmarshaler. 
-func (x *SubnetClientGroupID) UnmarshalText(txt []byte) error { - num, err := strconv.ParseUint(string(txt), 10, 32) - if err != nil { - return fmt.Errorf("invalid numeric value: %w", err) - } - - x.SetNumber(uint32(num)) - - return nil -} - -// Marshal encodes the SubnetClientGroupID into a binary format of FrostFS API V2 protocol -// (Protocol Buffers with direct field order). -func (x *SubnetClientGroupID) Marshal() ([]byte, error) { - return proto.Marshal(x) -} - -// Unmarshal decodes the SubnetClientGroupID from FrostFS API V2 binary format (see Marshal). Must not be called on nil. -func (x *SubnetClientGroupID) Unmarshal(data []byte) error { - return proto.Unmarshal(data, x) -} - -// SetNumber sets SubnetClientGroupID value in uint32 format. Must not be called on nil. -// By default, number is 0. -func (x *SubnetClientGroupID) SetNumber(num uint32) { - x.Value = num -} diff --git a/cmd/frostfs-adm/internal/modules/morph/internal/types.pb.go b/cmd/frostfs-adm/internal/modules/morph/internal/types.pb.go deleted file mode 100644 index 6754bf66e..000000000 Binary files a/cmd/frostfs-adm/internal/modules/morph/internal/types.pb.go and /dev/null differ diff --git a/cmd/frostfs-adm/internal/modules/morph/internal/types.proto b/cmd/frostfs-adm/internal/modules/morph/internal/types.proto deleted file mode 100644 index 2ce61b3c0..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/internal/types.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -package neo.fs.v2.refs; - -option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/internal"; - -// Client group identifier in the FrostFS subnet. -// -// String representation of a value is base-10 integer. -// -// JSON representation is an object containing single `value` number field. -message SubnetClientGroupID { - // 4-byte integer identifier of the subnet client group. 
- fixed32 value = 1 [json_name = "value"]; -} diff --git a/cmd/frostfs-adm/internal/modules/morph/local_client.go b/cmd/frostfs-adm/internal/modules/morph/local_client.go index 94ccedb37..816f9da4c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/local_client.go @@ -248,7 +248,7 @@ func (l *localClient) GetVersion() (*result.Version, error) { }, nil } -func (l *localClient) InvokeContractVerify(contract util.Uint160, params []smartcontract.Parameter, signers []transaction.Signer, witnesses ...transaction.Witness) (*result.Invoke, error) { +func (l *localClient) InvokeContractVerify(util.Uint160, []smartcontract.Parameter, []transaction.Signer, ...transaction.Witness) (*result.Invoke, error) { // not used by `morph init` command panic("unexpected call") } diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap_util.go b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go new file mode 100644 index 000000000..23cfd120c --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go @@ -0,0 +1,46 @@ +package morph + +import ( + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" + "github.com/spf13/viper" +) + +func getDefaultNetmapContractConfigMap() map[string]any { + m := make(map[string]any) + m[netmap.EpochDurationConfig] = viper.GetInt64(epochDurationInitFlag) + m[netmap.MaxObjectSizeConfig] = viper.GetInt64(maxObjectSizeInitFlag) + m[netmap.AuditFeeConfig] = viper.GetInt64(auditFeeInitFlag) + m[netmap.ContainerFeeConfig] = viper.GetInt64(containerFeeInitFlag) + m[netmap.ContainerAliasFeeConfig] = viper.GetInt64(containerAliasFeeInitFlag) + m[netmap.BasicIncomeRateConfig] = viper.GetInt64(incomeRateInitFlag) + m[netmap.IrCandidateFeeConfig] = viper.GetInt64(candidateFeeInitFlag) + m[netmap.WithdrawFeeConfig] = viper.GetInt64(withdrawFeeInitFlag) + m[netmap.HomomorphicHashingDisabledKey] = 
viper.GetBool(homomorphicHashDisabledInitFlag) + m[netmap.MaintenanceModeAllowedConfig] = viper.GetBool(maintenanceModeAllowedInitFlag) + return m +} + +func parseConfigFromNetmapContract(arr []stackitem.Item) (map[string][]byte, error) { + m := make(map[string][]byte, len(arr)) + for _, param := range arr { + tuple, ok := param.Value().([]stackitem.Item) + if !ok || len(tuple) != 2 { + return nil, errors.New("invalid ListConfig response from netmap contract") + } + + k, err := tuple[0].TryBytes() + if err != nil { + return nil, errors.New("invalid config key from netmap contract") + } + + v, err := tuple[1].TryBytes() + if err != nil { + return nil, invalidConfigValueErr(string(k)) + } + m[string(k)] = v + } + return m, nil +} diff --git a/cmd/frostfs-adm/internal/modules/morph/root.go b/cmd/frostfs-adm/internal/modules/morph/root.go index e92fd2160..431be125c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/root.go @@ -255,7 +255,6 @@ func init() { initRestoreContainersCmd() initListContainersCmd() initRefillGasCmd() - initSubnetCmd() initDepositoryNotaryCmd() initNetmapCandidatesCmd() } @@ -274,10 +273,6 @@ func initDepositoryNotaryCmd() { depositNotaryCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks") } -func initSubnetCmd() { - RootCmd.AddCommand(cmdSubnet) -} - func initRefillGasCmd() { RootCmd.AddCommand(refillGasCmd) refillGasCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir") @@ -314,7 +309,8 @@ func initUpdateContractsCmd() { RootCmd.AddCommand(updateContractsCmd) updateContractsCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir") updateContractsCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint") - updateContractsCmd.Flags().String(contractsInitFlag, "", "Path to archive with compiled FrostFS contracts (default fetched from latest github release)") + updateContractsCmd.Flags().String(contractsInitFlag, "", 
"Path to archive with compiled FrostFS contracts") + _ = updateContractsCmd.MarkFlagRequired(contractsInitFlag) } func initDumpBalancesCmd() { @@ -375,7 +371,8 @@ func initInitCmd() { RootCmd.AddCommand(initCmd) initCmd.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir") initCmd.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint") - initCmd.Flags().String(contractsInitFlag, "", "Path to archive with compiled FrostFS contracts (default fetched from latest github release)") + initCmd.Flags().String(contractsInitFlag, "", "Path to archive with compiled FrostFS contracts") + _ = initCmd.MarkFlagRequired(contractsInitFlag) initCmd.Flags().Uint(epochDurationCLIFlag, 240, "Amount of side chain blocks in one FrostFS epoch") initCmd.Flags().Uint(maxObjectSizeCLIFlag, 67108864, "Max single object size in bytes") initCmd.Flags().Bool(homomorphicHashDisabledCLIFlag, false, "Disable object homomorphic hashing") diff --git a/cmd/frostfs-adm/internal/modules/morph/subnet.go b/cmd/frostfs-adm/internal/modules/morph/subnet.go deleted file mode 100644 index bdead7730..000000000 --- a/cmd/frostfs-adm/internal/modules/morph/subnet.go +++ /dev/null @@ -1,1101 +0,0 @@ -package morph - -import ( - "encoding/hex" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/internal" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet" - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/hash" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - 
"github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func viperBindFlags(cmd *cobra.Command, flags ...string) { - for i := range flags { - _ = viper.BindPFlag(flags[i], cmd.Flags().Lookup(flags[i])) - } -} - -// subnet command section. -var cmdSubnet = &cobra.Command{ - Use: "subnet", - Short: "FrostFS subnet management", - PreRun: func(cmd *cobra.Command, _ []string) { - viperBindFlags(cmd, - endpointFlag, - ) - }, -} - -// shared flags of cmdSubnet sub-commands. -const ( - flagSubnet = "subnet" // subnet identifier - flagSubnetGroup = "group" // subnet client group ID - flagSubnetWallet = "wallet" // filepath to wallet - flagSubnetAddress = "address" // address in the wallet, optional -) - -// reads wallet from the filepath configured in flagSubnetWallet flag, -// looks for address specified in flagSubnetAddress flag (uses default -// address if flag is empty) and decrypts private key. 
-func readSubnetKey(key *keys.PrivateKey) error { - // read wallet from file - - walletPath := viper.GetString(flagSubnetWallet) - if walletPath == "" { - return errors.New("missing path to wallet") - } - - w, err := wallet.NewWalletFromFile(walletPath) - if err != nil { - return fmt.Errorf("read wallet from file: %w", err) - } - - // read account from the wallet - - var ( - addr util.Uint160 - addrStr = viper.GetString(flagSubnetAddress) - ) - - if addrStr == "" { - addr = w.GetChangeAddress() - } else { - addr, err = flags.ParseAddress(addrStr) - if err != nil { - return fmt.Errorf("read wallet address: %w", err) - } - } - - acc := w.GetAccount(addr) - if acc == nil { - return fmt.Errorf("address %s not found in %s", addrStr, walletPath) - } - - // read password - pass, err := input.ReadPassword("Enter password > ") - if err != nil { - return fmt.Errorf("read password: %w", err) - } - - // decrypt with just read password - err = acc.Decrypt(pass, keys.NEP2ScryptParams()) - if err != nil { - return fmt.Errorf("decrypt wallet: %w", err) - } - - *key = *acc.PrivateKey() - - return nil -} - -// create subnet command. 
-var cmdSubnetCreate = &cobra.Command{ - Use: "create", - Short: "Create FrostFS subnet", - PreRun: func(cmd *cobra.Command, _ []string) { - viperBindFlags(cmd, - flagSubnetWallet, - flagSubnetAddress, - ) - }, - RunE: func(cmd *cobra.Command, _ []string) error { - // read private key - var key keys.PrivateKey - - err := readSubnetKey(&key) - if err != nil { - return fmt.Errorf("read private key: %w", err) - } - - // generate subnet ID and marshal it - var ( - id subnetid.ID - num uint32 - ) - - for { - num = rand.Uint32() - - id.SetNumeric(num) - - if !subnetid.IsZero(id) { - break - } - } - - // declare creator ID and encode it - var creator user.ID - user.IDFromKey(&creator, key.PrivateKey.PublicKey) - - // fill subnet info and encode it - var info subnet.Info - - info.SetID(id) - info.SetOwner(creator) - - err = invokeMethod(key, true, "put", id.Marshal(), key.PublicKey().Bytes(), info.Marshal()) - if err != nil { - return fmt.Errorf("morph invocation: %w", err) - } - - cmd.Printf("Create subnet request sent successfully. ID: %s.\n", &id) - - return nil - }, -} - -// cmdSubnetRemove flags. -const ( - // subnet ID to be removed. - flagSubnetRemoveID = flagSubnet -) - -// errZeroSubnet is returned on attempts to work with zero subnet which is virtual. -var errZeroSubnet = errors.New("zero subnet") - -// remove subnet command. 
-var cmdSubnetRemove = &cobra.Command{ - Use: "remove", - Short: "Remove FrostFS subnet", - PreRun: func(cmd *cobra.Command, _ []string) { - viperBindFlags(cmd, - flagSubnetWallet, - flagSubnetAddress, - flagSubnetRemoveID, - ) - }, - RunE: func(cmd *cobra.Command, _ []string) error { - // read private key - var key keys.PrivateKey - - err := readSubnetKey(&key) - if err != nil { - return fmt.Errorf("read private key: %w", err) - } - - // read ID and encode it - var id subnetid.ID - - err = id.DecodeString(viper.GetString(flagSubnetRemoveID)) - if err != nil { - return fmt.Errorf("decode ID text: %w", err) - } - - if subnetid.IsZero(id) { - return errZeroSubnet - } - - err = invokeMethod(key, false, "delete", id.Marshal()) - if err != nil { - return fmt.Errorf("morph invocation: %w", err) - } - - cmd.Println("Remove subnet request sent successfully") - - return nil - }, -} - -// cmdSubnetGet flags. -const ( - // subnet ID to be read. - flagSubnetGetID = flagSubnet -) - -// get subnet command. 
-var cmdSubnetGet = &cobra.Command{ - Use: "get", - Short: "Read information about the FrostFS subnet", - PreRun: func(cmd *cobra.Command, _ []string) { - viperBindFlags(cmd, - flagSubnetGetID, - ) - }, - RunE: func(cmd *cobra.Command, _ []string) error { - // read ID and encode it - var id subnetid.ID - - err := id.DecodeString(viper.GetString(flagSubnetGetID)) - if err != nil { - return fmt.Errorf("decode ID text: %w", err) - } - - if subnetid.IsZero(id) { - return errZeroSubnet - } - - // use random key to fetch the data - // we could use raw neo-go client to perform testInvoke - // without keys, as it is done in other commands - key, err := keys.NewPrivateKey() - if err != nil { - return fmt.Errorf("init subnet client: %w", err) - } - - res, err := testInvokeMethod(*key, "get", id.Marshal()) - if err != nil { - return fmt.Errorf("morph invocation: %w", err) - } - - if len(res) == 0 { - return errors.New("subnet does not exist") - } - - data, err := client.BytesFromStackItem(res[0]) - if err != nil { - return fmt.Errorf("decoding contract response: %w", err) - } - - // decode info - var info subnet.Info - if err = info.Unmarshal(data); err != nil { - return fmt.Errorf("decode subnet info: %w", err) - } - - // print information - cmd.Printf("Owner: %s\n", info.Owner()) - - return nil - }, -} - -// cmdSubnetAdmin subnet flags. -const ( - flagSubnetAdminSubnet = flagSubnet // subnet ID to be managed - flagSubnetAdminID = "admin" // admin public key - flagSubnetAdminClient = "client" // manage client admins instead of node ones -) - -// command to manage subnet admins. -var cmdSubnetAdmin = &cobra.Command{ - Use: "admin", - Short: "Manage administrators of the FrostFS subnet", - PreRun: func(cmd *cobra.Command, args []string) { - viperBindFlags(cmd, - flagSubnetWallet, - flagSubnetAddress, - flagSubnetAdminSubnet, - flagSubnetAdminID, - ) - }, -} - -// cmdSubnetAdminAdd flags. 
-const ( - flagSubnetAdminAddGroup = flagSubnetGroup // client group ID -) - -// common executor cmdSubnetAdminAdd and cmdSubnetAdminRemove commands. -func manageSubnetAdmins(cmd *cobra.Command, rm bool) error { - // read private key - var key keys.PrivateKey - - err := readSubnetKey(&key) - if err != nil { - return fmt.Errorf("read private key: %w", err) - } - - // read ID and encode it - var id subnetid.ID - - err = id.DecodeString(viper.GetString(flagSubnetAdminSubnet)) - if err != nil { - return fmt.Errorf("decode ID text: %w", err) - } - - if subnetid.IsZero(id) { - return errZeroSubnet - } - - // read admin key and decode it - binAdminKey, err := hex.DecodeString(viper.GetString(flagSubnetAdminID)) - if err != nil { - return fmt.Errorf("decode admin key text: %w", err) - } - - var pubkey keys.PublicKey - if err = pubkey.DecodeBytes(binAdminKey); err != nil { - return fmt.Errorf("admin key format: %w", err) - } - - return invokeMethodWithParams(cmd, id, rm, binAdminKey, key) -} - -func invokeMethodWithParams(cmd *cobra.Command, id subnetid.ID, rm bool, binAdminKey []byte, key keys.PrivateKey) error { - prm := make([]any, 0, 3) - prm = append(prm, id.Marshal()) - - var method string - - if viper.GetBool(flagSubnetAdminClient) { - var groupID internal.SubnetClientGroupID - - err := groupID.UnmarshalText([]byte(viper.GetString(flagSubnetAdminAddGroup))) - if err != nil { - return fmt.Errorf("decode group ID text: %w", err) - } - - binGroupID, err := groupID.Marshal() - if err != nil { - return fmt.Errorf("marshal group ID: %w", err) - } - - if rm { - method = "removeClientAdmin" - } else { - method = "addClientAdmin" - } - - prm = append(prm, binGroupID) - } else { - if rm { - method = "removeNodeAdmin" - } else { - method = "addNodeAdmin" - } - } - - prm = append(prm, binAdminKey) - - err := invokeMethod(key, false, method, prm...) 
- if err != nil { - return fmt.Errorf("morph invocation: %w", err) - } - - var op string - - if rm { - op = "Remove" - } else { - op = "Add" - } - - cmd.Printf("%s admin request sent successfully.\n", op) - - return nil -} - -// command to add subnet admin. -var cmdSubnetAdminAdd = &cobra.Command{ - Use: "add", - Short: "Add admin to the FrostFS subnet", - PreRun: func(cmd *cobra.Command, _ []string) { - viperBindFlags(cmd, - flagSubnetAdminAddGroup, - flagSubnetAdminClient, - ) - }, - RunE: func(cmd *cobra.Command, _ []string) error { - return manageSubnetAdmins(cmd, false) - }, -} - -// command to remove subnet admin. -var cmdSubnetAdminRemove = &cobra.Command{ - Use: "remove", - Short: "Remove admin of the FrostFS subnet", - PreRun: func(cmd *cobra.Command, _ []string) { - viperBindFlags(cmd, - flagSubnetAdminClient, - ) - }, - RunE: func(cmd *cobra.Command, _ []string) error { - return manageSubnetAdmins(cmd, true) - }, -} - -// cmdSubnetClient flags. -const ( - flagSubnetClientSubnet = flagSubnet // ID of the subnet to be managed - flagSubnetClientID = flagSubnetAdminClient // client's NeoFS ID - flagSubnetClientGroup = flagSubnetGroup // ID of the subnet client group -) - -// command to manage subnet clients. -var cmdSubnetClient = &cobra.Command{ - Use: "client", - Short: "Manage clients of the FrostFS subnet", - PreRun: func(cmd *cobra.Command, _ []string) { - viperBindFlags(cmd, - flagSubnetWallet, - flagSubnetAddress, - flagSubnetClientSubnet, - flagSubnetClientID, - flagSubnetClientGroup, - ) - }, -} - -// common executor cmdSubnetClientAdd and cmdSubnetClientRemove commands. 
-func manageSubnetClients(cmd *cobra.Command, rm bool) error { - // read private key - var key keys.PrivateKey - - err := readSubnetKey(&key) - if err != nil { - return fmt.Errorf("read private key: %w", err) - } - - // read ID and encode it - var id subnetid.ID - - err = id.DecodeString(viper.GetString(flagSubnetClientSubnet)) - if err != nil { - return fmt.Errorf("decode ID text: %w", err) - } - - if subnetid.IsZero(id) { - return errZeroSubnet - } - - // read client ID and encode it - var clientID user.ID - - err = clientID.DecodeString(viper.GetString(flagSubnetClientID)) - if err != nil { - return fmt.Errorf("decode client ID text: %w", err) - } - - // read group ID and encode it - var groupID internal.SubnetClientGroupID - - err = groupID.UnmarshalText([]byte(viper.GetString(flagSubnetAdminAddGroup))) - if err != nil { - return fmt.Errorf("decode group ID text: %w", err) - } - - binGroupID, err := groupID.Marshal() - if err != nil { - return fmt.Errorf("marshal group ID: %w", err) - } - - var method string - if rm { - method = "removeUser" - } else { - method = "addUser" - } - - err = invokeMethod(key, false, method, id.Marshal(), binGroupID, clientID.WalletBytes()) - if err != nil { - return fmt.Errorf("morph invocation: %w", err) - } - - var op string - - if rm { - op = "Remove" - } else { - op = "Add" - } - - cmd.Printf("%s client request sent successfully.\n", op) - - return nil -} - -// command to add subnet client. -var cmdSubnetClientAdd = &cobra.Command{ - Use: "add", - Short: "Add client to the FrostFS subnet", - RunE: func(cmd *cobra.Command, _ []string) error { - return manageSubnetClients(cmd, false) - }, -} - -// command to remove subnet client. -var cmdSubnetClientRemove = &cobra.Command{ - Use: "remove", - Short: "Remove client of the FrostFS subnet", - RunE: func(cmd *cobra.Command, _ []string) error { - return manageSubnetClients(cmd, true) - }, -} - -// cmdSubnetNode flags. 
-const ( - flagSubnetNode = "node" // node ID - flagSubnetNodeSubnet = flagSubnet // ID of the subnet to be managed -) - -// common executor cmdSubnetNodeAdd and cmdSubnetNodeRemove commands. -func manageSubnetNodes(cmd *cobra.Command, rm bool) error { - // read private key - var key keys.PrivateKey - - err := readSubnetKey(&key) - if err != nil { - return fmt.Errorf("read private key: %w", err) - } - - // read ID and encode it - var id subnetid.ID - - err = id.DecodeString(viper.GetString(flagSubnetNodeSubnet)) - if err != nil { - return fmt.Errorf("decode ID text: %w", err) - } - - if subnetid.IsZero(id) { - return errZeroSubnet - } - - // read node ID and encode it - binNodeID, err := hex.DecodeString(viper.GetString(flagSubnetNode)) - if err != nil { - return fmt.Errorf("decode node ID text: %w", err) - } - - var pubkey keys.PublicKey - if err = pubkey.DecodeBytes(binNodeID); err != nil { - return fmt.Errorf("node ID format: %w", err) - } - - var method string - if rm { - method = "removeNode" - } else { - method = "addNode" - } - - err = invokeMethod(key, false, method, id.Marshal(), binNodeID) - if err != nil { - return fmt.Errorf("morph invocation: %w", err) - } - - var op string - - if rm { - op = "Remove" - } else { - op = "Add" - } - - cmd.Printf("%s node request sent successfully.\n", op) - - return nil -} - -// command to manage subnet nodes. -var cmdSubnetNode = &cobra.Command{ - Use: "node", - Short: "Manage nodes of the FrostFS subnet", - PreRun: func(cmd *cobra.Command, _ []string) { - viperBindFlags(cmd, - flagSubnetWallet, - flagSubnetNode, - flagSubnetNodeSubnet, - ) - }, -} - -// command to add subnet node. -var cmdSubnetNodeAdd = &cobra.Command{ - Use: "add", - Short: "Add node to the FrostFS subnet", - RunE: func(cmd *cobra.Command, _ []string) error { - return manageSubnetNodes(cmd, false) - }, -} - -// command to remove subnet node. 
-var cmdSubnetNodeRemove = &cobra.Command{ - Use: "remove", - Short: "Remove node from the FrostFS subnet", - RunE: func(cmd *cobra.Command, _ []string) error { - return manageSubnetNodes(cmd, true) - }, -} - -// returns function which calls PreRun on parent if it exists. -func inheritPreRun(preRun func(*cobra.Command, []string)) func(*cobra.Command, []string) { - return func(cmd *cobra.Command, args []string) { - par := cmd.Parent() - if par != nil && par.PreRun != nil { - par.PreRun(par, args) - } - - if preRun != nil { - preRun(cmd, args) - } - } -} - -// inherits PreRun function of parent command in all sub-commands and -// adds them to the parent. -func addCommandInheritPreRun(par *cobra.Command, subs ...*cobra.Command) { - for _, sub := range subs { - sub.PreRun = inheritPreRun(sub.PreRun) - } - - par.AddCommand(subs...) -} - -// registers flags and binds sub-commands for subnet commands. -func init() { - initCreateSubnetFlags() - initGetSubnetFlags() - initRemoveSubnetFlags() - initSubnetAdminFlags() - initSubnetAdminAddFlags() - initSubnetAdminRemoveFlags() - initClientManagementFlags() - - // add all admin managing commands to corresponding command section - addCommandInheritPreRun(cmdSubnetAdmin, - cmdSubnetAdminAdd, - cmdSubnetAdminRemove, - ) - - // add all client managing commands to corresponding command section - addCommandInheritPreRun(cmdSubnetClient, - cmdSubnetClientAdd, - cmdSubnetClientRemove, - ) - - initSubnetNodeFlags() - - // add all node managing commands to corresponding command section - addCommandInheritPreRun(cmdSubnetNode, - cmdSubnetNodeAdd, - cmdSubnetNodeRemove, - ) - - initSubnetGlobalFlags() - - // add all subnet commands to corresponding command section - addCommandInheritPreRun(cmdSubnet, - cmdSubnetCreate, - cmdSubnetRemove, - cmdSubnetGet, - cmdSubnetAdmin, - cmdSubnetClient, - cmdSubnetNode, - ) -} - -func initSubnetGlobalFlags() { - cmdSubnetFlags := cmdSubnet.PersistentFlags() - cmdSubnetFlags.StringP(endpointFlag, "r", 
"", "N3 RPC node endpoint") - _ = cmdSubnet.MarkFlagRequired(endpointFlag) -} - -func initSubnetNodeFlags() { - nodeFlags := cmdSubnetNode.PersistentFlags() - nodeFlags.StringP(flagSubnetWallet, "w", "", "Path to file with wallet") - _ = cmdSubnetNode.MarkFlagRequired(flagSubnetWallet) - nodeFlags.String(flagSubnetNode, "", "Hex-encoded public key of the node") - _ = cmdSubnetNode.MarkFlagRequired(flagSubnetNode) - nodeFlags.String(flagSubnetNodeSubnet, "", "ID of the subnet to manage nodes") - _ = cmdSubnetNode.MarkFlagRequired(flagSubnetNodeSubnet) -} - -func initClientManagementFlags() { - clientFlags := cmdSubnetClient.PersistentFlags() - clientFlags.String(flagSubnetClientSubnet, "", "ID of the subnet to be managed") - _ = cmdSubnetClient.MarkFlagRequired(flagSubnetClientSubnet) - clientFlags.String(flagSubnetClientGroup, "", "ID of the client group to work with") - _ = cmdSubnetClient.MarkFlagRequired(flagSubnetClientGroup) - clientFlags.String(flagSubnetClientID, "", "Client's user ID in FrostFS system in text format") - _ = cmdSubnetClient.MarkFlagRequired(flagSubnetClientID) - clientFlags.StringP(flagSubnetWallet, "w", "", "Path to file with wallet") - _ = cmdSubnetClient.MarkFlagRequired(flagSubnetWallet) - clientFlags.StringP(flagSubnetAddress, "a", "", "Address in the wallet, optional") -} - -func initSubnetAdminRemoveFlags() { - cmdSubnetAdminRemoveFlags := cmdSubnetAdminRemove.Flags() - cmdSubnetAdminRemoveFlags.Bool(flagSubnetAdminClient, false, "Remove client admin instead of node one") -} - -func initSubnetAdminAddFlags() { - cmdSubnetAdminAddFlags := cmdSubnetAdminAdd.Flags() - cmdSubnetAdminAddFlags.String(flagSubnetAdminAddGroup, "", fmt.Sprintf( - "Client group ID in text format (needed with --%s only)", flagSubnetAdminClient)) - cmdSubnetAdminAddFlags.Bool(flagSubnetAdminClient, false, "Add client admin instead of node one") -} - -func initSubnetAdminFlags() { - adminFlags := cmdSubnetAdmin.PersistentFlags() - 
adminFlags.String(flagSubnetAdminSubnet, "", "ID of the subnet to manage administrators") - _ = cmdSubnetAdmin.MarkFlagRequired(flagSubnetAdminSubnet) - adminFlags.String(flagSubnetAdminID, "", "Hex-encoded public key of the admin") - _ = cmdSubnetAdmin.MarkFlagRequired(flagSubnetAdminID) - adminFlags.StringP(flagSubnetWallet, "w", "", "Path to file with wallet") - _ = cmdSubnetAdmin.MarkFlagRequired(flagSubnetWallet) - adminFlags.StringP(flagSubnetAddress, "a", "", "Address in the wallet, optional") -} - -func initRemoveSubnetFlags() { - cmdSubnetRemove.Flags().String(flagSubnetRemoveID, "", "ID of the subnet to remove") - _ = cmdSubnetRemove.MarkFlagRequired(flagSubnetRemoveID) - cmdSubnetRemove.Flags().StringP(flagSubnetWallet, "w", "", "Path to file with wallet") - _ = cmdSubnetRemove.MarkFlagRequired(flagSubnetWallet) - cmdSubnetRemove.Flags().StringP(flagSubnetAddress, "a", "", "Address in the wallet, optional") -} - -func initGetSubnetFlags() { - cmdSubnetGet.Flags().String(flagSubnetGetID, "", "ID of the subnet to read") - _ = cmdSubnetAdminAdd.MarkFlagRequired(flagSubnetGetID) -} - -func initCreateSubnetFlags() { - cmdSubnetCreate.Flags().StringP(flagSubnetWallet, "w", "", "Path to file with wallet") - _ = cmdSubnetCreate.MarkFlagRequired(flagSubnetWallet) - cmdSubnetCreate.Flags().StringP(flagSubnetAddress, "a", "", "Address in the wallet, optional") -} - -func testInvokeMethod(key keys.PrivateKey, method string, args ...any) ([]stackitem.Item, error) { - c, err := getN3Client(viper.GetViper()) - if err != nil { - return nil, fmt.Errorf("morph client creation: %w", err) - } - - nnsCs, err := c.GetContractStateByID(1) - if err != nil { - return nil, fmt.Errorf("NNS contract resolving: %w", err) - } - - cosigner := []transaction.Signer{ - { - Account: key.PublicKey().GetScriptHash(), - Scopes: transaction.Global, - }, - } - - inv := invoker.New(c, cosigner) - - subnetHash, err := nnsResolveHash(inv, nnsCs.Hash, subnetContract+".frostfs") - if err != nil { - 
return nil, fmt.Errorf("subnet hash resolving: %w", err) - } - - res, err := inv.Call(subnetHash, method, args...) - if err != nil { - return nil, fmt.Errorf("invocation parameters prepararion: %w", err) - } - - err = checkInvocationResults(res) - if err != nil { - return nil, err - } - - return res.Stack, nil -} - -func invokeMethod(key keys.PrivateKey, tryNotary bool, method string, args ...any) error { - c, err := getN3Client(viper.GetViper()) - if err != nil { - return fmt.Errorf("morph client creation: %w", err) - } - - if tryNotary { - cc, err := c.GetNativeContracts() - if err != nil { - return fmt.Errorf("native hashes: %w", err) - } - - var notary bool - var notaryHash util.Uint160 - for _, c := range cc { - if c.Manifest.Name == nativenames.Notary { - notary = len(c.UpdateHistory) > 0 - notaryHash = c.Hash - - break - } - } - - if notary { - err = invokeNotary(c, key, method, notaryHash, args...) - if err != nil { - return fmt.Errorf("notary invocation: %w", err) - } - - return nil - } - } - - err = invokeNonNotary(c, key, method, args...) - if err != nil { - return fmt.Errorf("non-notary invocation: %w", err) - } - - return nil -} - -func invokeNonNotary(c Client, key keys.PrivateKey, method string, args ...any) error { - nnsCs, err := c.GetContractStateByID(1) - if err != nil { - return fmt.Errorf("NNS contract resolving: %w", err) - } - - acc := wallet.NewAccountFromPrivateKey(&key) - - cosigner := []transaction.Signer{ - { - Account: key.PublicKey().GetScriptHash(), - Scopes: transaction.Global, - }, - } - - cosignerAcc := []rpcclient.SignerAccount{ - { - Signer: cosigner[0], - Account: acc, - }, - } - - inv := invoker.New(c, cosigner) - - subnetHash, err := nnsResolveHash(inv, nnsCs.Hash, subnetContract+".frostfs") - if err != nil { - return fmt.Errorf("subnet hash resolving: %w", err) - } - - test, err := inv.Call(subnetHash, method, args...) 
- if err != nil { - return fmt.Errorf("test invocation: %w", err) - } - - err = checkInvocationResults(test) - if err != nil { - return err - } - - _, err = c.SignAndPushInvocationTx(test.Script, acc, test.GasConsumed, 0, cosignerAcc) - if err != nil { - return fmt.Errorf("sending transaction: %w", err) - } - - return nil -} - -func invokeNotary(c Client, key keys.PrivateKey, method string, notaryHash util.Uint160, args ...any) error { - nnsCs, err := c.GetContractStateByID(1) - if err != nil { - return fmt.Errorf("NNS contract resolving: %w", err) - } - - alphabet, err := c.GetCommittee() - if err != nil { - return fmt.Errorf("alphabet list: %w", err) - } - - multisigScript, err := smartcontract.CreateDefaultMultiSigRedeemScript(alphabet) - if err != nil { - return fmt.Errorf("alphabet multi-signature script: %w", err) - } - - cosigners, err := notaryCosigners(c, notaryHash, nnsCs, key, hash.Hash160(multisigScript)) - if err != nil { - return fmt.Errorf("cosigners collecting: %w", err) - } - - inv := invoker.New(c, cosigners) - - subnetHash, err := nnsResolveHash(inv, nnsCs.Hash, subnetContract+".frostfs") - if err != nil { - return fmt.Errorf("subnet hash resolving: %w", err) - } - - test, err := makeTestInvocation(inv, subnetHash, method, args) - if err != nil { - return err - } - - multisigAccount := &wallet.Account{ - Contract: &wallet.Contract{ - Script: multisigScript, - }, - } - - bc, err := c.GetBlockCount() - if err != nil { - return fmt.Errorf("blockchain height: %w", err) - } - - return createAndPushTransaction(alphabet, test, bc, cosigners, c, key, multisigAccount) -} - -func makeTestInvocation(inv *invoker.Invoker, subnetHash util.Uint160, method string, args []any) (*result.Invoke, error) { - test, err := inv.Call(subnetHash, method, args...) 
- if err != nil { - return nil, fmt.Errorf("test invocation: %w", err) - } - - err = checkInvocationResults(test) - if err != nil { - return nil, err - } - return test, nil -} - -func createAndPushTransaction(alphabet keys.PublicKeys, test *result.Invoke, blockCount uint32, cosigners []transaction.Signer, - client Client, key keys.PrivateKey, multisigAccount *wallet.Account) error { - // alphabet multisig + key signature - signersNumber := uint8(smartcontract.GetDefaultHonestNodeCount(len(alphabet)) + 1) - - // notaryRequestValidity is number of blocks during - // witch notary request is considered valid - const notaryRequestValidity = 100 - - mainTx := &transaction.Transaction{ - Nonce: rand.Uint32(), - SystemFee: test.GasConsumed, - ValidUntilBlock: blockCount + notaryRequestValidity, - Script: test.Script, - Attributes: []transaction.Attribute{ - { - Type: transaction.NotaryAssistedT, - Value: &transaction.NotaryAssisted{NKeys: signersNumber}, - }, - }, - Signers: cosigners, - } - - notaryFee, err := client.CalculateNotaryFee(signersNumber) - if err != nil { - return err - } - - acc := wallet.NewAccountFromPrivateKey(&key) - aa := notaryAccounts(multisigAccount, acc) - - err = client.AddNetworkFee(mainTx, notaryFee, aa...) 
- if err != nil { - return fmt.Errorf("notary network fee adding: %w", err) - } - - mainTx.Scripts = notaryWitnesses(client, multisigAccount, acc, mainTx) - - _, err = client.SignAndPushP2PNotaryRequest(mainTx, - []byte{byte(opcode.RET)}, - -1, - 0, - 40, - acc) - if err != nil { - return fmt.Errorf("sending notary request: %w", err) - } - - return nil -} - -func notaryCosigners(c Client, notaryHash util.Uint160, nnsCs *state.Contract, - key keys.PrivateKey, alphabetAccount util.Uint160) ([]transaction.Signer, error) { - proxyHash, err := nnsResolveHash(invoker.New(c, nil), nnsCs.Hash, proxyContract+".frostfs") - if err != nil { - return nil, fmt.Errorf("proxy hash resolving: %w", err) - } - - return []transaction.Signer{ - { - Account: proxyHash, - Scopes: transaction.None, - }, - { - Account: alphabetAccount, - Scopes: transaction.Global, - }, - { - Account: hash.Hash160(key.PublicKey().GetVerificationScript()), - Scopes: transaction.Global, - }, - { - Account: notaryHash, - Scopes: transaction.None, - }, - }, nil -} - -func notaryAccounts(alphabet, acc *wallet.Account) []*wallet.Account { - return []*wallet.Account{ - // proxy - { - Contract: &wallet.Contract{ - Deployed: true, - }, - }, - alphabet, - // caller's account - acc, - // last one is a placeholder for notary contract account - { - Contract: &wallet.Contract{}, - }, - } -} - -func notaryWitnesses(c Client, alphabet, acc *wallet.Account, tx *transaction.Transaction) []transaction.Witness { - ww := make([]transaction.Witness, 0, 4) - - // empty proxy contract witness - ww = append(ww, transaction.Witness{ - InvocationScript: []byte{}, - VerificationScript: []byte{}, - }) - - // alphabet multi-address witness - ww = append(ww, transaction.Witness{ - InvocationScript: append( - []byte{byte(opcode.PUSHDATA1), 64}, - make([]byte, 64)..., - ), - VerificationScript: alphabet.GetVerificationScript(), - }) - - magicNumber, _ := c.GetNetwork() - - // caller's witness - ww = append(ww, transaction.Witness{ - 
InvocationScript: append( - []byte{byte(opcode.PUSHDATA1), 64}, - acc.PrivateKey().SignHashable(uint32(magicNumber), tx)...), - VerificationScript: acc.GetVerificationScript(), - }) - - // notary contract witness - ww = append(ww, transaction.Witness{ - InvocationScript: append( - []byte{byte(opcode.PUSHDATA1), 64}, - make([]byte, 64)..., - ), - VerificationScript: []byte{}, - }) - - return ww -} - -func checkInvocationResults(res *result.Invoke) error { - if res.State != "HALT" { - return fmt.Errorf("test invocation state: %s, exception %s: ", res.State, res.FaultException) - } - - if len(res.Script) == 0 { - return errors.New("empty invocation script") - } - - return nil -} diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go index 4e30d5e62..fd1517f90 100644 --- a/cmd/frostfs-adm/internal/modules/root.go +++ b/cmd/frostfs-adm/internal/modules/root.go @@ -52,7 +52,7 @@ func Execute() error { return rootCmd.Execute() } -func entryPoint(cmd *cobra.Command, args []string) error { +func entryPoint(cmd *cobra.Command, _ []string) error { printVersion, _ := cmd.Flags().GetBool("version") if printVersion { cmd.Print(misc.BuildInfo("FrostFS Adm")) diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go index 435f79dfb..a07ce32c6 100644 --- a/cmd/frostfs-adm/internal/modules/storagecfg/config.go +++ b/cmd/frostfs-adm/internal/modules/storagecfg/config.go @@ -12,9 +12,6 @@ node: - {{ .AnnouncedAddress }} attribute_0: UN-LOCODE:{{ .Attribute.Locode }} relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map - subnet: - exit_zero: false # toggle entrance to zero subnet (overrides corresponding attribute and occurrence in entries) - entries: [] # list of IDs of subnets to enter in a text format of FrostFS API protocol (overrides corresponding attributes) grpc: num: 1 # total number of listener endpoints diff --git 
a/cmd/frostfs-cli/docs/storage-node-xheaders.md b/cmd/frostfs-cli/docs/storage-node-xheaders.md index 15bce457a..f86b97ec5 100644 --- a/cmd/frostfs-cli/docs/storage-node-xheaders.md +++ b/cmd/frostfs-cli/docs/storage-node-xheaders.md @@ -26,7 +26,6 @@ If set to '0' or not set, only the current epoch is used. List of commands with support of extended headers: * `container list-objects` * `object delete/get/hash/head/lock/put/range/search` -* `storagegroup delete/get/list/put` Example: ```shell diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index cbf19eb4b..875ccf904 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -329,6 +329,8 @@ func CreateSession(prm CreateSessionPrm) (res CreateSessionRes, err error) { type PutObjectPrm struct { commonObjectPrm + copyNum []uint32 + hdr *object.Object rdr io.Reader @@ -352,6 +354,12 @@ func (x *PutObjectPrm) SetHeaderCallback(f func(*object.Object)) { x.headerCallback = f } +// SetCopiesNumberByVectors sets ordered list of minimal required object copies numbers +// per placement vector. +func (x *PutObjectPrm) SetCopiesNumberByVectors(copiesNumbers []uint32) { + x.copyNum = copiesNumbers +} + // PutObjectRes groups the resulting values of PutObject operation. type PutObjectRes struct { id oid.ID @@ -381,6 +389,7 @@ func PutObject(prm PutObjectPrm) (*PutObjectRes, error) { } putPrm.WithXHeaders(prm.xHeaders...) 
+ putPrm.SetCopiesNumberByVectors(prm.copyNum) wrt, err := prm.cli.ObjectPutInit(context.Background(), putPrm) if err != nil { diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index 4cf0b0a34..13dacc04c 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -36,11 +36,11 @@ func getSDKClientByFlag(cmd *cobra.Command, key *ecdsa.PrivateKey, endpointFlag if err != nil { return nil, fmt.Errorf("%v: %w", errInvalidEndpoint, err) } - return GetSDKClient(cmd, key, addr) + return GetSDKClient(cmd.Context(), cmd, key, addr) } // GetSDKClient returns default frostfs-sdk-go client. -func GetSDKClient(cmd *cobra.Command, key *ecdsa.PrivateKey, addr network.Address) (*client.Client, error) { +func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey, addr network.Address) (*client.Client, error) { var ( c client.Client prmInit client.PrmInit @@ -62,7 +62,7 @@ func GetSDKClient(cmd *cobra.Command, key *ecdsa.PrivateKey, addr network.Addres c.Init(prmInit) - if err := c.Dial(prmDial); err != nil { + if err := c.Dial(ctx, prmDial); err != nil { return nil, fmt.Errorf("can't init SDK client: %w", err) } @@ -82,7 +82,7 @@ func GetCurrentEpoch(ctx context.Context, cmd *cobra.Command, endpoint string) ( return 0, fmt.Errorf("can't generate key to sign query: %w", err) } - c, err := GetSDKClient(cmd, key, addr) + c, err := GetSDKClient(ctx, cmd, key, addr) if err != nil { return 0, err } diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go index 410663e84..873ef3235 100644 --- a/cmd/frostfs-cli/modules/container/create.go +++ b/cmd/frostfs-cli/modules/container/create.go @@ -16,7 +16,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - subnetid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" ) @@ -30,7 +29,6 @@ var ( containerNnsName string containerNnsZone string containerNoTimestamp bool - containerSubnet string force bool ) @@ -70,15 +68,6 @@ It will be stored in sidechain when inner ring will accepts it.`, } } - if containerSubnet != "" { - var subnetID subnetid.ID - - err = subnetID.DecodeString(containerSubnet) - commonCmd.ExitOnErr(cmd, "could not parse subnetID: %w", err) - - placementPolicy.RestrictSubnet(subnetID) - } - var cnr container.Container cnr.Init() @@ -166,7 +155,6 @@ func initContainerCreateCmd() { flags.StringVar(&containerNnsName, "nns-name", "", "Container nns name attribute") flags.StringVar(&containerNnsZone, "nns-zone", "", "Container nns zone attribute") flags.BoolVar(&containerNoTimestamp, "disable-timestamp", false, "Disable timestamp container attribute") - flags.StringVar(&containerSubnet, "subnet", "", "String representation of container subnetwork") flags.BoolVarP(&force, commonflags.ForceFlag, commonflags.ForceFlagShorthand, false, "Skip placement validity check") } diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go index 9565748c3..33dd17943 100644 --- a/cmd/frostfs-cli/modules/container/list.go +++ b/cmd/frostfs-cli/modules/container/list.go @@ -8,6 +8,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" ) @@ -16,12 +17,14 @@ import ( const ( flagListPrintAttr = "with-attr" flagListContainerOwner = "owner" + flagListName = "name" ) // flag vars of list command. 
var ( flagVarListPrintAttr bool flagVarListContainerOwner string + flagVarListName string ) var listContainersCmd = &cobra.Command{ @@ -52,24 +55,33 @@ var listContainersCmd = &cobra.Command{ var prmGet internalclient.GetContainerPrm prmGet.SetClient(cli) - list := res.IDList() - for i := range list { - cmd.Println(list[i].String()) + containerIDs := res.IDList() + for _, cnrID := range containerIDs { + if flagVarListName == "" && !flagVarListPrintAttr { + cmd.Println(cnrID.String()) + continue + } + + prmGet.SetContainer(cnrID) + res, err := internalclient.GetContainer(prmGet) + if err != nil { + cmd.Printf(" failed to read attributes: %v\n", err) + continue + } + + cnr := res.Container() + if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName { + continue + } + cmd.Println(cnrID.String()) if flagVarListPrintAttr { - prmGet.SetContainer(list[i]) - - res, err := internalclient.GetContainer(prmGet) - if err == nil { - res.Container().IterateAttributes(func(key, val string) { - if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) { - // FIXME(@cthulhu-rider): neofs-sdk-go#314 use dedicated method to skip system attributes - cmd.Printf(" %s: %s\n", key, val) - } - }) - } else { - cmd.Printf(" failed to read attributes: %v\n", err) - } + cnr.IterateAttributes(func(key, val string) { + if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) { + // FIXME(@cthulhu-rider): neofs-sdk-go#314 use dedicated method to skip system attributes + cmd.Printf(" %s: %s\n", key, val) + } + }) } } }, @@ -80,6 +92,9 @@ func initContainerListContainersCmd() { flags := listContainersCmd.Flags() + flags.StringVar(&flagVarListName, flagListName, "", + "List containers by the attribute name", + ) flags.StringVar(&flagVarListContainerOwner, flagListContainerOwner, "", "Owner of containers (omit to use owner from private key)", ) 
diff --git a/cmd/frostfs-cli/modules/control/evacuate_shard.go b/cmd/frostfs-cli/modules/control/evacuate_shard.go index 02ee88ce0..b72ff6301 100644 --- a/cmd/frostfs-cli/modules/control/evacuate_shard.go +++ b/cmd/frostfs-cli/modules/control/evacuate_shard.go @@ -8,6 +8,8 @@ import ( "github.com/spf13/cobra" ) +const ignoreErrorsFlag = "no-errors" + var evacuateShardCmd = &cobra.Command{ Use: "evacuate", Short: "Evacuate objects from shard", @@ -20,7 +22,7 @@ func evacuateShard(cmd *cobra.Command, _ []string) { req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)} req.Body.Shard_ID = getShardIDList(cmd) - req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(dumpIgnoreErrorsFlag) + req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag) signRequest(cmd, pk, req) @@ -47,7 +49,7 @@ func initControlEvacuateShardCmd() { flags := evacuateShardCmd.Flags() flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") flags.Bool(shardAllFlag, false, "Process all shards") - flags.Bool(dumpIgnoreErrorsFlag, false, "Skip invalid/unreadable objects") + flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects") evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) } diff --git a/cmd/frostfs-cli/modules/control/ir.go b/cmd/frostfs-cli/modules/control/ir.go new file mode 100644 index 000000000..ae3f8502c --- /dev/null +++ b/cmd/frostfs-cli/modules/control/ir.go @@ -0,0 +1,17 @@ +package control + +import "github.com/spf13/cobra" + +var irCmd = &cobra.Command{ + Use: "ir", + Short: "Operations with inner ring nodes", + Long: "Operations with inner ring nodes", +} + +func initControlIRCmd() { + irCmd.AddCommand(tickEpochCmd) + irCmd.AddCommand(removeNodeCmd) + + initControlIRTickEpochCmd() + initControlIRRemoveNodeCmd() +} diff --git a/cmd/frostfs-cli/modules/control/ir_remove_node.go b/cmd/frostfs-cli/modules/control/ir_remove_node.go new file mode 100644 index 000000000..f5b968b7f --- /dev/null +++ 
b/cmd/frostfs-cli/modules/control/ir_remove_node.go @@ -0,0 +1,58 @@ +package control + +import ( + "encoding/hex" + "errors" + + rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + "github.com/spf13/cobra" +) + +var removeNodeCmd = &cobra.Command{ + Use: "remove-node", + Short: "Forces a node removal from netmap", + Long: "Forces a node removal from netmap via a notary request. It should be executed on other IR nodes as well.", + Run: removeNode, +} + +func initControlIRRemoveNodeCmd() { + initControlFlags(removeNodeCmd) + + flags := removeNodeCmd.Flags() + flags.String("node", "", "Node public key as a hex string") + _ = removeNodeCmd.MarkFlagRequired("node") +} + +func removeNode(cmd *cobra.Command, _ []string) { + pk := key.Get(cmd) + c := getClient(cmd, pk) + + nodeKeyStr, _ := cmd.Flags().GetString("node") + if len(nodeKeyStr) == 0 { + commonCmd.ExitOnErr(cmd, "parsing node public key: ", errors.New("key cannot be empty")) + } + nodeKey, err := hex.DecodeString(nodeKeyStr) + commonCmd.ExitOnErr(cmd, "can't decode node public key: %w", err) + + req := new(ircontrol.RemoveNodeRequest) + req.SetBody(&ircontrol.RemoveNodeRequest_Body{ + Key: nodeKey, + }) + + commonCmd.ExitOnErr(cmd, "could not sign request: %w", ircontrolsrv.SignMessage(pk, req)) + + var resp *ircontrol.RemoveNodeResponse + err = c.ExecRaw(func(client *rawclient.Client) error { + resp, err = ircontrol.RemoveNode(client, req) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) + + cmd.Println("Node removed") +} diff --git a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go 
b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go new file mode 100644 index 000000000..3e6af0081 --- /dev/null +++ b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go @@ -0,0 +1,43 @@ +package control + +import ( + rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + "github.com/spf13/cobra" +) + +var tickEpochCmd = &cobra.Command{ + Use: "tick-epoch", + Short: "Forces a new epoch", + Long: "Forces a new epoch via a notary request. It should be executed on other IR nodes as well.", + Run: tickEpoch, +} + +func initControlIRTickEpochCmd() { + initControlFlags(tickEpochCmd) +} + +func tickEpoch(cmd *cobra.Command, _ []string) { + pk := key.Get(cmd) + c := getClient(cmd, pk) + + req := new(ircontrol.TickEpochRequest) + req.SetBody(new(ircontrol.TickEpochRequest_Body)) + + err := ircontrolsrv.SignMessage(pk, req) + commonCmd.ExitOnErr(cmd, "could not sign request: %w", err) + + var resp *ircontrol.TickEpochResponse + err = c.ExecRaw(func(client *rawclient.Client) error { + resp, err = ircontrol.TickEpoch(client, req) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) + + cmd.Println("Epoch tick requested") +} diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go index d3b656392..015676185 100644 --- a/cmd/frostfs-cli/modules/control/root.go +++ b/cmd/frostfs-cli/modules/control/root.go @@ -33,6 +33,7 @@ func init() { dropObjectsCmd, shardsCmd, synchronizeTreeCmd, + irCmd, ) initControlHealthCheckCmd() @@ -40,4 +41,5 @@ func init() { initControlDropObjectsCmd() initControlShardsCmd() initControlSynchronizeTreeCmd() 
+ initControlIRCmd() } diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go index 9d3eb5c01..8e7ecff8c 100644 --- a/cmd/frostfs-cli/modules/control/shards.go +++ b/cmd/frostfs-cli/modules/control/shards.go @@ -13,16 +13,12 @@ var shardsCmd = &cobra.Command{ func initControlShardsCmd() { shardsCmd.AddCommand(listShardsCmd) shardsCmd.AddCommand(setShardModeCmd) - shardsCmd.AddCommand(dumpShardCmd) - shardsCmd.AddCommand(restoreShardCmd) shardsCmd.AddCommand(evacuateShardCmd) shardsCmd.AddCommand(flushCacheCmd) shardsCmd.AddCommand(doctorCmd) initControlShardsListCmd() initControlSetShardModeCmd() - initControlDumpShardCmd() - initControlRestoreShardCmd() initControlEvacuateShardCmd() initControlFlushCacheCmd() initControlDoctorCmd() diff --git a/cmd/frostfs-cli/modules/control/shards_dump.go b/cmd/frostfs-cli/modules/control/shards_dump.go deleted file mode 100644 index c0d0aca95..000000000 --- a/cmd/frostfs-cli/modules/control/shards_dump.go +++ /dev/null @@ -1,66 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "github.com/spf13/cobra" -) - -const ( - dumpFilepathFlag = "path" - dumpIgnoreErrorsFlag = "no-errors" -) - -var dumpShardCmd = &cobra.Command{ - Use: "dump", - Short: "Dump objects from shard", - Long: "Dump objects from shard to a file", - Run: dumpShard, -} - -func dumpShard(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - body := new(control.DumpShardRequest_Body) - body.SetShardID(getShardID(cmd)) - - p, _ := cmd.Flags().GetString(dumpFilepathFlag) - body.SetFilepath(p) - - ignore, _ := cmd.Flags().GetBool(dumpIgnoreErrorsFlag) - body.SetIgnoreErrors(ignore) - - req := new(control.DumpShardRequest) - req.SetBody(body) - - 
signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.DumpShardResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.DumpShard(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Shard has been dumped successfully.") -} - -func initControlDumpShardCmd() { - initControlFlags(dumpShardCmd) - - flags := dumpShardCmd.Flags() - flags.String(shardIDFlag, "", "Shard ID in base58 encoding") - flags.String(dumpFilepathFlag, "", "File to write objects to") - flags.Bool(dumpIgnoreErrorsFlag, false, "Skip invalid/unreadable objects") - - _ = dumpShardCmd.MarkFlagRequired(shardIDFlag) - _ = dumpShardCmd.MarkFlagRequired(dumpFilepathFlag) - _ = dumpShardCmd.MarkFlagRequired(controlRPC) -} diff --git a/cmd/frostfs-cli/modules/control/shards_restore.go b/cmd/frostfs-cli/modules/control/shards_restore.go deleted file mode 100644 index edf97a731..000000000 --- a/cmd/frostfs-cli/modules/control/shards_restore.go +++ /dev/null @@ -1,66 +0,0 @@ -package control - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "github.com/spf13/cobra" -) - -const ( - restoreFilepathFlag = "path" - restoreIgnoreErrorsFlag = "no-errors" -) - -var restoreShardCmd = &cobra.Command{ - Use: "restore", - Short: "Restore objects from shard", - Long: "Restore objects from shard to a file", - Run: restoreShard, -} - -func restoreShard(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - body := new(control.RestoreShardRequest_Body) - body.SetShardID(getShardID(cmd)) - - p, _ := cmd.Flags().GetString(restoreFilepathFlag) - body.SetFilepath(p) - - ignore, _ := 
cmd.Flags().GetBool(restoreIgnoreErrorsFlag) - body.SetIgnoreErrors(ignore) - - req := new(control.RestoreShardRequest) - req.SetBody(body) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.RestoreShardResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.RestoreShard(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Shard has been restored successfully.") -} - -func initControlRestoreShardCmd() { - initControlFlags(restoreShardCmd) - - flags := restoreShardCmd.Flags() - flags.String(shardIDFlag, "", "Shard ID in base58 encoding") - flags.String(restoreFilepathFlag, "", "File to read objects from") - flags.Bool(restoreIgnoreErrorsFlag, false, "Skip invalid/unreadable objects") - - _ = restoreShardCmd.MarkFlagRequired(shardIDFlag) - _ = restoreShardCmd.MarkFlagRequired(restoreFilepathFlag) - _ = restoreShardCmd.MarkFlagRequired(controlRPC) -} diff --git a/cmd/frostfs-cli/modules/control/shards_set_mode.go b/cmd/frostfs-cli/modules/control/shards_set_mode.go index 4f6e24082..135c0efa6 100644 --- a/cmd/frostfs-cli/modules/control/shards_set_mode.go +++ b/cmd/frostfs-cli/modules/control/shards_set_mode.go @@ -137,13 +137,6 @@ func setShardMode(cmd *cobra.Command, _ []string) { cmd.Println("Shard mode update request successfully sent.") } -func getShardID(cmd *cobra.Command) []byte { - sid, _ := cmd.Flags().GetString(shardIDFlag) - raw, err := base58.Decode(sid) - commonCmd.ExitOnErr(cmd, "incorrect shard ID encoding: %w", err) - return raw -} - func getShardIDList(cmd *cobra.Command) [][]byte { all, _ := cmd.Flags().GetBool(shardAllFlag) if all { diff --git a/cmd/frostfs-cli/modules/netmap/netinfo.go b/cmd/frostfs-cli/modules/netmap/netinfo.go index f34456c71..17acfd59c 100644 --- a/cmd/frostfs-cli/modules/netmap/netinfo.go +++ b/cmd/frostfs-cli/modules/netmap/netinfo.go @@ -41,8 
+41,6 @@ var netInfoCmd = &cobra.Command{ cmd.Printf(format, "Audit fee", netInfo.AuditFee()) cmd.Printf(format, "Storage price", netInfo.StoragePrice()) cmd.Printf(format, "Container fee", netInfo.ContainerFee()) - cmd.Printf(format, "EigenTrust alpha", netInfo.EigenTrustAlpha()) - cmd.Printf(format, "Number of EigenTrust iterations", netInfo.NumberOfEigenTrustIterations()) cmd.Printf(format, "Epoch duration", netInfo.EpochDuration()) cmd.Printf(format, "Inner Ring candidate fee", netInfo.IRCandidateFee()) cmd.Printf(format, "Maximum object size", netInfo.MaxObjectSize()) diff --git a/cmd/frostfs-cli/modules/object/put.go b/cmd/frostfs-cli/modules/object/put.go index fe8e9dda9..6fd91ca4b 100644 --- a/cmd/frostfs-cli/modules/object/put.go +++ b/cmd/frostfs-cli/modules/object/put.go @@ -25,6 +25,7 @@ import ( const ( noProgressFlag = "no-progress" notificationFlag = "notify" + copiesNumberFlag = "copies-number" ) var putExpiredOn uint64 @@ -56,6 +57,8 @@ func initObjectPutCmd() { flags.String(notificationFlag, "", "Object notification in the form of *epoch*:*topic*; '-' topic means using default") flags.Bool(binaryFlag, false, "Deserialize object structure from given file.") + + flags.String(copiesNumberFlag, "", "Number of copies of the object to store within the RPC call") } func putObject(cmd *cobra.Command, _ []string) { @@ -116,6 +119,18 @@ func putObject(cmd *cobra.Command, _ []string) { } } + copyNum, err := cmd.Flags().GetString(copiesNumberFlag) + commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err) + if len(copyNum) > 0 { + var cn []uint32 + for _, num := range strings.Split(copyNum, ",") { + val, err := strconv.ParseUint(num, 10, 32) + commonCmd.ExitOnErr(cmd, "can't parse object copies numbers information: %w", err) + cn = append(cn, uint32(val)) + } + prm.SetCopiesNumberByVectors(cn) + } + res, err := internalclient.PutObject(prm) if p != nil { p.Finish() diff --git a/cmd/frostfs-cli/modules/root.go 
b/cmd/frostfs-cli/modules/root.go index cf6d1d40b..93513479a 100644 --- a/cmd/frostfs-cli/modules/root.go +++ b/cmd/frostfs-cli/modules/root.go @@ -14,7 +14,6 @@ import ( netmapCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/netmap" objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" sessionCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/session" - sgCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/storagegroup" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/tree" utilCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" @@ -84,7 +83,6 @@ func init() { rootCmd.AddCommand(utilCli.Cmd) rootCmd.AddCommand(netmapCli.Cmd) rootCmd.AddCommand(objectCli.Cmd) - rootCmd.AddCommand(sgCli.Cmd) rootCmd.AddCommand(containerCli.Cmd) rootCmd.AddCommand(tree.Cmd) rootCmd.AddCommand(gendoc.Command(rootCmd)) diff --git a/cmd/frostfs-cli/modules/session/create.go b/cmd/frostfs-cli/modules/session/create.go index e1c951b93..341681f5b 100644 --- a/cmd/frostfs-cli/modules/session/create.go +++ b/cmd/frostfs-cli/modules/session/create.go @@ -54,7 +54,7 @@ func createSession(cmd *cobra.Command, _ []string) { addrStr, _ := cmd.Flags().GetString(commonflags.RPC) commonCmd.ExitOnErr(cmd, "can't parse endpoint: %w", netAddr.FromString(addrStr)) - c, err := internalclient.GetSDKClient(cmd, privKey, netAddr) + c, err := internalclient.GetSDKClient(cmd.Context(), cmd, privKey, netAddr) commonCmd.ExitOnErr(cmd, "can't create client: %w", err) lifetime := uint64(defaultLifetime) diff --git a/cmd/frostfs-cli/modules/storagegroup/delete.go b/cmd/frostfs-cli/modules/storagegroup/delete.go deleted file mode 100644 index 83751de7d..000000000 --- a/cmd/frostfs-cli/modules/storagegroup/delete.go +++ /dev/null @@ -1,53 +0,0 @@ -package storagegroup - -import ( - internalclient 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -var sgDelCmd = &cobra.Command{ - Use: "delete", - Short: "Delete storage group from FrostFS", - Long: "Delete storage group from FrostFS", - Run: delSG, -} - -func initSGDeleteCmd() { - commonflags.Init(sgDelCmd) - - flags := sgDelCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = sgDelCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.StringVarP(&sgID, sgIDFlag, "", "", "Storage group identifier") - _ = sgDelCmd.MarkFlagRequired(sgIDFlag) -} - -func delSG(cmd *cobra.Command, _ []string) { - pk := key.GetOrGenerate(cmd) - - var cnr cid.ID - var obj oid.ID - - addr := readObjectAddress(cmd, &cnr, &obj) - - var prm internalclient.DeleteObjectPrm - objectCli.OpenSession(cmd, &prm, pk, cnr, &obj) - objectCli.Prepare(cmd, &prm) - prm.SetAddress(addr) - - res, err := internalclient.DeleteObject(prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - tombstone := res.Tombstone() - - cmd.Println("Storage group removed successfully.") - cmd.Printf(" Tombstone: %s\n", tombstone) -} diff --git a/cmd/frostfs-cli/modules/storagegroup/get.go b/cmd/frostfs-cli/modules/storagegroup/get.go deleted file mode 100644 index c59d3c51c..000000000 --- a/cmd/frostfs-cli/modules/storagegroup/get.go +++ /dev/null @@ -1,82 +0,0 @@ -package storagegroup - -import ( - "bytes" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - storagegroupSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup" - "github.com/spf13/cobra" -) - -var sgID string - -var sgGetCmd = &cobra.Command{ - Use: "get", - Short: "Get storage group from FrostFS", - Long: "Get storage group from FrostFS", - Run: getSG, -} - -func initSGGetCmd() { - commonflags.Init(sgGetCmd) - - flags := sgGetCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = sgGetCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.StringVarP(&sgID, sgIDFlag, "", "", "Storage group identifier") - _ = sgGetCmd.MarkFlagRequired(sgIDFlag) - - flags.Bool(sgRawFlag, false, "Set raw request option") -} - -func getSG(cmd *cobra.Command, _ []string) { - var cnr cid.ID - var obj oid.ID - - addr := readObjectAddress(cmd, &cnr, &obj) - pk := key.GetOrGenerate(cmd) - buf := bytes.NewBuffer(nil) - - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - var prm internalclient.GetObjectPrm - objectCli.Prepare(cmd, &prm) - prm.SetClient(cli) - - raw, _ := cmd.Flags().GetBool(sgRawFlag) - prm.SetRawFlag(raw) - prm.SetAddress(addr) - prm.SetPayloadWriter(buf) - - res, err := internalclient.GetObject(prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - rawObj := res.Header() - rawObj.SetPayload(buf.Bytes()) - - var sg storagegroupSDK.StorageGroup - - err = storagegroupSDK.ReadFromObject(&sg, *rawObj) - commonCmd.ExitOnErr(cmd, "could not read storage group from the obj: %w", err) - - 
cmd.Printf("The last active epoch: %d\n", sg.ExpirationEpoch()) - cmd.Printf("Group size: %d\n", sg.ValidationDataSize()) - common.PrintChecksum(cmd, "Group hash", sg.ValidationDataHash) - - if members := sg.Members(); len(members) > 0 { - cmd.Println("Members:") - - for i := range members { - cmd.Printf("\t%s\n", members[i].String()) - } - } -} diff --git a/cmd/frostfs-cli/modules/storagegroup/list.go b/cmd/frostfs-cli/modules/storagegroup/list.go deleted file mode 100644 index 4c12453d5..000000000 --- a/cmd/frostfs-cli/modules/storagegroup/list.go +++ /dev/null @@ -1,52 +0,0 @@ -package storagegroup - -import ( - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/storagegroup" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/spf13/cobra" -) - -var sgListCmd = &cobra.Command{ - Use: "list", - Short: "List storage groups in FrostFS container", - Long: "List storage groups in FrostFS container", - Run: listSG, -} - -func initSGListCmd() { - commonflags.Init(sgListCmd) - - sgListCmd.Flags().String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = sgListCmd.MarkFlagRequired(commonflags.CIDFlag) -} - -func listSG(cmd *cobra.Command, _ []string) { - var cnr cid.ID - readCID(cmd, &cnr) - - pk := key.GetOrGenerate(cmd) - - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - - var prm internalclient.SearchObjectsPrm - objectCli.Prepare(cmd, &prm) - prm.SetClient(cli) - prm.SetContainerID(cnr) - prm.SetFilters(storagegroup.SearchQuery()) - - res, err := internalclient.SearchObjects(prm) - 
commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - ids := res.IDList() - - cmd.Printf("Found %d storage groups.\n", len(ids)) - - for i := range ids { - cmd.Println(ids[i].String()) - } -} diff --git a/cmd/frostfs-cli/modules/storagegroup/put.go b/cmd/frostfs-cli/modules/storagegroup/put.go deleted file mode 100644 index f4024a833..000000000 --- a/cmd/frostfs-cli/modules/storagegroup/put.go +++ /dev/null @@ -1,145 +0,0 @@ -package storagegroup - -import ( - "crypto/ecdsa" - "errors" - "fmt" - - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/storagegroup" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - storagegroupSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/spf13/cobra" -) - -const sgMembersFlag = "members" - -var sgMembers []string - -var sgPutCmd = &cobra.Command{ - Use: "put", - Short: "Put storage group to FrostFS", - Long: "Put storage group to FrostFS", - Run: putSG, -} - -func initSGPutCmd() { - commonflags.Init(sgPutCmd) - - flags := sgPutCmd.Flags() - - flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - _ = sgPutCmd.MarkFlagRequired(commonflags.CIDFlag) - - flags.StringSliceVarP(&sgMembers, sgMembersFlag, "m", nil, "ID list of storage group members") - _ = sgPutCmd.MarkFlagRequired(sgMembersFlag) - - 
flags.Uint64(commonflags.Lifetime, 0, "Storage group lifetime in epochs") - _ = sgPutCmd.MarkFlagRequired(commonflags.Lifetime) -} - -func putSG(cmd *cobra.Command, _ []string) { - pk := key.GetOrGenerate(cmd) - - var ownerID user.ID - user.IDFromKey(&ownerID, pk.PublicKey) - - var cnr cid.ID - readCID(cmd, &cnr) - - members := make([]oid.ID, len(sgMembers)) - uniqueFilter := make(map[oid.ID]struct{}, len(sgMembers)) - - for i := range sgMembers { - err := members[i].DecodeString(sgMembers[i]) - commonCmd.ExitOnErr(cmd, "could not parse object ID: %w", err) - - if _, alreadyExists := uniqueFilter[members[i]]; alreadyExists { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("%s member in not unique", members[i])) - } - - uniqueFilter[members[i]] = struct{}{} - } - - var ( - headPrm internalclient.HeadObjectPrm - putPrm internalclient.PutObjectPrm - getCnrPrm internalclient.GetContainerPrm - ) - - cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - getCnrPrm.SetClient(cli) - getCnrPrm.SetContainer(cnr) - - resGetCnr, err := internalclient.GetContainer(getCnrPrm) - commonCmd.ExitOnErr(cmd, "get container RPC call: %w", err) - - objectCli.OpenSessionViaClient(cmd, &putPrm, cli, pk, cnr, nil) - objectCli.Prepare(cmd, &headPrm, &putPrm) - - headPrm.SetRawFlag(true) - headPrm.SetClient(cli) - - sg, err := storagegroup.CollectMembers(sgHeadReceiver{ - cmd: cmd, - key: pk, - ownerID: &ownerID, - prm: headPrm, - }, cnr, members, !container.IsHomomorphicHashingDisabled(resGetCnr.Container())) - commonCmd.ExitOnErr(cmd, "could not collect storage group members: %w", err) - - var netInfoPrm internalclient.NetworkInfoPrm - netInfoPrm.SetClient(cli) - - ni, err := internalclient.NetworkInfo(netInfoPrm) - commonCmd.ExitOnErr(cmd, "can't fetch network info: %w", err) - - lifetime, _ := cmd.Flags().GetUint64(commonflags.Lifetime) - sg.SetExpirationEpoch(ni.NetworkInfo().CurrentEpoch() + lifetime) - - obj := object.New() - obj.SetContainerID(cnr) - obj.SetOwnerID(&ownerID) 
- - storagegroupSDK.WriteToObject(*sg, obj) - - putPrm.SetHeader(obj) - - res, err := internalclient.PutObject(putPrm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - cmd.Println("Storage group successfully stored") - cmd.Printf(" ID: %s\n CID: %s\n", res.ID(), cnr) -} - -type sgHeadReceiver struct { - cmd *cobra.Command - key *ecdsa.PrivateKey - ownerID *user.ID - prm internalclient.HeadObjectPrm -} - -func (c sgHeadReceiver) Head(addr oid.Address) (any, error) { - c.prm.SetAddress(addr) - - res, err := internalclient.HeadObject(c.prm) - - var errSplitInfo *object.SplitInfoError - - switch { - default: - return nil, err - case err == nil: - return res.Header(), nil - case errors.As(err, &errSplitInfo): - return errSplitInfo.SplitInfo(), nil - } -} diff --git a/cmd/frostfs-cli/modules/storagegroup/root.go b/cmd/frostfs-cli/modules/storagegroup/root.go deleted file mode 100644 index 413310420..000000000 --- a/cmd/frostfs-cli/modules/storagegroup/root.go +++ /dev/null @@ -1,46 +0,0 @@ -package storagegroup - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - objectCli "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" - "github.com/spf13/cobra" -) - -// Cmd represents the storagegroup command. -var Cmd = &cobra.Command{ - Use: "storagegroup", - Short: "Operations with Storage Groups", - Long: `Operations with Storage Groups`, - PersistentPreRun: func(cmd *cobra.Command, args []string) { - // bind exactly that cmd's flags to - // the viper before execution - commonflags.Bind(cmd) - commonflags.BindAPI(cmd) - }, -} - -const ( - sgIDFlag = "id" - sgRawFlag = "raw" -) - -func init() { - storageGroupChildCommands := []*cobra.Command{ - sgPutCmd, - sgGetCmd, - sgListCmd, - sgDelCmd, - } - - Cmd.AddCommand(storageGroupChildCommands...) 
- - for _, sgCommand := range storageGroupChildCommands { - objectCli.InitBearer(sgCommand) - commonflags.InitAPI(sgCommand) - } - - initSGPutCmd() - initSGGetCmd() - initSGListCmd() - initSGDeleteCmd() -} diff --git a/cmd/frostfs-cli/modules/storagegroup/util.go b/cmd/frostfs-cli/modules/storagegroup/util.go deleted file mode 100644 index 5ca61a58e..000000000 --- a/cmd/frostfs-cli/modules/storagegroup/util.go +++ /dev/null @@ -1,45 +0,0 @@ -package storagegroup - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { - readCID(cmd, cnr) - readSGID(cmd, obj) - - var addr oid.Address - addr.SetContainer(*cnr) - addr.SetObject(*obj) - return addr -} - -func readCID(cmd *cobra.Command, id *cid.ID) { - f := cmd.Flag(commonflags.CIDFlag) - if f == nil { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("missing container flag (%s)", commonflags.CIDFlag)) - return - } - - err := id.DecodeString(f.Value.String()) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err) -} - -func readSGID(cmd *cobra.Command, id *oid.ID) { - const flag = "id" - - f := cmd.Flag(flag) - if f == nil { - commonCmd.ExitOnErr(cmd, "", fmt.Errorf("missing storage group flag (%s)", flag)) - return - } - - err := id.DecodeString(f.Value.String()) - commonCmd.ExitOnErr(cmd, "decode storage group ID string: %w", err) -} diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go new file mode 100644 index 000000000..54c7d18e3 --- /dev/null +++ b/cmd/frostfs-ir/config.go @@ -0,0 +1,80 @@ +package main + +import ( + "os" + "os/signal" + "syscall" + + configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" 
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +func newConfig() (*viper.Viper, error) { + var err error + var dv = viper.New() + + defaultConfiguration(dv) + + _, err = configViper.CreateViper(configViper.WithConfigFile(*configFile), + configViper.WithConfigDir(*configDir), configViper.WithEnvPrefix(EnvPrefix), + configViper.WithViper(dv)) + if err != nil { + return nil, err + } + + return dv, err +} + +func reloadConfig() error { + err := configViper.ReloadViper(configViper.WithConfigFile(*configFile), + configViper.WithConfigDir(*configDir), configViper.WithEnvPrefix(EnvPrefix), + configViper.WithViper(cfg)) + if err != nil { + return err + } + err = logPrm.SetLevelString(cfg.GetString("logger.level")) + if err != nil { + return err + } + return logPrm.Reload() +} + +func watchForSignal(cancel func()) { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) + + for { + select { + case err := <-intErr: + log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error())) + cancel() + shutdown() + return + case sig := <-ch: + switch sig { + case syscall.SIGHUP: + log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) + err := reloadConfig() + if err != nil { + log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) + } + pprofCmp.reload() + metricsCmp.reload() + log.Info(logs.FrostFSIRReloadExtraWallets) + err = innerRing.SetExtraWallets(cfg) + if err != nil { + log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) + } + log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) + case syscall.SIGTERM, syscall.SIGINT: + log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + cancel() + shutdown() + log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) + return + } + } + } +} diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go index bd374fcb5..007bb8964 100644 --- 
a/cmd/frostfs-ir/defaults.go +++ b/cmd/frostfs-ir/defaults.go @@ -1,46 +1,11 @@ package main import ( - "strings" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" "github.com/spf13/viper" ) -func newConfig(path, directory string) (*viper.Viper, error) { - const envPrefix = "FROSTFS_IR" - - var ( - err error - v = viper.New() - ) - - v.SetEnvPrefix(envPrefix) - v.AutomaticEnv() - v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) - - defaultConfiguration(v) - - if path != "" { - v.SetConfigFile(path) - if strings.HasSuffix(path, ".json") { - v.SetConfigType("json") - } else { - v.SetConfigType("yml") - } - if err = v.ReadInConfig(); err != nil { - return v, err - } - } - - if directory != "" { - err = config.ReadConfigDir(v, directory) - } - - return v, err -} - func defaultConfiguration(cfg *viper.Viper) { cfg.SetDefault("logger.level", "info") @@ -68,10 +33,6 @@ func defaultConfiguration(cfg *viper.Viper) { setEmitDefaults(cfg) - setAuditDefaults(cfg) - - setSettlementDefaults(cfg) - cfg.SetDefault("indexer.cache_timeout", 15*time.Second) cfg.SetDefault("locode.db.path", "") @@ -95,23 +56,6 @@ func setFeeDefaults(cfg *viper.Viper) { cfg.SetDefault("fee.named_container_register", 25_0000_0000) // 25.0 Fixed8 } -func setSettlementDefaults(cfg *viper.Viper) { - cfg.SetDefault("settlement.basic_income_rate", 0) - cfg.SetDefault("settlement.audit_fee", 0) -} - -func setAuditDefaults(cfg *viper.Viper) { - cfg.SetDefault("audit.task.exec_pool_size", 10) - cfg.SetDefault("audit.task.queue_capacity", 100) - cfg.SetDefault("audit.timeout.get", "5s") - cfg.SetDefault("audit.timeout.head", "5s") - cfg.SetDefault("audit.timeout.rangehash", "5s") - cfg.SetDefault("audit.timeout.search", "10s") - cfg.SetDefault("audit.pdp.max_sleep_interval", "5s") - cfg.SetDefault("audit.pdp.pairs_pool_size", "10") - cfg.SetDefault("audit.por.pool_size", "10") -} - func setEmitDefaults(cfg *viper.Viper) { cfg.SetDefault("emit.storage.amount", 0) 
cfg.SetDefault("emit.mint.cache_size", 1000) @@ -142,8 +86,6 @@ func setWorkersDefaults(cfg *viper.Viper) { cfg.SetDefault("workers.frostfs", "10") cfg.SetDefault("workers.container", "10") cfg.SetDefault("workers.alphabet", "10") - cfg.SetDefault("workers.reputation", "10") - cfg.SetDefault("workers.subnet", "10") } func setTimersDefaults(cfg *viper.Viper) { @@ -161,11 +103,8 @@ func setContractsDefaults(cfg *viper.Viper) { cfg.SetDefault("contracts.frostfs", "") cfg.SetDefault("contracts.balance", "") cfg.SetDefault("contracts.container", "") - cfg.SetDefault("contracts.audit", "") cfg.SetDefault("contracts.proxy", "") cfg.SetDefault("contracts.processing", "") - cfg.SetDefault("contracts.reputation", "") - cfg.SetDefault("contracts.subnet", "") cfg.SetDefault("contracts.proxy", "") } diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go new file mode 100644 index 000000000..3a6d77d84 --- /dev/null +++ b/cmd/frostfs-ir/httpcomponent.go @@ -0,0 +1,87 @@ +package main + +import ( + "fmt" + "net/http" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" + "go.uber.org/zap" +) + +type httpComponent struct { + srv *httputil.Server + address string + name string + handler http.Handler + shutdownDur time.Duration + enabled bool +} + +const ( + enabledKeyPostfix = ".enabled" + addressKeyPostfix = ".address" + shutdownTimeoutKeyPostfix = ".shutdown_timeout" +) + +func (c *httpComponent) init() { + log.Info(fmt.Sprintf("init %s", c.name)) + c.enabled = cfg.GetBool(c.name + enabledKeyPostfix) + c.address = cfg.GetString(c.name + addressKeyPostfix) + c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix) + + if c.enabled { + c.srv = httputil.New( + httputil.HTTPSrvPrm{ + Address: c.address, + Handler: c.handler, + }, + httputil.WithShutdownTimeout(c.shutdownDur), + ) + } else { + log.Info(fmt.Sprintf("%s is disabled, skip", c.name)) + c.srv = nil + } 
+} + +func (c *httpComponent) start() { + if c.srv != nil { + log.Info(fmt.Sprintf("start %s", c.name)) + wg.Add(1) + go func() { + defer wg.Done() + exitErr(c.srv.Serve()) + }() + } +} + +func (c *httpComponent) shutdown() error { + if c.srv != nil { + log.Info(fmt.Sprintf("shutdown %s", c.name)) + return c.srv.Shutdown() + } + return nil +} + +func (c *httpComponent) needReload() bool { + enabled := cfg.GetBool(c.name + enabledKeyPostfix) + address := cfg.GetString(c.name + addressKeyPostfix) + dur := cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix) + return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur) +} + +func (c *httpComponent) reload() { + log.Info(fmt.Sprintf("reload %s", c.name)) + if c.needReload() { + log.Info(fmt.Sprintf("%s config updated", c.name)) + if err := c.shutdown(); err != nil { + log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, + zap.String("error", err.Error()), + ) + } else { + c.init() + c.start() + } + } +} diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index e4386a083..1718a46ab 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -4,16 +4,13 @@ import ( "context" "flag" "fmt" - "net/http" "os" - "os/signal" - "syscall" + "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" - httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/viper" "go.uber.org/zap" ) @@ -24,6 +21,21 @@ const ( // SuccessReturnCode returns when application closed without panic. 
SuccessReturnCode = 0 + + EnvPrefix = "FROSTFS_IR" +) + +var ( + wg = new(sync.WaitGroup) + intErr = make(chan error) // internal inner ring errors + logPrm = new(logger.Prm) + innerRing *innerring.Server + pprofCmp *pprofComponent + metricsCmp *httpComponent + log *logger.Logger + cfg *viper.Viper + configFile *string + configDir *string ) func exitErr(err error) { @@ -34,8 +46,8 @@ func exitErr(err error) { } func main() { - configFile := flag.String("config", "", "path to config") - configDir := flag.String("config-dir", "", "path to config directory") + configFile = flag.String("config", "", "path to config") + configDir = flag.String("config-dir", "", "path to config directory") versionFlag := flag.Bool("version", false, "frostfs-ir node version") flag.Parse() @@ -45,101 +57,58 @@ func main() { os.Exit(SuccessReturnCode) } - cfg, err := newConfig(*configFile, *configDir) + var err error + cfg, err = newConfig() exitErr(err) - var logPrm logger.Prm - err = logPrm.SetLevelString( cfg.GetString("logger.level"), ) exitErr(err) - log, err := logger.NewLogger(&logPrm) + log, err = logger.NewLogger(logPrm) exitErr(err) - ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) - defer cancel() + ctx, cancel := context.WithCancel(context.Background()) - intErr := make(chan error) // internal inner ring errors + pprofCmp = newPprofComponent() + pprofCmp.init() - httpServers := initHTTPServers(cfg, log) + metricsCmp = newMetricsComponent() + metricsCmp.init() - innerRing, err := innerring.New(ctx, log, cfg, intErr) + innerRing, err = innerring.New(ctx, log, cfg, intErr) exitErr(err) - // start HTTP servers - for i := range httpServers { - srv := httpServers[i] - go func() { - exitErr(srv.Serve()) - }() - } + pprofCmp.start() + metricsCmp.start() // start inner ring err = innerRing.Start(ctx, intErr) exitErr(err) - log.Info("application started", + log.Info(logs.CommonApplicationStarted, zap.String("version", 
misc.Version)) - select { - case <-ctx.Done(): - case err := <-intErr: - log.Info("internal error", zap.String("msg", err.Error())) - } + watchForSignal(cancel) - innerRing.Stop() + <-ctx.Done() // graceful shutdown + log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop) + wg.Wait() - // shut down HTTP servers - for i := range httpServers { - srv := httpServers[i] - - go func() { - err := srv.Shutdown() - if err != nil { - log.Debug("could not shutdown HTTP server", - zap.String("error", err.Error()), - ) - } - }() - } - - log.Info("application stopped") + log.Info(logs.FrostFSIRApplicationStopped) } -func initHTTPServers(cfg *viper.Viper, log *logger.Logger) []*httputil.Server { - items := []struct { - cfgPrefix string - handler func() http.Handler - }{ - {"pprof", httputil.Handler}, - {"prometheus", promhttp.Handler}, - } - - httpServers := make([]*httputil.Server, 0, len(items)) - - for _, item := range items { - if !cfg.GetBool(item.cfgPrefix + ".enabled") { - log.Info(item.cfgPrefix + " is disabled, skip") - continue - } - - addr := cfg.GetString(item.cfgPrefix + ".address") - - var prm httputil.HTTPSrvPrm - - prm.Address = addr - prm.Handler = item.handler() - - httpServers = append(httpServers, - httputil.New(prm, - httputil.WithShutdownTimeout( - cfg.GetDuration(item.cfgPrefix+".shutdown_timeout"), - ), - ), +func shutdown() { + innerRing.Stop() + if err := metricsCmp.shutdown(); err != nil { + log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, + zap.String("error", err.Error()), + ) + } + if err := pprofCmp.shutdown(); err != nil { + log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, + zap.String("error", err.Error()), ) } - - return httpServers } diff --git a/cmd/frostfs-ir/metrics.go b/cmd/frostfs-ir/metrics.go new file mode 100644 index 000000000..39b432c74 --- /dev/null +++ b/cmd/frostfs-ir/metrics.go @@ -0,0 +1,12 @@ +package main + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" +) + +func newMetricsComponent() *httpComponent 
{ + return &httpComponent{ + name: "prometheus", + handler: metrics.Handler(), + } +} diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go new file mode 100644 index 000000000..d67c463fc --- /dev/null +++ b/cmd/frostfs-ir/pprof.go @@ -0,0 +1,68 @@ +package main + +import ( + "fmt" + "runtime" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" + "go.uber.org/zap" +) + +type pprofComponent struct { + httpComponent + blockRate int + mutexRate int +} + +const ( + pprofBlockRateKey = "pprof.block_rate" + pprofMutexRateKey = "pprof.mutex_rate" +) + +func newPprofComponent() *pprofComponent { + return &pprofComponent{ + httpComponent: httpComponent{ + name: "pprof", + handler: httputil.Handler(), + }, + } +} + +func (c *pprofComponent) init() { + c.httpComponent.init() + + if c.enabled { + c.blockRate = cfg.GetInt(pprofBlockRateKey) + c.mutexRate = cfg.GetInt(pprofMutexRateKey) + runtime.SetBlockProfileRate(c.blockRate) + runtime.SetMutexProfileFraction(c.mutexRate) + } else { + c.blockRate = 0 + c.mutexRate = 0 + runtime.SetBlockProfileRate(0) + runtime.SetMutexProfileFraction(0) + } +} + +func (c *pprofComponent) needReload() bool { + blockRate := cfg.GetInt(pprofBlockRateKey) + mutexRate := cfg.GetInt(pprofMutexRateKey) + return c.httpComponent.needReload() || + c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate) +} + +func (c *pprofComponent) reload() { + log.Info(fmt.Sprintf("reload %s", c.name)) + if c.needReload() { + log.Info(fmt.Sprintf("%s config updated", c.name)) + if err := c.shutdown(); err != nil { + log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, + zap.String("error", err.Error())) + return + } + + c.init() + c.start() + } +} diff --git a/cmd/frostfs-lens/internal/meta/inspect.go b/cmd/frostfs-lens/internal/meta/inspect.go index fb0065a62..bc7f28a3a 100644 --- a/cmd/frostfs-lens/internal/meta/inspect.go +++ 
b/cmd/frostfs-lens/internal/meta/inspect.go @@ -36,7 +36,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) { storageID := meta.StorageIDPrm{} storageID.SetAddress(addr) - resStorageID, err := db.StorageID(storageID) + resStorageID, err := db.StorageID(cmd.Context(), storageID) common.ExitOnErr(cmd, common.Errf("could not check if the obj is small: %w", err)) if id := resStorageID.StorageID(); id != nil { @@ -51,7 +51,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) { siErr := new(object.SplitInfoError) - res, err := db.Get(prm) + res, err := db.Get(cmd.Context(), prm) if errors.As(err, &siErr) { link, linkSet := siErr.SplitInfo().Link() last, lastSet := siErr.SplitInfo().LastPart() diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index 3d4fc7375..dfbaf3525 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -24,6 +24,61 @@ type valueWithTime[V any] struct { e error } +type locker struct { + mtx *sync.Mutex + waiters int // not protected by mtx, must used outer mutex to update concurrently +} + +type keyLocker[K comparable] struct { + lockers map[K]*locker + lockersMtx *sync.Mutex +} + +func newKeyLocker[K comparable]() *keyLocker[K] { + return &keyLocker[K]{ + lockers: make(map[K]*locker), + lockersMtx: &sync.Mutex{}, + } +} + +func (l *keyLocker[K]) LockKey(key K) { + l.lockersMtx.Lock() + + if locker, found := l.lockers[key]; found { + locker.waiters++ + l.lockersMtx.Unlock() + + locker.mtx.Lock() + return + } + + locker := &locker{ + mtx: &sync.Mutex{}, + waiters: 1, + } + locker.mtx.Lock() + + l.lockers[key] = locker + l.lockersMtx.Unlock() +} + +func (l *keyLocker[K]) UnlockKey(key K) { + l.lockersMtx.Lock() + defer l.lockersMtx.Unlock() + + locker, found := l.lockers[key] + if !found { + return + } + + if locker.waiters == 1 { + delete(l.lockers, key) + } + locker.waiters-- + + locker.mtx.Unlock() +} + // entity that provides TTL cache interface. 
type ttlNetCache[K comparable, V any] struct { ttl time.Duration @@ -33,6 +88,8 @@ type ttlNetCache[K comparable, V any] struct { cache *lru.Cache[K, *valueWithTime[V]] netRdr netValueReader[K, V] + + keyLocker *keyLocker[K] } // complicates netValueReader with TTL caching mechanism. @@ -41,10 +98,11 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n fatalOnErr(err) return &ttlNetCache[K, V]{ - ttl: ttl, - sz: sz, - cache: cache, - netRdr: netRdr, + ttl: ttl, + sz: sz, + cache: cache, + netRdr: netRdr, + keyLocker: newKeyLocker[K](), } } @@ -55,22 +113,33 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n // returned value should not be modified. func (c *ttlNetCache[K, V]) get(key K) (V, error) { val, ok := c.cache.Peek(key) - if ok { - if time.Since(val.t) < c.ttl { - return val.v, val.e - } + if ok && time.Since(val.t) < c.ttl { + return val.v, val.e + } - c.cache.Remove(key) + c.keyLocker.LockKey(key) + defer c.keyLocker.UnlockKey(key) + + val, ok = c.cache.Peek(key) + if ok && time.Since(val.t) < c.ttl { + return val.v, val.e } v, err := c.netRdr(key) - c.set(key, v, err) + c.cache.Add(key, &valueWithTime[V]{ + v: v, + t: time.Now(), + e: err, + }) return v, err } func (c *ttlNetCache[K, V]) set(k K, v V, e error) { + c.keyLocker.LockKey(k) + defer c.keyLocker.UnlockKey(k) + c.cache.Add(k, &valueWithTime[V]{ v: v, t: time.Now(), @@ -79,6 +148,9 @@ func (c *ttlNetCache[K, V]) set(k K, v V, e error) { } func (c *ttlNetCache[K, V]) remove(key K) { + c.keyLocker.LockKey(key) + defer c.keyLocker.UnlockKey(key) + c.cache.Remove(key) } diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go new file mode 100644 index 000000000..a3e1c4ea6 --- /dev/null +++ b/cmd/frostfs-node/cache_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" +) + +func TestKeyLocker(t *testing.T) { + taken 
:= false + eg, _ := errgroup.WithContext(context.Background()) + keyLocker := newKeyLocker[int]() + for i := 0; i < 100; i++ { + eg.Go(func() error { + keyLocker.LockKey(0) + defer keyLocker.UnlockKey(0) + + require.False(t, taken) + taken = true + require.True(t, taken) + time.Sleep(10 * time.Millisecond) + taken = false + require.False(t, taken) + + return nil + }) + } + require.NoError(t, eg.Wait()) +} diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index d81e47b17..987d27fcd 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -29,6 +29,7 @@ import ( objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" @@ -54,8 +55,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone" tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - trustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller" - truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" @@ -342,13 +341,13 @@ type internals struct { func (c *cfg) startMaintenance() { c.isMaintenance.Store(true) 
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE) - c.log.Info("started local node's maintenance") + c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance) } // stops node's maintenance. func (c *internals) stopMaintenance() { c.isMaintenance.Store(false) - c.log.Info("stopped local node's maintenance") + c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance) } // IsMaintenance checks if storage node is under maintenance. @@ -413,7 +412,6 @@ type cfg struct { cfgNodeInfo cfgNodeInfo cfgNetmap cfgNetmap cfgControlService cfgControlService - cfgReputation cfgReputation cfgObject cfgObject cfgNotifications cfgNotifications } @@ -451,8 +449,6 @@ type cfgMorph struct { // TTL of Sidechain cached values. Non-positive value disables caching. cacheTTL time.Duration - eigenTrustTicker *eigenTrustTickers // timers for EigenTrust iterations - proxyScriptHash neogoutil.Uint160 } @@ -531,16 +527,6 @@ type cfgControlService struct { server *grpc.Server } -type cfgReputation struct { - workerPool util.WorkerPool // pool for EigenTrust algorithm's iterations - - localTrustStorage *truststorage.Storage - - localTrustCtrl *trustcontroller.Controller - - scriptHash neogoutil.Uint160 -} - var persistateSideChainLastBlockKey = []byte("side_chain_last_processed_block") func initCfg(appCfg *config.Config) *cfg { @@ -581,8 +567,6 @@ func initCfg(appCfg *config.Config) *cfg { } c.cfgObject = initCfgObject(appCfg) - c.cfgReputation = initReputation(appCfg) - user.IDFromKey(&c.ownerIDFromKey, key.PrivateKey.PublicKey) c.metricsCollector = metrics.NewNodeMetrics() @@ -661,16 +645,6 @@ func initContainer(appCfg *config.Config) cfgContainer { } } -func initReputation(appCfg *config.Config) cfgReputation { - reputationWorkerPool, err := ants.NewPool(notificationHandlerPoolSize) - fatalOnErr(err) - - return cfgReputation{ - scriptHash: contractsconfig.Reputation(appCfg), - workerPool: reputationWorkerPool, - } -} - func initCfgGRPC() cfgGRPC { maxChunkSize := 
uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes @@ -881,10 +855,10 @@ func initLocalStorage(c *cfg) { for _, optsWithMeta := range c.shardOpts() { id, err := ls.AddShard(append(optsWithMeta.shOpts, shard.WithTombstoneSource(tombstoneSource))...) if err != nil { - c.log.Error("failed to attach shard to engine", zap.Error(err)) + c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err)) } else { shardsAttached++ - c.log.Info("shard attached to engine", zap.Stringer("id", id)) + c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id)) } } if shardsAttached == 0 { @@ -894,15 +868,15 @@ func initLocalStorage(c *cfg) { c.cfgObject.cfgLocalStorage.localStorage = ls c.onShutdown(func() { - c.log.Info("closing components of the storage engine...") + c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine) err := ls.Close() if err != nil { - c.log.Info("storage engine closing failure", + c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure, zap.String("error", err.Error()), ) } else { - c.log.Info("all components of the storage engine closed successfully") + c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) } }) } @@ -976,11 +950,11 @@ func (c *cfg) bootstrap() error { // switch to online except when under maintenance st := c.cfgNetmap.state.controlNetmapStatus() if st == control.NetmapStatus_MAINTENANCE { - c.log.Info("bootstrapping with the maintenance state") + c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState) return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance) } - c.log.Info("bootstrapping with online state", + c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState, zap.Stringer("previous", st), ) @@ -1015,32 +989,32 @@ func (c *cfg) signalWatcher(ctx context.Context) { case syscall.SIGHUP: c.reloadConfig(ctx) case syscall.SIGTERM, syscall.SIGINT: - c.log.Info("termination signal 
has been received, stopping...") + c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) // TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING) c.shutdown() - c.log.Info("termination signal processing is complete") + c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) return } case err := <-c.internalErr: // internal application error - c.log.Warn("internal application error", + c.log.Warn(logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) c.shutdown() - c.log.Info("internal error processing is complete") + c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete) return } } } func (c *cfg) reloadConfig(ctx context.Context) { - c.log.Info("SIGHUP has been received, rereading configuration...") + c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) err := c.readConfig(c.appCfg) if err != nil { - c.log.Error("configuration reading", zap.Error(err)) + c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) return } @@ -1052,7 +1026,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { logPrm, err := c.loggerPrm() if err != nil { - c.log.Error("logger configuration preparation", zap.Error(err)) + c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err)) return } @@ -1060,7 +1034,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { components = append(components, dCmp{"tracing", func() error { updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg)) if updated { - c.log.Info("tracing configation updated") + c.log.Info(logs.FrostFSNodeTracingConfigationUpdated) } return err }}) @@ -1085,20 +1059,20 @@ func (c *cfg) reloadConfig(ctx context.Context) { err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg) if err != nil { - c.log.Error("storage engine configuration update", zap.Error(err)) + c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err)) return } for _, 
component := range components { err = component.reloadFunc() if err != nil { - c.log.Error("updated configuration applying", + c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying, zap.String("component", component.name), zap.Error(err)) } } - c.log.Info("configuration has been reloaded successfully") + c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } func (c *cfg) shutdown() { diff --git a/cmd/frostfs-node/config/calls.go b/cmd/frostfs-node/config/calls.go index 6cf3baa03..36e53ea7c 100644 --- a/cmd/frostfs-node/config/calls.go +++ b/cmd/frostfs-node/config/calls.go @@ -2,6 +2,8 @@ package config import ( "strings" + + configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" ) // Sub returns a subsection of the Config by name. @@ -38,11 +40,11 @@ func (x *Config) Sub(name string) *Config { // // Returns nil if config is nil. func (x *Config) Value(name string) any { - value := x.v.Get(strings.Join(append(x.path, name), separator)) + value := x.v.Get(strings.Join(append(x.path, name), configViper.Separator)) if value != nil || x.defaultPath == nil { return value } - return x.v.Get(strings.Join(append(x.defaultPath, name), separator)) + return x.v.Get(strings.Join(append(x.defaultPath, name), configViper.Separator)) } // SetDefault sets fallback config for missing values. 
diff --git a/cmd/frostfs-node/config/calls_test.go b/cmd/frostfs-node/config/calls_test.go index 37ffd2e88..68bf1c679 100644 --- a/cmd/frostfs-node/config/calls_test.go +++ b/cmd/frostfs-node/config/calls_test.go @@ -2,11 +2,12 @@ package config_test import ( "os" + "strings" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/internal" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" "github.com/stretchr/testify/require" ) @@ -35,7 +36,9 @@ func TestConfigEnv(t *testing.T) { value = "some value" ) - err := os.Setenv(internal.Env(section, name), value) + envName := strings.ToUpper( + strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator)) + err := os.Setenv(envName, value) require.NoError(t, err) c := configtest.EmptyConfig() diff --git a/cmd/frostfs-node/config/config.go b/cmd/frostfs-node/config/config.go index 12c92306b..77e34d613 100644 --- a/cmd/frostfs-node/config/config.go +++ b/cmd/frostfs-node/config/config.go @@ -1,11 +1,7 @@ package config import ( - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/internal" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" + configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" "github.com/spf13/viper" ) @@ -18,69 +14,47 @@ import ( type Config struct { v *viper.Viper - opts opts + configFile string + configDir string + envPrefix string defaultPath []string path []string } -const separator = "." - -// Prm groups required parameters of the Config. -type Prm struct{} +const ( + // EnvPrefix is a prefix of ENV variables related + // to storage node configuration. + EnvPrefix = "FROSTFS" +) // New creates a new Config instance. 
// -// If file option is provided (WithConfigFile), +// If file option is provided, // configuration values are read from it. // Otherwise, Config is a degenerate tree. -func New(_ Prm, opts ...Option) *Config { - v := viper.New() +func New(configFile, configDir, envPrefix string) *Config { + v, err := configViper.CreateViper( + configViper.WithConfigFile(configFile), + configViper.WithConfigDir(configDir), + configViper.WithEnvPrefix(envPrefix)) - v.SetEnvPrefix(internal.EnvPrefix) - v.AutomaticEnv() - v.SetEnvKeyReplacer(strings.NewReplacer(separator, internal.EnvSeparator)) - - o := defaultOpts() - for i := range opts { - opts[i](o) - } - - if o.path != "" { - v.SetConfigFile(o.path) - - err := v.ReadInConfig() - if err != nil { - panic(fmt.Errorf("failed to read config: %w", err)) - } - } - - if o.configDir != "" { - if err := config.ReadConfigDir(v, o.configDir); err != nil { - panic(fmt.Errorf("failed to read config dir: %w", err)) - } + if err != nil { + panic(err) } return &Config{ - v: v, - opts: *o, + v: v, + configFile: configFile, + configDir: configDir, + envPrefix: envPrefix, } } // Reload reads configuration path if it was provided to New. 
func (x *Config) Reload() error { - if x.opts.path != "" { - err := x.v.ReadInConfig() - if err != nil { - return fmt.Errorf("rereading configuration file: %w", err) - } - } - - if x.opts.configDir != "" { - if err := config.ReadConfigDir(x.v, x.opts.configDir); err != nil { - return fmt.Errorf("rereading configuration dir: %w", err) - } - } - - return nil + return configViper.ReloadViper( + configViper.WithViper(x.v), configViper.WithConfigFile(x.configFile), + configViper.WithConfigDir(x.configDir), + configViper.WithEnvPrefix(x.envPrefix)) } diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go new file mode 100644 index 000000000..ede15a522 --- /dev/null +++ b/cmd/frostfs-node/config/configdir_test.go @@ -0,0 +1,24 @@ +package config + +import ( + "os" + "path" + "testing" + + "github.com/spf13/cast" + "github.com/stretchr/testify/require" +) + +func TestConfigDir(t *testing.T) { + dir := t.TempDir() + + cfgFileName0 := path.Join(dir, "cfg_00.json") + cfgFileName1 := path.Join(dir, "cfg_01.yml") + + require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0777)) + require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0777)) + + c := New("", dir, "") + require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level"))) + require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size"))) +} diff --git a/cmd/frostfs-node/config/contracts/config.go b/cmd/frostfs-node/config/contracts/config.go index 0450d0d57..c5f14f3ca 100644 --- a/cmd/frostfs-node/config/contracts/config.go +++ b/cmd/frostfs-node/config/contracts/config.go @@ -38,15 +38,6 @@ func Container(c *config.Config) util.Uint160 { return contractAddress(c, "container") } -// Reputation returnsthe value of "reputation" config parameter -// from "contracts" section. -// -// Returns zero filled script hash if the value is not set. 
-// Throws panic if the value is not a 20-byte LE hex-encoded string. -func Reputation(c *config.Config) util.Uint160 { - return contractAddress(c, "reputation") -} - // Proxy returnsthe value of "proxy" config parameter // from "contracts" section. // diff --git a/cmd/frostfs-node/config/contracts/config_test.go b/cmd/frostfs-node/config/contracts/config_test.go index 93956d6bb..d816ea1e4 100644 --- a/cmd/frostfs-node/config/contracts/config_test.go +++ b/cmd/frostfs-node/config/contracts/config_test.go @@ -18,7 +18,6 @@ func TestContractsSection(t *testing.T) { require.Equal(t, emptyHash, contractsconfig.Balance(empty)) require.Equal(t, emptyHash, contractsconfig.Container(empty)) require.Equal(t, emptyHash, contractsconfig.Netmap(empty)) - require.Equal(t, emptyHash, contractsconfig.Reputation(empty)) require.Equal(t, emptyHash, contractsconfig.Proxy(empty)) }) @@ -33,9 +32,6 @@ func TestContractsSection(t *testing.T) { expNetmap, err := util.Uint160DecodeStringLE("0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca") require.NoError(t, err) - expReputation, err := util.Uint160DecodeStringLE("441995f631c1da2b133462b71859494a5cd45e90") - require.NoError(t, err) - expProxy, err := util.Uint160DecodeStringLE("ad7c6b55b737b696e5c82c85445040964a03e97f") require.NoError(t, err) @@ -43,13 +39,11 @@ func TestContractsSection(t *testing.T) { balance := contractsconfig.Balance(c) container := contractsconfig.Container(c) netmap := contractsconfig.Netmap(c) - reputation := contractsconfig.Reputation(c) proxy := contractsconfig.Proxy(c) require.Equal(t, expBalance, balance) require.Equal(t, expConatiner, container) require.Equal(t, expNetmap, netmap) - require.Equal(t, expReputation, reputation) require.Equal(t, expProxy, proxy) } diff --git a/cmd/frostfs-node/config/internal/env.go b/cmd/frostfs-node/config/internal/env.go deleted file mode 100644 index 6bf260ac1..000000000 --- a/cmd/frostfs-node/config/internal/env.go +++ /dev/null @@ -1,22 +0,0 @@ -package internal - -import ( - 
"strings" -) - -// EnvPrefix is a prefix of ENV variables related -// to storage node configuration. -const EnvPrefix = "FROSTFS" - -// EnvSeparator is a section separator in ENV variables. -const EnvSeparator = "_" - -// Env returns ENV variable key for a particular config parameter. -func Env(path ...string) string { - return strings.ToUpper( - strings.Join( - append([]string{EnvPrefix}, path...), - EnvSeparator, - ), - ) -} diff --git a/cmd/frostfs-node/config/internal/env_test.go b/cmd/frostfs-node/config/internal/env_test.go deleted file mode 100644 index 3d1af0715..000000000 --- a/cmd/frostfs-node/config/internal/env_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package internal_test - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/internal" - "github.com/stretchr/testify/require" -) - -func TestEnv(t *testing.T) { - require.Equal(t, - "FROSTFS_SECTION_PARAMETER", - internal.Env("section", "parameter"), - ) -} diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index a2794422e..9dfe8ddf4 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -177,33 +177,6 @@ func (p PersistentStateConfig) Path() string { return PersistentStatePathDefault } -// SubnetConfig represents node configuration related to subnets. -type SubnetConfig config.Config - -// Init initializes SubnetConfig from "subnet" sub-section of "node" section -// of the root config. -func (x *SubnetConfig) Init(root config.Config) { - *x = SubnetConfig(*root.Sub(subsection).Sub("subnet")) -} - -// ExitZero returns the value of "exit_zero" config parameter as bool. -// Returns false if the value can not be cast. -func (x SubnetConfig) ExitZero() bool { - return config.BoolSafe((*config.Config)(&x), "exit_zero") -} - -// IterateSubnets casts the value of "entries" config parameter to string slice, -// iterates over all of its elements and passes them to f. 
-// -// Does nothing if the value can not be cast to string slice. -func (x SubnetConfig) IterateSubnets(f func(string)) { - ids := config.StringSliceSafe((*config.Config)(&x), "entries") - - for i := range ids { - f(ids[i]) - } -} - // Notification returns structure that provides access to "notification" // subsection of "node" section. func Notification(c *config.Config) NotificationConfig { diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go index ff085c506..3a1120491 100644 --- a/cmd/frostfs-node/config/node/config_test.go +++ b/cmd/frostfs-node/config/node/config_test.go @@ -52,20 +52,6 @@ func TestNodeSection(t *testing.T) { require.Equal(t, "", notificationDefaultCertPath) require.Equal(t, "", notificationDefaultKeyPath) require.Equal(t, "", notificationDefaultCAPath) - - var subnetCfg SubnetConfig - - subnetCfg.Init(*empty) - - require.False(t, subnetCfg.ExitZero()) - - called := false - - subnetCfg.IterateSubnets(func(string) { - called = true - }) - - require.False(t, called) }) const path = "../../../../config/example/node" @@ -143,20 +129,6 @@ func TestNodeSection(t *testing.T) { require.Equal(t, "/cert/path", notificationCertPath) require.Equal(t, "/key/path", notificationKeyPath) require.Equal(t, "/ca/path", notificationCAPath) - - var subnetCfg SubnetConfig - - subnetCfg.Init(*c) - - require.True(t, subnetCfg.ExitZero()) - - var ids []string - - subnetCfg.IterateSubnets(func(id string) { - ids = append(ids, id) - }) - - require.Equal(t, []string{"123", "456", "789"}, ids) } configtest.ForEachFileType(path, fileConfigTest) diff --git a/cmd/frostfs-node/config/profiler/config.go b/cmd/frostfs-node/config/profiler/config.go index f891833a6..191694970 100644 --- a/cmd/frostfs-node/config/profiler/config.go +++ b/cmd/frostfs-node/config/profiler/config.go @@ -51,3 +51,27 @@ func Address(c *config.Config) string { return AddressDefault } + +// BlockRate returns the value of "block_rate" config parameter
+// from "pprof" section. +func BlockRate(c *config.Config) int { + s := c.Sub(subsection) + + v := int(config.IntSafe(s, "block_rate")) + if v <= 0 { + return 0 + } + return v +} + +// MutexRate returns the value of "mutex_rate" config parameter +// from "pprof" section. +func MutexRate(c *config.Config) int { + s := c.Sub(subsection) + + v := int(config.IntSafe(s, "mutex_rate")) + if v <= 0 { + return 0 + } + return v +} diff --git a/cmd/frostfs-node/config/profiler/config_test.go b/cmd/frostfs-node/config/profiler/config_test.go index bb1b20eb8..355874387 100644 --- a/cmd/frostfs-node/config/profiler/config_test.go +++ b/cmd/frostfs-node/config/profiler/config_test.go @@ -18,6 +18,9 @@ func TestProfilerSection(t *testing.T) { require.Equal(t, profilerconfig.ShutdownTimeoutDefault, to) require.Equal(t, profilerconfig.AddressDefault, addr) require.False(t, profilerconfig.Enabled(configtest.EmptyConfig())) + + require.Zero(t, profilerconfig.BlockRate(configtest.EmptyConfig())) + require.Zero(t, profilerconfig.MutexRate(configtest.EmptyConfig())) }) const path = "../../../../config/example/node" @@ -29,6 +32,9 @@ func TestProfilerSection(t *testing.T) { require.Equal(t, 15*time.Second, to) require.Equal(t, "localhost:6060", addr) require.True(t, profilerconfig.Enabled(c)) + + require.Equal(t, 10_000, profilerconfig.BlockRate(c)) + require.Equal(t, 10_000, profilerconfig.MutexRate(c)) } configtest.ForEachFileType(path, fileConfigTest) diff --git a/cmd/frostfs-node/config/test/config.go b/cmd/frostfs-node/config/test/config.go index cd10edaec..28ec65291 100644 --- a/cmd/frostfs-node/config/test/config.go +++ b/cmd/frostfs-node/config/test/config.go @@ -11,21 +11,15 @@ import ( ) func fromFile(path string) *config.Config { - var p config.Prm - os.Clearenv() // ENVs have priority over config files, so we do this in tests - return config.New(p, - config.WithConfigFile(path), - ) + return config.New(path, "", "") } func fromEnvFile(t testing.TB, path string) 
*config.Config { - var p config.Prm - loadEnv(t, path) // github.com/joho/godotenv can do that as well - return config.New(p) + return config.New("", "", config.EnvPrefix) } func forEachFile(paths []string, f func(*config.Config)) { @@ -51,9 +45,7 @@ func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) { // EmptyConfig returns config without any values and sections. func EmptyConfig() *config.Config { - var p config.Prm - - return config.New(p) + return config.New("", "", config.EnvPrefix) } // loadEnv reads .env file, parses `X=Y` records and sets OS ENVs. diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 6c864431d..569e4a7ca 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -11,6 +11,7 @@ import ( containerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -130,19 +131,20 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c // TODO: use owner directly from the event after neofs-contract#256 will become resolved // but don't forget about the profit of reading the new container and caching it: // creation success are most commonly tracked by polling GET op. 
- cnr, err := cachedContainerStorage.Get(ev.ID) + cnr, err := cnrSrc.Get(ev.ID) if err == nil { cachedContainerLister.update(cnr.Value.Owner(), ev.ID, true) + cachedContainerStorage.set(ev.ID, cnr, nil) } else { // unlike removal, we expect successful receive of the container // after successful creation, so logging can be useful - c.log.Error("read newly created container after the notification", + c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, zap.Stringer("id", ev.ID), zap.Error(err), ) } - c.log.Debug("container creation event's receipt", + c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt, zap.Stringer("id", ev.ID), ) }) @@ -161,7 +163,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c cachedContainerStorage.handleRemoval(ev.ID) - c.log.Debug("container removal event's receipt", + c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt, zap.Stringer("id", ev.ID), ) }) @@ -295,7 +297,7 @@ type morphLoadWriter struct { } func (w *morphLoadWriter) Put(a containerSDK.SizeEstimation) error { - w.log.Debug("save used space announcement in contract", + w.log.Debug(logs.FrostFSNodeSaveUsedSpaceAnnouncementInContract, zap.Uint64("epoch", a.Epoch()), zap.Stringer("cid", a.Container()), zap.Uint64("size", a.Value()), @@ -329,7 +331,7 @@ type remoteLoadAnnounceProvider struct { netmapKeys netmapCore.AnnouncedKeys clientCache interface { - Get(client.NodeInfo) (client.Client, error) + Get(client.NodeInfo) (client.MultiAddressClient, error) } deadEndProvider loadcontroller.WriterProvider @@ -458,7 +460,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr for i := range idList { sz, err := engine.ContainerSize(d.engine, idList[i]) if err != nil { - d.log.Debug("failed to calculate container size in storage engine", + d.log.Debug(logs.FrostFSNodeFailedToCalculateContainerSizeInStorageEngine, zap.Stringer("cid", idList[i]), zap.String("error", err.Error()), ) @@ -466,7 
+468,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr continue } - d.log.Debug("container size in storage engine calculated successfully", + d.log.Debug(logs.FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully, zap.Uint64("size", sz), zap.Stringer("cid", idList[i]), ) diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index 5492f585f..f4b068419 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -5,6 +5,7 @@ import ( "net" controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" @@ -52,7 +53,7 @@ func initControlService(c *cfg) { lis, err := net.Listen("tcp", endpoint) if err != nil { - c.log.Error("can't listen gRPC endpoint (control)", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) return } diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index f3943f3ff..b0a587782 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" "google.golang.org/grpc" @@ -33,7 +34,7 @@ func initGRPC(c *cfg) { if tlsCfg != nil { cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile()) if err != nil { - c.log.Error("could not read certificate from file", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) return } @@ -63,7 +64,7 @@ func initGRPC(c *cfg) 
{ lis, err := net.Listen("tcp", sc.Endpoint()) if err != nil { - c.log.Error("can't listen gRPC endpoint", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) return } @@ -93,14 +94,14 @@ func serveGRPC(c *cfg) { go func() { defer func() { - c.log.Info("stop listening gRPC endpoint", + c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint, zap.String("endpoint", lis.Addr().String()), ) c.wg.Done() }() - c.log.Info("start listening gRPC endpoint", + c.log.Info(logs.FrostFSNodeStartListeningGRPCEndpoint, zap.String("endpoint", lis.Addr().String()), ) @@ -114,7 +115,7 @@ func serveGRPC(c *cfg) { func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { l = &logger.Logger{Logger: l.With(zap.String("name", name))} - l.Info("stopping gRPC server...") + l.Info(logs.FrostFSNodeStoppingGRPCServer) // GracefulStop() may freeze forever, see #1270 done := make(chan struct{}) @@ -126,9 +127,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { select { case <-done: case <-time.After(1 * time.Minute): - l.Info("gRPC cannot shutdown gracefully, forcing stop") + l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) s.Stop() } - l.Info("gRPC server stopped successfully") + l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully) } diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index a97ad3879..a2024706b 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -8,6 +8,7 @@ import ( "os" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "go.uber.org/zap" @@ -45,7 +46,7 @@ func main() { os.Exit(SuccessReturnCode) } - appCfg := config.New(config.Prm{}, config.WithConfigFile(*configFile), config.WithConfigDir(*configDir)) + appCfg := config.New(*configFile, *configDir, config.EnvPrefix) err := 
validateConfig(appCfg) fatalOnErr(err) @@ -82,9 +83,8 @@ func initApp(ctx context.Context, c *cfg) { c.wg.Done() }() - pprof, _ := pprofComponent(c) metrics, _ := metricsComponent(c) - initAndLog(c, pprof.name, pprof.init) + initAndLog(c, "profiler", initProfilerService) initAndLog(c, metrics.name, metrics.init) initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) }) @@ -101,7 +101,6 @@ func initApp(ctx context.Context, c *cfg) { initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) }) initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) }) initAndLog(c, "session", initSessionService) - initAndLog(c, "reputation", func(c *cfg) { initReputationService(ctx, c) }) initAndLog(c, "notification", func(c *cfg) { initNotifications(ctx, c) }) initAndLog(c, "object", initObjectService) initAndLog(c, "tree", initTreeService) @@ -142,14 +141,14 @@ func bootUp(ctx context.Context, c *cfg) { } func wait(c *cfg, cancel func()) { - c.log.Info("application started", + c.log.Info(logs.CommonApplicationStarted, zap.String("version", misc.Version)) <-c.done // graceful shutdown cancel() - c.log.Debug("waiting for all processes to stop") + c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop) c.wg.Wait() } diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go index f18bbe65d..cf621086d 100644 --- a/cmd/frostfs-node/metrics.go +++ b/cmd/frostfs-node/metrics.go @@ -2,7 +2,7 @@ package main import ( metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics" - "github.com/prometheus/client_golang/prometheus/promhttp" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" ) func metricsComponent(c *cfg) (*httpComponent, bool) { @@ -12,7 +12,7 @@ func metricsComponent(c *cfg) (*httpComponent, bool) { c.dynamicConfiguration.metrics = new(httpComponent) c.dynamicConfiguration.metrics.cfg = c c.dynamicConfiguration.metrics.name = "metrics" - c.dynamicConfiguration.metrics.handler = 
promhttp.Handler() + c.dynamicConfiguration.metrics.handler = metrics.Handler() updated = true } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 2db865ca3..2e086f994 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -7,6 +7,7 @@ import ( "time" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -49,7 +50,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)), ) if err != nil { - c.log.Info("failed to create neo RPC client", + c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient, zap.Any("endpoints", addresses), zap.String("error", err.Error()), ) @@ -58,12 +59,12 @@ func initMorphComponents(ctx context.Context, c *cfg) { } c.onShutdown(func() { - c.log.Info("closing morph components...") + c.log.Info(logs.FrostFSNodeClosingMorphComponents) cli.Close() }) if err := cli.SetGroupSignerScope(); err != nil { - c.log.Info("failed to set group signer scope, continue with Global", zap.Error(err)) + c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err)) } c.cfgMorph.client = cli @@ -80,7 +81,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { fatalOnErr(err) } - c.log.Info("notary support", + c.log.Info(logs.FrostFSNodeNotarySupport, zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled), ) @@ -95,7 +96,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { msPerBlock, err := c.cfgMorph.client.MsPerBlock() fatalOnErr(err) c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond - c.log.Debug("morph.cache_ttl fetched from network", zap.Duration("value", c.cfgMorph.cacheTTL)) + 
c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL)) } if c.cfgMorph.cacheTTL < 0 { @@ -122,7 +123,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { // non-error deposit with an empty TX hash means // that the deposit has already been made; no // need to wait it. - c.log.Info("notary deposit has already been made") + c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade) return } @@ -190,7 +191,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - c.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error())) + c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) } subs, err = subscriber.New(ctx, &subscriber.Params{ @@ -215,7 +216,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { res, err := netmapEvent.ParseNewEpoch(src) if err == nil { - c.log.Info("new epoch event from sidechain", + c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain, zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), ) } @@ -226,16 +227,14 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) registerBlockHandler(lis, func(block *block.Block) { - c.log.Debug("new block", zap.Uint32("index", block.Index)) + c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) if err != nil { - c.log.Warn("can't update persistent state", + c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", block.Index)) } - - 
tickBlockTimers(c) }) } @@ -284,7 +283,6 @@ func lookupScriptHashesInNNS(c *cfg) { {&c.cfgNetmap.scriptHash, client.NNSNetmapContractName}, {&c.cfgAccounting.scriptHash, client.NNSBalanceContractName}, {&c.cfgContainer.scriptHash, client.NNSContainerContractName}, - {&c.cfgReputation.scriptHash, client.NNSReputationContractName}, {&c.cfgMorph.proxyScriptHash, client.NNSProxyContractName}, } ) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index d9b1c9208..58e3cb2f2 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -7,7 +7,7 @@ import ( "fmt" netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc" - nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -18,7 +18,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" netmapService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "go.uber.org/atomic" "go.uber.org/zap" @@ -142,8 +141,6 @@ func initNetmapService(ctx context.Context, c *cfg) { parseAttributes(c) c.cfgNodeInfo.localInfo.SetOffline() - readSubnetCfg(c) - if c.cfgMorph.client == nil { initMorphComponents(ctx, c) } @@ -193,7 +190,7 @@ func addNewEpochNotificationHandlers(c *cfg) { if (n-c.cfgNetmap.startEpoch)%reBootstrapInterval == 0 { err := c.bootstrap() if err != nil { - c.log.Warn("can't send re-bootstrap tx", zap.Error(err)) + c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) } } }) @@ -203,7 +200,7 @@ func addNewEpochNotificationHandlers(c *cfg) { ni, err := 
c.netmapLocalNodeState(e) if err != nil { - c.log.Error("could not update node state on new epoch", + c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, zap.Uint64("epoch", e), zap.String("error", err.Error()), ) @@ -218,7 +215,7 @@ func addNewEpochNotificationHandlers(c *cfg) { addNewEpochAsyncNotificationHandler(c, func(ev event.Event) { _, err := makeNotaryDeposit(c) if err != nil { - c.log.Error("could not make notary deposit", + c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit, zap.String("error", err.Error()), ) } @@ -226,29 +223,6 @@ func addNewEpochNotificationHandlers(c *cfg) { } } -func readSubnetCfg(c *cfg) { - var subnetCfg nodeconfig.SubnetConfig - - subnetCfg.Init(*c.appCfg) - - var ( - id subnetid.ID - err error - ) - - subnetCfg.IterateSubnets(func(idTxt string) { - err = id.DecodeString(idTxt) - fatalOnErrDetails("parse subnet entry", err) - - c.cfgNodeInfo.localInfo.EnterSubnet(id) - }) - - if subnetCfg.ExitZero() { - subnetid.MakeZero(&id) - c.cfgNodeInfo.localInfo.ExitSubnet(id) - } -} - // bootstrapNode adds current node to the Network map. // Must be called after initNetmapService. 
func bootstrapNode(c *cfg) { @@ -298,7 +272,7 @@ func initNetmapState(c *cfg) { } } - c.log.Info("initial network state", + c.log.Info(logs.FrostFSNodeInitialNetworkState, zap.Uint64("epoch", epoch), zap.String("state", stateWord), ) @@ -450,8 +424,6 @@ func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) { ni.SetEpochDuration(netInfoMorph.EpochDuration) ni.SetContainerFee(netInfoMorph.ContainerFee) ni.SetNamedContainerFee(netInfoMorph.ContainerAliasFee) - ni.SetNumberOfEigenTrustIterations(netInfoMorph.EigenTrustIterations) - ni.SetEigenTrustAlpha(netInfoMorph.EigenTrustAlpha) ni.SetIRCandidateFee(netInfoMorph.IRCandidateFee) ni.SetWithdrawalFee(netInfoMorph.WithdrawalFee) diff --git a/cmd/frostfs-node/notificator.go b/cmd/frostfs-node/notificator.go index 4a310e5b0..358b39a72 100644 --- a/cmd/frostfs-node/notificator.go +++ b/cmd/frostfs-node/notificator.go @@ -6,6 +6,7 @@ import ( "fmt" nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" @@ -28,7 +29,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler listRes, err := n.e.ListContainers(engine.ListContainersPrm{}) if err != nil { - log.Error("notificator: could not list containers", zap.Error(err)) + log.Error(logs.FrostFSNodeNotificatorCouldNotListContainers, zap.Error(err)) return } @@ -41,9 +42,9 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler for _, c := range listRes.Containers() { selectPrm.WithContainerID(c) - selectRes, err := n.e.Select(selectPrm) + selectRes, err := n.e.Select(ctx, selectPrm) if err != nil { - log.Error("notificator: could not select objects from container", + 
log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer, zap.Stringer("cid", c), zap.Error(err), ) @@ -53,7 +54,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler for _, a := range selectRes.AddressList() { err = n.processAddress(ctx, a, handler) if err != nil { - log.Error("notificator: could not process object", + log.Error(logs.FrostFSNodeNotificatorCouldNotProcessObject, zap.Stringer("address", a), zap.Error(err), ) @@ -62,7 +63,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler } } - log.Debug("notificator: finished processing object notifications") + log.Debug(logs.FrostFSNodeNotificatorFinishedProcessingObjectNotifications) } func (n *notificationSource) processAddress( @@ -101,7 +102,7 @@ type notificationWriter struct { func (n notificationWriter) Notify(topic string, address oid.Address) { if err := n.w.Notify(topic, address); err != nil { - n.l.Warn("could not write object notification", + n.l.Warn(logs.FrostFSNodeCouldNotWriteObjectNotification, zap.Stringer("address", address), zap.String("topic", topic), zap.Error(err), diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index ff4335ff9..1ce330cb5 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "context" "errors" "fmt" @@ -11,7 +10,7 @@ import ( metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics" policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" - coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" objectCore 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -19,6 +18,7 @@ import ( morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl" @@ -36,15 +36,10 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "go.uber.org/zap" ) @@ -62,7 +57,7 @@ type objectSvc struct { func (c *cfg) MaxObjectSize() uint64 { sz, err := c.cfgNetmap.wrapper.MaxObjectSize() if err != nil { - c.log.Error("could not get max object size value", + c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, zap.String("error", err.Error()), ) } @@ -152,39 +147,12 @@ func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) { return result, nil } -type 
coreClientConstructor reputationClientConstructor - -func (x *coreClientConstructor) Get(info coreclient.NodeInfo) (coreclient.MultiAddressClient, error) { - c, err := (*reputationClientConstructor)(x).Get(info) - if err != nil { - return nil, err - } - - return c.(coreclient.MultiAddressClient), nil -} - func initObjectService(c *cfg) { keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state) - clientConstructor := &reputationClientConstructor{ - log: c.log, - nmSrc: c.netMapSource, - netState: c.cfgNetmap.state, - trustStorage: c.cfgReputation.localTrustStorage, - basicConstructor: c.bgClientCache, - } + c.replicator = createReplicator(c, keyStorage, c.bgClientCache) - coreConstructor := &coreClientConstructor{ - log: c.log, - nmSrc: c.netMapSource, - netState: c.cfgNetmap.state, - trustStorage: c.cfgReputation.localTrustStorage, - basicConstructor: c.clientCache, - } - - c.replicator = createReplicator(c, keyStorage, clientConstructor) - - addPolicer(c, keyStorage, clientConstructor) + addPolicer(c, keyStorage, c.bgClientCache) traverseGen := util.NewTraverserGenerator(c.netMapSource, c.cfgObject.cnrSource, c) @@ -192,11 +160,11 @@ func initObjectService(c *cfg) { sPutV2 := createPutSvcV2(sPut, keyStorage) - sSearch := createSearchSvc(c, keyStorage, traverseGen, coreConstructor) + sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache) sSearchV2 := createSearchSvcV2(sSearch, keyStorage) - sGet := createGetService(c, keyStorage, traverseGen, coreConstructor) + sGet := createGetService(c, keyStorage, traverseGen, c.clientCache) *c.cfgObject.getSvc = *sGet // need smth better @@ -235,7 +203,7 @@ func initObjectService(c *cfg) { } } -func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputationClientConstructor) { +func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) { ls := c.cfgObject.cfgLocalStorage.localStorage pol := policer.New( @@ -259,7 +227,7 @@ func 
addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputati _, err := ls.Inhume(ctx, inhumePrm) if err != nil { - c.log.Warn("could not inhume mark redundant copy as garbage", + c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, zap.String("error", err.Error()), ) } @@ -287,7 +255,7 @@ func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher { } } -func createReplicator(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputationClientConstructor) *replicator.Replicator { +func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCache) *replicator.Replicator { ls := c.cfgObject.cfgLocalStorage.localStorage return replicator.New( @@ -297,8 +265,9 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, clientConstructor *re ), replicator.WithLocalStorage(ls), replicator.WithRemoteSender( - putsvc.NewRemoteSender(keyStorage, (*coreClientConstructor)(clientConstructor)), + putsvc.NewRemoteSender(keyStorage, cache), ), + replicator.WithMetrics(c.metricsCollector), ) } @@ -318,17 +287,9 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage) *putsvc.Service { } } - putConstructor := &coreClientConstructor{ - log: c.log, - nmSrc: c.netMapSource, - netState: c.cfgNetmap.state, - trustStorage: c.cfgReputation.localTrustStorage, - basicConstructor: c.putClientCache, - } - return putsvc.NewService( putsvc.WithKeyStorage(keyStorage), - putsvc.WithClientConstructor(putConstructor), + putsvc.WithClientConstructor(c.putClientCache), putsvc.WithMaxSizeSource(newCachedMaxObjectSizeSource(c)), putsvc.WithObjectStorage(os), putsvc.WithContainerSource(c.cfgObject.cnrSource), @@ -347,7 +308,7 @@ func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2 ) } -func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *coreClientConstructor) *searchsvc.Service { +func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen 
*util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service { ls := c.cfgObject.cfgLocalStorage.localStorage return searchsvc.New( @@ -372,21 +333,18 @@ func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage) } func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, - coreConstructor *coreClientConstructor) *getsvc.Service { + coreConstructor *cache.ClientCache) *getsvc.Service { ls := c.cfgObject.cfgLocalStorage.localStorage return getsvc.New( - getsvc.WithLogger(c.log), - getsvc.WithLocalStorageEngine(ls), - getsvc.WithClientConstructor(coreConstructor), - getsvc.WithTraverserGenerator( - traverseGen.WithTraverseOptions( - placement.SuccessAfter(1), - ), + keyStorage, + c.netMapSource, + ls, + traverseGen.WithTraverseOptions( + placement.SuccessAfter(1), ), - getsvc.WithNetMapSource(c.netMapSource), - getsvc.WithKeyStorage(keyStorage), - ) + coreConstructor, + getsvc.WithLogger(c.log)) } func createGetServiceV2(sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service { @@ -479,135 +437,6 @@ func (s *morphEACLFetcher) GetEACL(cnr cid.ID) (*containercore.EACL, error) { return eaclInfo, nil } -type reputationClientConstructor struct { - log *logger.Logger - - nmSrc netmap.Source - - netState netmap.State - - trustStorage *truststorage.Storage - - basicConstructor interface { - Get(coreclient.NodeInfo) (coreclient.Client, error) - } -} - -type reputationClient struct { - coreclient.MultiAddressClient - - prm truststorage.UpdatePrm - - cons *reputationClientConstructor -} - -func (c *reputationClient) submitResult(err error) { - currEpoch := c.cons.netState.CurrentEpoch() - sat := err == nil - - c.cons.log.Debug( - "writing local reputation values", - zap.Uint64("epoch", currEpoch), - zap.Bool("satisfactory", sat), - ) - - prm := c.prm - prm.SetSatisfactory(sat) - prm.SetEpoch(currEpoch) - - c.cons.trustStorage.Update(prm) -} - -func (c *reputationClient) 
ObjectPutInit(ctx context.Context, prm client.PrmObjectPutInit) (*client.ObjectWriter, error) { - res, err := c.MultiAddressClient.ObjectPutInit(ctx, prm) - - // FIXME: (neofs-node#1193) here we submit only initialization errors, writing errors are not processed - c.submitResult(err) - - return res, err -} - -func (c *reputationClient) ObjectDelete(ctx context.Context, prm client.PrmObjectDelete) (*client.ResObjectDelete, error) { - res, err := c.MultiAddressClient.ObjectDelete(ctx, prm) - if err != nil { - c.submitResult(err) - } else { - c.submitResult(apistatus.ErrFromStatus(res.Status())) - } - - return res, err -} - -func (c *reputationClient) GetObjectInit(ctx context.Context, prm client.PrmObjectGet) (*client.ObjectReader, error) { - res, err := c.MultiAddressClient.ObjectGetInit(ctx, prm) - - // FIXME: (neofs-node#1193) here we submit only initialization errors, reading errors are not processed - c.submitResult(err) - - return res, err -} - -func (c *reputationClient) ObjectHead(ctx context.Context, prm client.PrmObjectHead) (*client.ResObjectHead, error) { - res, err := c.MultiAddressClient.ObjectHead(ctx, prm) - - c.submitResult(err) - - return res, err -} - -func (c *reputationClient) ObjectHash(ctx context.Context, prm client.PrmObjectHash) (*client.ResObjectHash, error) { - res, err := c.MultiAddressClient.ObjectHash(ctx, prm) - - c.submitResult(err) - - return res, err -} - -func (c *reputationClient) ObjectSearchInit(ctx context.Context, prm client.PrmObjectSearch) (*client.ObjectListReader, error) { - res, err := c.MultiAddressClient.ObjectSearchInit(ctx, prm) - - // FIXME: (neofs-node#1193) here we submit only initialization errors, reading errors are not processed - c.submitResult(err) - - return res, err -} - -func (c *reputationClientConstructor) Get(info coreclient.NodeInfo) (coreclient.Client, error) { - cl, err := c.basicConstructor.Get(info) - if err != nil { - return nil, err - } - - nm, err := netmap.GetLatestNetworkMap(c.nmSrc) - if err 
== nil { - key := info.PublicKey() - - nmNodes := nm.Nodes() - var peer apireputation.PeerID - - for i := range nmNodes { - if bytes.Equal(nmNodes[i].PublicKey(), key) { - peer.SetPublicKey(nmNodes[i].PublicKey()) - - prm := truststorage.UpdatePrm{} - prm.SetPeer(peer) - - return &reputationClient{ - MultiAddressClient: cl.(coreclient.MultiAddressClient), - prm: prm, - cons: c, - }, nil - } - } - } else { - c.log.Warn("could not get latest network map to overload the client", - zap.String("error", err.Error()), - ) - } - - return cl, nil -} - type engineWithNotifications struct { base putsvc.ObjectStorage nw notificationWriter @@ -616,20 +445,20 @@ type engineWithNotifications struct { defaultTopic string } -func (e engineWithNotifications) IsLocked(address oid.Address) (bool, error) { - return e.base.IsLocked(address) +func (e engineWithNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) { + return e.base.IsLocked(ctx, address) } func (e engineWithNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error { return e.base.Delete(ctx, tombstone, toDelete) } -func (e engineWithNotifications) Lock(locker oid.Address, toLock []oid.ID) error { - return e.base.Lock(locker, toLock) +func (e engineWithNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error { + return e.base.Lock(ctx, locker, toLock) } -func (e engineWithNotifications) Put(o *objectSDK.Object) error { - if err := e.base.Put(o); err != nil { +func (e engineWithNotifications) Put(ctx context.Context, o *objectSDK.Object) error { + if err := e.base.Put(ctx, o); err != nil { return err } @@ -653,8 +482,8 @@ type engineWithoutNotifications struct { engine *engine.StorageEngine } -func (e engineWithoutNotifications) IsLocked(address oid.Address) (bool, error) { - return e.engine.IsLocked(address) +func (e engineWithoutNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) { + return 
e.engine.IsLocked(ctx, address) } func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error { @@ -672,10 +501,10 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad return err } -func (e engineWithoutNotifications) Lock(locker oid.Address, toLock []oid.ID) error { - return e.engine.Lock(locker.Container(), locker.Object(), toLock) +func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error { + return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock) } -func (e engineWithoutNotifications) Put(o *objectSDK.Object) error { - return engine.Put(e.engine, o) +func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error { + return engine.Put(ctx, e.engine, o) } diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go index 9be2dd9df..dcd320146 100644 --- a/cmd/frostfs-node/pprof.go +++ b/cmd/frostfs-node/pprof.go @@ -1,10 +1,19 @@ package main import ( + "runtime" + profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler" httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" ) +func initProfilerService(c *cfg) { + tuneProfilers(c) + + pprof, _ := pprofComponent(c) + pprof.init(c) +} + func pprofComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before @@ -13,6 +22,7 @@ func pprofComponent(c *cfg) (*httpComponent, bool) { c.dynamicConfiguration.pprof.cfg = c c.dynamicConfiguration.pprof.name = "pprof" c.dynamicConfiguration.pprof.handler = httputil.Handler() + c.dynamicConfiguration.pprof.preReload = tuneProfilers updated = true } @@ -35,3 +45,18 @@ func pprofComponent(c *cfg) (*httpComponent, bool) { return c.dynamicConfiguration.pprof, updated } + +func tuneProfilers(c *cfg) { + // Disabled by default, see documentation for + // runtime.SetBlockProfileRate() and runtime.SetMutexProfileFraction(). 
+ blockRate := 0 + mutexRate := 0 + + if profilerconfig.Enabled(c.appCfg) { + blockRate = profilerconfig.BlockRate(c.appCfg) + mutexRate = profilerconfig.MutexRate(c.appCfg) + } + + runtime.SetBlockProfileRate(blockRate) + runtime.SetMutexProfileFraction(mutexRate) +} diff --git a/cmd/frostfs-node/reputation.go b/cmd/frostfs-node/reputation.go deleted file mode 100644 index a96bd066e..000000000 --- a/cmd/frostfs-node/reputation.go +++ /dev/null @@ -1,385 +0,0 @@ -package main - -import ( - "context" - "fmt" - - v2reputation "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation" - v2reputationgrpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common" - intermediatereputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/intermediate" - localreputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/local" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker" - repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - grpcreputation "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/reputation/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - reputationrouter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common/router" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" - eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator" - eigentrustctrl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/controller" - 
intermediateroutes "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/routes" - consumerstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/consumers" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters" - localtrustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller" - localroutes "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/routes" - truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage" - reputationrpc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/rpc" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "go.uber.org/zap" -) - -func initReputationService(ctx context.Context, c *cfg) { - wrap, err := repClient.NewFromMorph(c.cfgMorph.client, c.cfgReputation.scriptHash, 0, repClient.TryNotary()) - fatalOnErr(err) - - localKey := c.key.PublicKey().Bytes() - - nmSrc := c.netMapSource - - // storing calculated trusts as a daughter - c.cfgReputation.localTrustStorage = truststorage.New( - truststorage.Prm{}, - ) - - daughterStorage := daughters.New(daughters.Prm{}) - consumerStorage := consumerstorage.New(consumerstorage.Prm{}) - - localTrustLogger := &logger.Logger{Logger: c.log.With(zap.String("trust_type", "local"))} - - managerBuilder := reputationcommon.NewManagerBuilder( - reputationcommon.ManagersPrm{ - NetMapSource: nmSrc, - }, - reputationcommon.WithLogger(c.log), - ) - - localRouteBuilder := localroutes.New( - localroutes.Prm{ - ManagerBuilder: managerBuilder, - Log: localTrustLogger, - }, - ) - - localTrustRouter := createLocalTrustRouter(c, localRouteBuilder, localTrustLogger, daughterStorage) - - intermediateTrustRouter := createIntermediateTrustRouter(c, consumerStorage, managerBuilder) - - eigenTrustController := 
createEigenTrustController(c, intermediateTrustRouter, localKey, wrap, daughterStorage, consumerStorage) - - c.cfgReputation.localTrustCtrl = createLocalTrustController(c, localTrustLogger, localKey, localTrustRouter) - - addReputationReportHandler(ctx, c) - - server := grpcreputation.New( - reputationrpc.NewSignService( - &c.key.PrivateKey, - reputationrpc.NewResponseService( - &reputationServer{ - cfg: c, - log: c.log, - localRouter: localTrustRouter, - intermediateRouter: intermediateTrustRouter, - routeBuilder: localRouteBuilder, - }, - c.respSvc, - ), - ), - ) - - for _, srv := range c.cfgGRPC.servers { - v2reputationgrpc.RegisterReputationServiceServer(srv, server) - } - - // initialize eigen trust block timer - newEigenTrustIterTimer(c) - - addEigenTrustEpochHandler(ctx, c, eigenTrustController) -} - -func addReputationReportHandler(ctx context.Context, c *cfg) { - addNewEpochAsyncNotificationHandler( - c, - func(ev event.Event) { - c.log.Debug("start reporting reputation on new epoch event") - - var reportPrm localtrustcontroller.ReportPrm - - // report collected values from previous epoch - reportPrm.SetEpoch(ev.(netmap.NewEpoch).EpochNumber() - 1) - - c.cfgReputation.localTrustCtrl.Report(ctx, reportPrm) - }, - ) -} - -func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController *eigentrustctrl.Controller) { - addNewEpochAsyncNotificationHandler( - c, - func(e event.Event) { - epoch := e.(netmap.NewEpoch).EpochNumber() - - log := c.log.With(zap.Uint64("epoch", epoch)) - - duration, err := c.cfgNetmap.wrapper.EpochDuration() - if err != nil { - log.Debug("could not fetch epoch duration", zap.Error(err)) - return - } - - iterations, err := c.cfgNetmap.wrapper.EigenTrustIterations() - if err != nil { - log.Debug("could not fetch iteration number", zap.Error(err)) - return - } - - epochTimer, err := ticker.NewIterationsTicker(duration, iterations, func() { - eigenTrustController.Continue(ctx, - eigentrustctrl.ContinuePrm{ - Epoch: epoch - 
1, - }, - ) - }) - if err != nil { - log.Debug("could not create fixed epoch timer", zap.Error(err)) - return - } - - c.cfgMorph.eigenTrustTicker.addEpochTimer(epoch, epochTimer) - }, - ) -} - -func createLocalTrustRouter(c *cfg, localRouteBuilder *localroutes.Builder, localTrustLogger *logger.Logger, daughterStorage *daughters.Storage) *reputationrouter.Router { - // storing received daughter(of current node) trusts as a manager - daughterStorageWriterProvider := &intermediatereputation.DaughterStorageWriterProvider{ - Log: c.log, - Storage: daughterStorage, - } - - remoteLocalTrustProvider := common.NewRemoteTrustProvider( - common.RemoteProviderPrm{ - NetmapKeys: c, - DeadEndProvider: daughterStorageWriterProvider, - ClientCache: c.bgClientCache, - WriterProvider: localreputation.NewRemoteProvider( - localreputation.RemoteProviderPrm{ - Key: &c.key.PrivateKey, - Log: localTrustLogger, - }, - ), - Log: localTrustLogger, - }, - ) - - localTrustRouter := reputationrouter.New( - reputationrouter.Prm{ - LocalServerInfo: c, - RemoteWriterProvider: remoteLocalTrustProvider, - Builder: localRouteBuilder, - }, - reputationrouter.WithLogger(localTrustLogger)) - return localTrustRouter -} - -func createIntermediateTrustRouter(c *cfg, consumerStorage *consumerstorage.Storage, managerBuilder reputationcommon.ManagerBuilder) *reputationrouter.Router { - intermediateTrustLogger := &logger.Logger{Logger: c.log.With(zap.String("trust_type", "intermediate"))} - - consumerStorageWriterProvider := &intermediatereputation.ConsumerStorageWriterProvider{ - Log: c.log, - Storage: consumerStorage, - } - - remoteIntermediateTrustProvider := common.NewRemoteTrustProvider( - common.RemoteProviderPrm{ - NetmapKeys: c, - DeadEndProvider: consumerStorageWriterProvider, - ClientCache: c.bgClientCache, - WriterProvider: intermediatereputation.NewRemoteProvider( - intermediatereputation.RemoteProviderPrm{ - Key: &c.key.PrivateKey, - Log: intermediateTrustLogger, - }, - ), - Log: 
intermediateTrustLogger, - }, - ) - - intermediateRouteBuilder := intermediateroutes.New( - intermediateroutes.Prm{ - ManagerBuilder: managerBuilder, - Log: intermediateTrustLogger, - }, - ) - - intermediateTrustRouter := reputationrouter.New( - reputationrouter.Prm{ - LocalServerInfo: c, - RemoteWriterProvider: remoteIntermediateTrustProvider, - Builder: intermediateRouteBuilder, - }, - reputationrouter.WithLogger(intermediateTrustLogger), - ) - return intermediateTrustRouter -} - -func createEigenTrustController(c *cfg, intermediateTrustRouter *reputationrouter.Router, localKey []byte, wrap *repClient.Client, - daughterStorage *daughters.Storage, consumerStorage *consumerstorage.Storage) *eigentrustctrl.Controller { - eigenTrustCalculator := eigentrustcalc.New( - eigentrustcalc.Prm{ - AlphaProvider: c.cfgNetmap.wrapper, - InitialTrustSource: intermediatereputation.InitialTrustSource{ - NetMap: c.netMapSource, - }, - IntermediateValueTarget: intermediateTrustRouter, - WorkerPool: c.cfgReputation.workerPool, - FinalResultTarget: intermediatereputation.NewFinalWriterProvider( - intermediatereputation.FinalWriterProviderPrm{ - PrivatKey: &c.key.PrivateKey, - PubKey: localKey, - Client: wrap, - }, - intermediatereputation.FinalWriterWithLogger(c.log), - ), - DaughterTrustSource: &intermediatereputation.DaughterTrustIteratorProvider{ - DaughterStorage: daughterStorage, - ConsumerStorage: consumerStorage, - }, - }, - eigentrustcalc.WithLogger(c.log), - ) - - eigenTrustController := eigentrustctrl.New( - eigentrustctrl.Prm{ - DaughtersTrustCalculator: &intermediatereputation.DaughtersTrustCalculator{ - Calculator: eigenTrustCalculator, - }, - IterationsProvider: c.cfgNetmap.wrapper, - WorkerPool: c.cfgReputation.workerPool, - }, - eigentrustctrl.WithLogger(c.log), - ) - return eigenTrustController -} - -func createLocalTrustController(c *cfg, localTrustLogger *logger.Logger, localKey []byte, localTrustRouter *reputationrouter.Router) *localtrustcontroller.Controller { - 
localTrustStorage := &localreputation.TrustStorage{ - Log: localTrustLogger, - Storage: c.cfgReputation.localTrustStorage, - NmSrc: c.netMapSource, - LocalKey: localKey, - } - - return localtrustcontroller.New( - localtrustcontroller.Prm{ - LocalTrustSource: localTrustStorage, - LocalTrustTarget: localTrustRouter, - }, - localtrustcontroller.WithLogger(c.log), - ) -} - -type reputationServer struct { - *cfg - log *logger.Logger - localRouter *reputationrouter.Router - intermediateRouter *reputationrouter.Router - routeBuilder reputationrouter.Builder -} - -func (s *reputationServer) AnnounceLocalTrust(ctx context.Context, req *v2reputation.AnnounceLocalTrustRequest) (*v2reputation.AnnounceLocalTrustResponse, error) { - passedRoute := reverseRoute(req.GetVerificationHeader()) - passedRoute = append(passedRoute, s) - - body := req.GetBody() - - ep := &common.EpochProvider{ - E: body.GetEpoch(), - } - - w, err := s.localRouter.InitWriter(reputationrouter.NewRouteInfo(ep, passedRoute)) - if err != nil { - return nil, fmt.Errorf("could not initialize local trust writer: %w", err) - } - - for _, trust := range body.GetTrusts() { - err = s.processLocalTrust(ctx, body.GetEpoch(), apiToLocalTrust(&trust, passedRoute[0].PublicKey()), passedRoute, w) - if err != nil { - return nil, fmt.Errorf("could not write one of local trusts: %w", err) - } - } - - resp := new(v2reputation.AnnounceLocalTrustResponse) - resp.SetBody(new(v2reputation.AnnounceLocalTrustResponseBody)) - - return resp, nil -} - -func (s *reputationServer) AnnounceIntermediateResult(ctx context.Context, req *v2reputation.AnnounceIntermediateResultRequest) (*v2reputation.AnnounceIntermediateResultResponse, error) { - passedRoute := reverseRoute(req.GetVerificationHeader()) - passedRoute = append(passedRoute, s) - - body := req.GetBody() - - ei := eigentrust.NewEpochIteration(body.GetEpoch(), body.GetIteration()) - - w, err := s.intermediateRouter.InitWriter(reputationrouter.NewRouteInfo(ei, passedRoute)) - if err 
!= nil { - return nil, fmt.Errorf("could not initialize trust writer: %w", err) - } - - v2Trust := body.GetTrust() - - trust := apiToLocalTrust(v2Trust.GetTrust(), v2Trust.GetTrustingPeer().GetPublicKey()) - - err = w.Write(ctx, trust) - if err != nil { - return nil, fmt.Errorf("could not write trust: %w", err) - } - - resp := new(v2reputation.AnnounceIntermediateResultResponse) - resp.SetBody(new(v2reputation.AnnounceIntermediateResultResponseBody)) - - return resp, nil -} - -func (s *reputationServer) processLocalTrust(ctx context.Context, epoch uint64, t reputation.Trust, - passedRoute []reputationcommon.ServerInfo, w reputationcommon.Writer) error { - err := reputationrouter.CheckRoute(s.routeBuilder, epoch, t, passedRoute) - if err != nil { - return fmt.Errorf("wrong route of reputation trust value: %w", err) - } - - return w.Write(ctx, t) -} - -// apiToLocalTrust converts v2 Trust to local reputation.Trust, adding trustingPeer. -func apiToLocalTrust(t *v2reputation.Trust, trustingPeer []byte) reputation.Trust { - var trusted, trusting apireputation.PeerID - trusted.SetPublicKey(t.GetPeer().GetPublicKey()) - trusting.SetPublicKey(trustingPeer) - - localTrust := reputation.Trust{} - - localTrust.SetValue(reputation.TrustValueFromFloat64(t.GetValue())) - localTrust.SetPeer(trusted) - localTrust.SetTrustingPeer(trusting) - - return localTrust -} - -func reverseRoute(hdr *session.RequestVerificationHeader) (passedRoute []reputationcommon.ServerInfo) { - for hdr != nil { - passedRoute = append(passedRoute, &common.OnlyKeyRemoteServerInfo{ - Key: hdr.GetBodySignature().GetKey(), - }) - - hdr = hdr.GetOrigin() - } - - return -} diff --git a/cmd/frostfs-node/reputation/common/remote.go b/cmd/frostfs-node/reputation/common/remote.go deleted file mode 100644 index cd0a024a9..000000000 --- a/cmd/frostfs-node/reputation/common/remote.go +++ /dev/null @@ -1,100 +0,0 @@ -package common - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - trustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -type clientCache interface { - Get(client.NodeInfo) (client.Client, error) -} - -// clientKeyRemoteProvider must provide a remote writer and take into account -// that requests must be sent via the passed api client and must be signed with -// the passed private key. -type clientKeyRemoteProvider interface { - WithClient(client.Client) reputationcommon.WriterProvider -} - -// RemoteTrustProvider is an implementation of reputation RemoteWriterProvider interface. -// It caches clients, checks if it is the end of the route and checks either the current -// node is a remote target or not. -// -// remoteTrustProvider requires to be provided with clientKeyRemoteProvider. -type RemoteTrustProvider struct { - netmapKeys netmap.AnnouncedKeys - deadEndProvider reputationcommon.WriterProvider - clientCache clientCache - remoteProvider clientKeyRemoteProvider - log *logger.Logger -} - -// RemoteProviderPrm groups the required parameters of the remoteTrustProvider's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). 
-type RemoteProviderPrm struct { - NetmapKeys netmap.AnnouncedKeys - DeadEndProvider reputationcommon.WriterProvider - ClientCache clientCache - WriterProvider clientKeyRemoteProvider - Log *logger.Logger -} - -func NewRemoteTrustProvider(prm RemoteProviderPrm) *RemoteTrustProvider { - switch { - case prm.NetmapKeys == nil: - PanicOnPrmValue("NetmapKeys", prm.NetmapKeys) - case prm.DeadEndProvider == nil: - PanicOnPrmValue("DeadEndProvider", prm.DeadEndProvider) - case prm.ClientCache == nil: - PanicOnPrmValue("ClientCache", prm.ClientCache) - case prm.WriterProvider == nil: - PanicOnPrmValue("WriterProvider", prm.WriterProvider) - case prm.Log == nil: - PanicOnPrmValue("Logger", prm.Log) - } - - return &RemoteTrustProvider{ - netmapKeys: prm.NetmapKeys, - deadEndProvider: prm.DeadEndProvider, - clientCache: prm.ClientCache, - remoteProvider: prm.WriterProvider, - log: prm.Log, - } -} - -func (rtp *RemoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) { - rtp.log.Debug("initializing remote writer provider") - - if srv == nil { - rtp.log.Debug("route has reached dead-end provider") - return rtp.deadEndProvider, nil - } - - if rtp.netmapKeys.IsLocalKey(srv.PublicKey()) { - // if local => return no-op writer - rtp.log.Debug("initializing no-op writer provider") - return trustcontroller.SimpleWriterProvider(new(NopReputationWriter)), nil - } - - var info client.NodeInfo - - err := client.NodeInfoFromRawNetmapElement(&info, srv) - if err != nil { - return nil, fmt.Errorf("parse client node info: %w", err) - } - - c, err := rtp.clientCache.Get(info) - if err != nil { - return nil, fmt.Errorf("could not initialize API client: %w", err) - } - - return rtp.remoteProvider.WithClient(c), nil -} diff --git a/cmd/frostfs-node/reputation/common/util.go b/cmd/frostfs-node/reputation/common/util.go deleted file mode 100644 index 443adb388..000000000 --- a/cmd/frostfs-node/reputation/common/util.go +++ /dev/null @@ -1,53 +0,0 @@ 
-package common - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" -) - -type EpochProvider struct { - E uint64 -} - -func (ep *EpochProvider) Epoch() uint64 { - return ep.E -} - -type NopReputationWriter struct{} - -func (NopReputationWriter) Write(context.Context, reputation.Trust) error { - return nil -} - -func (NopReputationWriter) Close(context.Context) error { - return nil -} - -// OnlyKeyRemoteServerInfo is an implementation of reputation.ServerInfo -// interface but with only public key data. -type OnlyKeyRemoteServerInfo struct { - Key []byte -} - -func (i *OnlyKeyRemoteServerInfo) PublicKey() []byte { - return i.Key -} - -func (*OnlyKeyRemoteServerInfo) IterateAddresses(func(string) bool) { -} - -func (*OnlyKeyRemoteServerInfo) NumberOfAddresses() int { - return 0 -} - -func (*OnlyKeyRemoteServerInfo) ExternalAddresses() []string { - return nil -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func PanicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} diff --git a/cmd/frostfs-node/reputation/intermediate/calculator.go b/cmd/frostfs-node/reputation/intermediate/calculator.go deleted file mode 100644 index 73dd12311..000000000 --- a/cmd/frostfs-node/reputation/intermediate/calculator.go +++ /dev/null @@ -1,57 +0,0 @@ -package intermediate - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" - eigencalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator" - eigentrustctrl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/controller" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -// InitialTrustSource is an implementation of the -// 
reputation/eigentrust/calculator's InitialTrustSource interface. -type InitialTrustSource struct { - NetMap netmap.Source -} - -var ErrEmptyNetMap = errors.New("empty NepMap") - -// InitialTrust returns `initialTrust` as an initial trust value. -func (i InitialTrustSource) InitialTrust(apireputation.PeerID) (reputation.TrustValue, error) { - nm, err := i.NetMap.GetNetMap(1) - if err != nil { - return reputation.TrustZero, fmt.Errorf("failed to get NetMap: %w", err) - } - - nodeCount := reputation.TrustValueFromFloat64(float64(len(nm.Nodes()))) - if nodeCount == 0 { - return reputation.TrustZero, ErrEmptyNetMap - } - - return reputation.TrustOne.Div(nodeCount), nil -} - -// DaughtersTrustCalculator wraps EigenTrust calculator and implements the -// eigentrust/calculator's DaughtersTrustCalculator interface. -type DaughtersTrustCalculator struct { - Calculator *eigencalc.Calculator -} - -// Calculate converts and passes values to the wrapped calculator. -func (c *DaughtersTrustCalculator) Calculate(ctx context.Context, iterCtx eigentrustctrl.IterationContext) { - calcPrm := eigencalc.CalculatePrm{} - epochIteration := eigentrust.EpochIteration{} - - epochIteration.SetEpoch(iterCtx.Epoch()) - epochIteration.SetI(iterCtx.I()) - - calcPrm.SetLast(iterCtx.Last()) - calcPrm.SetEpochIteration(epochIteration) - - c.Calculator.Calculate(ctx, calcPrm) -} diff --git a/cmd/frostfs-node/reputation/intermediate/consumers.go b/cmd/frostfs-node/reputation/intermediate/consumers.go deleted file mode 100644 index 33eab605b..000000000 --- a/cmd/frostfs-node/reputation/intermediate/consumers.go +++ /dev/null @@ -1,65 +0,0 @@ -package intermediate - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" - eigencalc 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator" - consumerstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/consumers" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -var ErrIncorrectContextPanicMsg = "could not write intermediate trust: passed context incorrect" - -// ConsumerStorageWriterProvider is an implementation of the reputation.WriterProvider -// interface that provides ConsumerTrustWriter writer. -type ConsumerStorageWriterProvider struct { - Log *logger.Logger - Storage *consumerstorage.Storage -} - -// ConsumerTrustWriter is an implementation of the reputation.Writer interface -// that writes passed consumer's Trust values to the Consumer storage. After writing -// that, values can be used in eigenTrust algorithm's iterations. -type ConsumerTrustWriter struct { - log *logger.Logger - storage *consumerstorage.Storage - iterInfo eigencalc.EpochIterationInfo -} - -func (w *ConsumerTrustWriter) Write(_ context.Context, t reputation.Trust) error { - w.log.Debug("writing received consumer's trusts", - zap.Uint64("epoch", w.iterInfo.Epoch()), - zap.Uint32("iteration", w.iterInfo.I()), - zap.Stringer("trusting_peer", t.TrustingPeer()), - zap.Stringer("trusted_peer", t.Peer()), - ) - - trust := eigentrust.IterationTrust{Trust: t} - - trust.SetEpoch(w.iterInfo.Epoch()) - trust.SetI(w.iterInfo.I()) - - w.storage.Put(trust) - return nil -} - -func (w *ConsumerTrustWriter) Close(context.Context) error { - return nil -} - -func (s *ConsumerStorageWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) { - iterInfo, ok := ep.(eigencalc.EpochIterationInfo) - if !ok { - panic(ErrIncorrectContextPanicMsg) - } - - return &ConsumerTrustWriter{ - log: s.Log, - storage: s.Storage, - iterInfo: iterInfo, - }, nil -} diff --git a/cmd/frostfs-node/reputation/intermediate/contract.go 
b/cmd/frostfs-node/reputation/intermediate/contract.go deleted file mode 100644 index 6303b1219..000000000 --- a/cmd/frostfs-node/reputation/intermediate/contract.go +++ /dev/null @@ -1,146 +0,0 @@ -package intermediate - -import ( - "crypto/ecdsa" - "fmt" - - repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" - eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "go.uber.org/zap" -) - -// FinalWriterProviderPrm groups the required parameters of the FinalWriterProvider's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type FinalWriterProviderPrm struct { - PrivatKey *ecdsa.PrivateKey - PubKey []byte - Client *repClient.Client -} - -// NewFinalWriterProvider creates a new instance of the FinalWriterProvider. -// -// Panics if at least one value of the parameters is invalid. -// -// The created FinalWriterProvider does not require additional -// initialization and is completely ready for work. -func NewFinalWriterProvider(prm FinalWriterProviderPrm, opts ...FinalWriterOption) *FinalWriterProvider { - o := defaultFinalWriterOptionsOpts() - - for i := range opts { - opts[i](o) - } - - return &FinalWriterProvider{ - prm: prm, - opts: o, - } -} - -// FinalWriterProvider is an implementation of the reputation.eigentrust.calculator -// IntermediateWriterProvider interface. It inits FinalWriter. 
-type FinalWriterProvider struct { - prm FinalWriterProviderPrm - opts *finalWriterOptions -} - -func (fwp FinalWriterProvider) InitIntermediateWriter( - _ eigentrustcalc.EpochIterationInfo) (eigentrustcalc.IntermediateWriter, error) { - return &FinalWriter{ - privatKey: fwp.prm.PrivatKey, - pubKey: fwp.prm.PubKey, - client: fwp.prm.Client, - l: fwp.opts.log, - }, nil -} - -// FinalWriter is an implementation of the reputation.eigentrust.calculator IntermediateWriter -// interface that writes GlobalTrust to contract directly. -type FinalWriter struct { - privatKey *ecdsa.PrivateKey - pubKey []byte - client *repClient.Client - - l *logger.Logger -} - -func (fw FinalWriter) WriteIntermediateTrust(t eigentrust.IterationTrust) error { - fw.l.Debug("start writing global trusts to contract") - - args := repClient.PutPrm{} - - apiTrustedPeerID := t.Peer() - - var apiTrust apireputation.Trust - apiTrust.SetValue(t.Value().Float64()) - apiTrust.SetPeer(t.Peer()) - - var managerPublicKey [33]byte - copy(managerPublicKey[:], fw.pubKey) - - var apiMangerPeerID apireputation.PeerID - apiMangerPeerID.SetPublicKey(managerPublicKey[:]) - - var gTrust apireputation.GlobalTrust - gTrust.SetTrust(apiTrust) - gTrust.SetManager(apiMangerPeerID) - - err := gTrust.Sign(frostfsecdsa.Signer(*fw.privatKey)) - if err != nil { - fw.l.Debug( - "failed to sign global trust", - zap.Error(err), - ) - return fmt.Errorf("failed to sign global trust: %w", err) - } - - args.SetEpoch(t.Epoch()) - args.SetValue(gTrust) - args.SetPeerID(apiTrustedPeerID) - - err = fw.client.Put( - args, - ) - if err != nil { - fw.l.Debug( - "failed to write global trust to contract", - zap.Error(err), - ) - return fmt.Errorf("failed to write global trust to contract: %w", err) - } - - fw.l.Debug( - "sent global trust to contract", - zap.Uint64("epoch", t.Epoch()), - zap.Float64("value", t.Value().Float64()), - zap.Stringer("peer", t.Peer()), - ) - - return nil -} - -type finalWriterOptions struct { - log *logger.Logger 
-} - -type FinalWriterOption func(*finalWriterOptions) - -func defaultFinalWriterOptionsOpts() *finalWriterOptions { - return &finalWriterOptions{ - log: &logger.Logger{Logger: zap.L()}, - } -} - -func FinalWriterWithLogger(l *logger.Logger) FinalWriterOption { - return func(o *finalWriterOptions) { - if l != nil { - o.log = l - } - } -} diff --git a/cmd/frostfs-node/reputation/intermediate/daughters.go b/cmd/frostfs-node/reputation/intermediate/daughters.go deleted file mode 100644 index d72eead43..000000000 --- a/cmd/frostfs-node/reputation/intermediate/daughters.go +++ /dev/null @@ -1,50 +0,0 @@ -package intermediate - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// DaughterStorageWriterProvider is an implementation of the reputation.WriterProvider -// interface that provides DaughterTrustWriter writer. -type DaughterStorageWriterProvider struct { - Log *logger.Logger - Storage *daughters.Storage -} - -// DaughterTrustWriter is an implementation of the reputation.Writer interface -// that writes passed daughter's Trust values to Daughter storage. After writing -// that, values can be used in eigenTrust algorithm's iterations. 
-type DaughterTrustWriter struct { - log *logger.Logger - storage *daughters.Storage - ep reputationcommon.EpochProvider -} - -func (w *DaughterTrustWriter) Write(_ context.Context, t reputation.Trust) error { - w.log.Debug("writing received daughter's trusts", - zap.Uint64("epoch", w.ep.Epoch()), - zap.Stringer("trusting_peer", t.TrustingPeer()), - zap.Stringer("trusted_peer", t.Peer()), - ) - - w.storage.Put(w.ep.Epoch(), t) - return nil -} - -func (w *DaughterTrustWriter) Close(context.Context) error { - return nil -} - -func (s *DaughterStorageWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) { - return &DaughterTrustWriter{ - log: s.Log, - storage: s.Storage, - ep: ep, - }, nil -} diff --git a/cmd/frostfs-node/reputation/intermediate/remote.go b/cmd/frostfs-node/reputation/intermediate/remote.go deleted file mode 100644 index b1a218b94..000000000 --- a/cmd/frostfs-node/reputation/intermediate/remote.go +++ /dev/null @@ -1,124 +0,0 @@ -package intermediate - -import ( - "context" - "crypto/ecdsa" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client" - coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - reputationapi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "go.uber.org/zap" -) - -// RemoteProviderPrm groups the required parameters of the RemoteProvider's constructor. -// -// All values must comply with the requirements imposed on them. 
-// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type RemoteProviderPrm struct { - Key *ecdsa.PrivateKey - Log *logger.Logger -} - -// NewRemoteProvider creates a new instance of the RemoteProvider. -// -// Panics if at least one value of the parameters is invalid. -// -// The created RemoteProvider does not require additional -// initialization and is completely ready for work. -func NewRemoteProvider(prm RemoteProviderPrm) *RemoteProvider { - switch { - case prm.Key == nil: - common.PanicOnPrmValue("NetMapSource", prm.Key) - case prm.Log == nil: - common.PanicOnPrmValue("Logger", prm.Log) - } - - return &RemoteProvider{ - key: prm.Key, - log: prm.Log, - } -} - -// RemoteProvider is an implementation of the clientKeyRemoteProvider interface. -type RemoteProvider struct { - key *ecdsa.PrivateKey - log *logger.Logger -} - -func (rp RemoteProvider) WithClient(c coreclient.Client) reputationcommon.WriterProvider { - return &TrustWriterProvider{ - client: c, - key: rp.key, - log: rp.log, - } -} - -type TrustWriterProvider struct { - client coreclient.Client - key *ecdsa.PrivateKey - log *logger.Logger -} - -func (twp *TrustWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) { - iterInfo, ok := ep.(eigentrustcalc.EpochIterationInfo) - if !ok { - // TODO: #1164 think if this can be done without such limitation - panic(ErrIncorrectContextPanicMsg) - } - - return &RemoteTrustWriter{ - iterInfo: iterInfo, - client: twp.client, - key: twp.key, - log: twp.log, - }, nil -} - -type RemoteTrustWriter struct { - iterInfo eigentrustcalc.EpochIterationInfo - client coreclient.Client - key *ecdsa.PrivateKey - log *logger.Logger -} - -// Write sends a trust value to a remote node via ReputationService.AnnounceIntermediateResult RPC. 
-func (rtp *RemoteTrustWriter) Write(ctx context.Context, t reputation.Trust) error { - epoch := rtp.iterInfo.Epoch() - i := rtp.iterInfo.I() - - rtp.log.Debug("announcing trust", - zap.Uint64("epoch", epoch), - zap.Uint32("iteration", i), - zap.Stringer("trusting_peer", t.TrustingPeer()), - zap.Stringer("trusted_peer", t.Peer()), - ) - - var apiTrust reputationapi.Trust - apiTrust.SetValue(t.Value().Float64()) - apiTrust.SetPeer(t.Peer()) - - var apiPeerToPeerTrust reputationapi.PeerToPeerTrust - apiPeerToPeerTrust.SetTrustingPeer(t.TrustingPeer()) - apiPeerToPeerTrust.SetTrust(apiTrust) - - var p internalclient.AnnounceIntermediatePrm - - p.SetClient(rtp.client) - p.SetEpoch(epoch) - p.SetIteration(i) - p.SetTrust(apiPeerToPeerTrust) - - _, err := internalclient.AnnounceIntermediate(ctx, p) - - return err -} - -func (rtp *RemoteTrustWriter) Close(context.Context) error { - return nil -} diff --git a/cmd/frostfs-node/reputation/intermediate/storage.go b/cmd/frostfs-node/reputation/intermediate/storage.go deleted file mode 100644 index db29ff92b..000000000 --- a/cmd/frostfs-node/reputation/intermediate/storage.go +++ /dev/null @@ -1,64 +0,0 @@ -package intermediate - -import ( - "fmt" - - eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator" - consumerstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/consumers" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -// DaughterTrustIteratorProvider is an implementation of the -// reputation/eigentrust/calculator's DaughterTrustIteratorProvider interface. -type DaughterTrustIteratorProvider struct { - DaughterStorage *daughters.Storage - ConsumerStorage *consumerstorage.Storage -} - -// InitDaughterIterator returns an iterator over the received -// local trusts for ctx.Epoch() epoch from daughter p. 
-func (ip *DaughterTrustIteratorProvider) InitDaughterIterator(ctx eigentrustcalc.EpochIterationInfo, - p apireputation.PeerID) (eigentrustcalc.TrustIterator, error) { - epoch := ctx.Epoch() - - daughterIterator, ok := ip.DaughterStorage.DaughterTrusts(epoch, p) - if !ok { - return nil, fmt.Errorf("no data in %d epoch for daughter: %s", epoch, p) - } - - return daughterIterator, nil -} - -// InitAllDaughtersIterator returns an iterator over all -// daughters of the current node(manager) and all local -// trusts received from them for ctx.Epoch() epoch. -func (ip *DaughterTrustIteratorProvider) InitAllDaughtersIterator( - ctx eigentrustcalc.EpochIterationInfo) (eigentrustcalc.PeerTrustsIterator, error) { - epoch := ctx.Epoch() - - iter, ok := ip.DaughterStorage.AllDaughterTrusts(epoch) - if !ok { - return nil, fmt.Errorf("no data in %d epoch for daughters", epoch) - } - - return iter, nil -} - -// InitConsumersIterator returns an iterator over all daughters -// of the current node(manager) and all their consumers' local -// trusts for ctx.Epoch() epoch and ctx.I() iteration. 
-func (ip *DaughterTrustIteratorProvider) InitConsumersIterator( - ctx eigentrustcalc.EpochIterationInfo) (eigentrustcalc.PeerTrustsIterator, error) { - epoch, iter := ctx.Epoch(), ctx.I() - - consumerIterator, ok := ip.ConsumerStorage.Consumers(epoch, iter) - if !ok { - return nil, fmt.Errorf("no data for %d iteration in %d epoch for consumers's trusts", - iter, - epoch, - ) - } - - return consumerIterator, nil -} diff --git a/cmd/frostfs-node/reputation/internal/client/client.go b/cmd/frostfs-node/reputation/internal/client/client.go deleted file mode 100644 index ff5131262..000000000 --- a/cmd/frostfs-node/reputation/internal/client/client.go +++ /dev/null @@ -1,101 +0,0 @@ -package internal - -import ( - "context" - - coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -type commonPrm struct { - cli coreclient.Client -} - -// SetClient sets the base client for FrostFS API communication. -// -// Required parameter. -func (x *commonPrm) SetClient(cli coreclient.Client) { - x.cli = cli -} - -// AnnounceLocalPrm groups parameters of AnnounceLocal operation. -type AnnounceLocalPrm struct { - commonPrm - - cliPrm client.PrmAnnounceLocalTrust -} - -// SetEpoch sets the epoch in which the trust was assessed. -func (x *AnnounceLocalPrm) SetEpoch(epoch uint64) { - x.cliPrm.SetEpoch(epoch) -} - -// SetTrusts sets a list of local trust values. -func (x *AnnounceLocalPrm) SetTrusts(ts []reputation.Trust) { - x.cliPrm.SetValues(ts) -} - -// AnnounceLocalRes groups the resulting values of AnnounceLocal operation. -type AnnounceLocalRes struct{} - -// AnnounceLocal sends estimations of local trust to the remote node. -// -// Client, context and key must be set. -// -// Returns any error which prevented the operation from completing correctly in error return. 
-func AnnounceLocal(ctx context.Context, prm AnnounceLocalPrm) (res AnnounceLocalRes, err error) { - var cliRes *client.ResAnnounceLocalTrust - - cliRes, err = prm.cli.AnnounceLocalTrust(ctx, prm.cliPrm) - if err == nil { - // pull out an error from status - err = apistatus.ErrFromStatus(cliRes.Status()) - } - - return -} - -// AnnounceIntermediatePrm groups parameters of AnnounceIntermediate operation. -type AnnounceIntermediatePrm struct { - commonPrm - - cliPrm client.PrmAnnounceIntermediateTrust -} - -// SetEpoch sets the number of the epoch when the trust calculation's iteration was executed. -func (x *AnnounceIntermediatePrm) SetEpoch(epoch uint64) { - x.cliPrm.SetEpoch(epoch) -} - -// SetIteration sets the number of the iteration of the trust calculation algorithm. -func (x *AnnounceIntermediatePrm) SetIteration(iter uint32) { - x.cliPrm.SetIteration(iter) -} - -// SetTrust sets the current global trust value computed at the iteration. -func (x *AnnounceIntermediatePrm) SetTrust(t reputation.PeerToPeerTrust) { - x.cliPrm.SetCurrentValue(t) -} - -// AnnounceIntermediateRes groups the resulting values of AnnounceIntermediate operation. -type AnnounceIntermediateRes struct{} - -// AnnounceIntermediate sends the global trust value calculated at the specified iteration -// and epoch to to the remote node. -// -// Client, context and key must be set. -// -// Returns any error which prevented the operation from completing correctly in error return. 
-func AnnounceIntermediate(ctx context.Context, prm AnnounceIntermediatePrm) (res AnnounceIntermediateRes, err error) { - var cliRes *client.ResAnnounceIntermediateTrust - - cliRes, err = prm.cli.AnnounceIntermediateTrust(ctx, prm.cliPrm) - if err == nil { - // pull out an error from status - err = apistatus.ErrFromStatus(cliRes.Status()) - } - - return -} diff --git a/cmd/frostfs-node/reputation/internal/client/doc.go b/cmd/frostfs-node/reputation/internal/client/doc.go deleted file mode 100644 index 1dc66cee6..000000000 --- a/cmd/frostfs-node/reputation/internal/client/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package internal provides functionality for FrostFS Node Reputation system communication with FrostFS network. -// The base client for accessing remote nodes via FrostFS API is a FrostFS SDK Go API client. -// However, although it encapsulates a useful piece of business logic (e.g. the signature mechanism), -// the Reputation service does not fully use the client's flexible interface. -// -// In this regard, this package provides functions over base API client necessary for the application. -// This allows you to concentrate the entire spectrum of the client's use in one place (this will be convenient -// both when updating the base client and for evaluating the UX of SDK library). So, it is expected that all -// Reputation service packages will be limited to this package for the development of functionality requiring -// FrostFS API communication. 
-package internal diff --git a/cmd/frostfs-node/reputation/local/remote.go b/cmd/frostfs-node/reputation/local/remote.go deleted file mode 100644 index 3c929a9ca..000000000 --- a/cmd/frostfs-node/reputation/local/remote.go +++ /dev/null @@ -1,112 +0,0 @@ -package local - -import ( - "context" - "crypto/ecdsa" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client" - coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - reputationapi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "go.uber.org/zap" -) - -// RemoteProviderPrm groups the required parameters of the RemoteProvider's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type RemoteProviderPrm struct { - Key *ecdsa.PrivateKey - Log *logger.Logger -} - -// NewRemoteProvider creates a new instance of the RemoteProvider. -// -// Panics if at least one value of the parameters is invalid. -// -// The created RemoteProvider does not require additional -// initialization and is completely ready for work. -func NewRemoteProvider(prm RemoteProviderPrm) *RemoteProvider { - switch { - case prm.Key == nil: - common.PanicOnPrmValue("NetMapSource", prm.Key) - case prm.Log == nil: - common.PanicOnPrmValue("Logger", prm.Log) - } - - return &RemoteProvider{ - key: prm.Key, - log: prm.Log, - } -} - -// RemoteProvider is an implementation of the clientKeyRemoteProvider interface. 
-type RemoteProvider struct { - key *ecdsa.PrivateKey - log *logger.Logger -} - -func (rp RemoteProvider) WithClient(c coreclient.Client) reputationcommon.WriterProvider { - return &TrustWriterProvider{ - client: c, - key: rp.key, - log: rp.log, - } -} - -type TrustWriterProvider struct { - client coreclient.Client - key *ecdsa.PrivateKey - log *logger.Logger -} - -func (twp *TrustWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) { - return &RemoteTrustWriter{ - ep: ep, - client: twp.client, - key: twp.key, - log: twp.log, - }, nil -} - -type RemoteTrustWriter struct { - ep reputationcommon.EpochProvider - client coreclient.Client - key *ecdsa.PrivateKey - log *logger.Logger - - buf []reputationapi.Trust -} - -func (rtp *RemoteTrustWriter) Write(_ context.Context, t reputation.Trust) error { - var apiTrust reputationapi.Trust - - apiTrust.SetValue(t.Value().Float64()) - apiTrust.SetPeer(t.Peer()) - - rtp.buf = append(rtp.buf, apiTrust) - - return nil -} - -func (rtp *RemoteTrustWriter) Close(ctx context.Context) error { - epoch := rtp.ep.Epoch() - - rtp.log.Debug("announcing trusts", - zap.Uint64("epoch", epoch), - ) - - var prm internalclient.AnnounceLocalPrm - - prm.SetClient(rtp.client) - prm.SetEpoch(epoch) - prm.SetTrusts(rtp.buf) - - _, err := internalclient.AnnounceLocal(ctx, prm) - - return err -} diff --git a/cmd/frostfs-node/reputation/local/storage.go b/cmd/frostfs-node/reputation/local/storage.go deleted file mode 100644 index 861151871..000000000 --- a/cmd/frostfs-node/reputation/local/storage.go +++ /dev/null @@ -1,107 +0,0 @@ -package local - -import ( - "bytes" - "errors" - - netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - trustcontroller 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller" - truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "go.uber.org/zap" -) - -type TrustStorage struct { - Log *logger.Logger - - Storage *truststorage.Storage - - NmSrc netmapcore.Source - - LocalKey []byte -} - -func (s *TrustStorage) InitIterator(ep reputationcommon.EpochProvider) (trustcontroller.Iterator, error) { - epoch := ep.Epoch() - - s.Log.Debug("initializing iterator over trusts", - zap.Uint64("epoch", epoch), - ) - - epochStorage, err := s.Storage.DataForEpoch(epoch) - if err != nil && !errors.Is(err, truststorage.ErrNoPositiveTrust) { - return nil, err - } - - return &TrustIterator{ - ep: ep, - storage: s, - epochStorage: epochStorage, - }, nil -} - -type TrustIterator struct { - ep reputationcommon.EpochProvider - - storage *TrustStorage - - epochStorage *truststorage.EpochTrustValueStorage -} - -func (it *TrustIterator) Iterate(h reputation.TrustHandler) error { - if it.epochStorage != nil { - err := it.epochStorage.Iterate(h) - if !errors.Is(err, truststorage.ErrNoPositiveTrust) { - return err - } - } - - nm, err := it.storage.NmSrc.GetNetMapByEpoch(it.ep.Epoch()) - if err != nil { - return err - } - - // find out if local node is presented in netmap - localIndex := -1 - - nmNodes := nm.Nodes() - for i := range nmNodes { - if bytes.Equal(nmNodes[i].PublicKey(), it.storage.LocalKey) { - localIndex = i - break - } - } - - ln := len(nmNodes) - if localIndex >= 0 && ln > 0 { - ln-- - } - - // calculate Pj http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Chapter 4.5. 
- p := reputation.TrustOne.Div(reputation.TrustValueFromInt(ln)) - - for i := range nmNodes { - if i == localIndex { - continue - } - - var trusted, trusting apireputation.PeerID - - trusted.SetPublicKey(nmNodes[i].PublicKey()) - trusting.SetPublicKey(it.storage.LocalKey) - - trust := reputation.Trust{} - trust.SetPeer(trusted) - trust.SetValue(p) - trust.SetTrustingPeer(trusting) - - if err := h(trust); err != nil { - return err - } - } - - return nil -} diff --git a/cmd/frostfs-node/reputation/ticker/fixed.go b/cmd/frostfs-node/reputation/ticker/fixed.go deleted file mode 100644 index 5403882d5..000000000 --- a/cmd/frostfs-node/reputation/ticker/fixed.go +++ /dev/null @@ -1,90 +0,0 @@ -package ticker - -import ( - "fmt" - "sync" -) - -// IterationHandler is a callback of a certain block advance. -type IterationHandler func() - -// IterationsTicker represents a fixed tick number block timer. -// -// It can tick the blocks and perform certain actions -// on block time intervals. -type IterationsTicker struct { - m sync.Mutex - - curr uint64 - period uint64 - - times uint64 - - h IterationHandler -} - -// NewIterationsTicker creates a new IterationsTicker. -// -// It guaranties that a handler would be called the -// specified amount of times in the specified amount -// of blocks. After the last meaningful Tick, IterationsTicker -// becomes no-op timer. -// -// Returns an error only if times is greater than totalBlocks. 
-func NewIterationsTicker(totalBlocks uint64, times uint64, h IterationHandler) (*IterationsTicker, error) { - period := totalBlocks / times - - if period == 0 { - return nil, fmt.Errorf("impossible to tick %d times in %d blocks", - times, totalBlocks, - ) - } - - var curr uint64 - - // try to make handler calls as rare as possible - if totalBlocks%times != 0 { - extraBlocks := (period+1)*times - totalBlocks - - if period >= extraBlocks { - curr = extraBlocks + (period-extraBlocks)/2 - period++ - } - } - - return &IterationsTicker{ - curr: curr, - period: period, - times: times, - h: h, - }, nil -} - -// Tick ticks one block in the IterationsTicker. -// -// Returns `false` if the timer has finished its operations -// and there will be no more handler calls. -// Calling Tick after the returned `false` is safe, no-op -// and also returns `false`. -func (ft *IterationsTicker) Tick() bool { - ft.m.Lock() - defer ft.m.Unlock() - - if ft.times == 0 { - return false - } - - ft.curr++ - - if ft.curr%ft.period == 0 { - ft.h() - - ft.times-- - - if ft.times == 0 { - return false - } - } - - return true -} diff --git a/cmd/frostfs-node/reputation/ticker/fixed_test.go b/cmd/frostfs-node/reputation/ticker/fixed_test.go deleted file mode 100644 index 25e9bd08f..000000000 --- a/cmd/frostfs-node/reputation/ticker/fixed_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package ticker - -import ( - "errors" - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestFixedTimer_Tick(t *testing.T) { - tests := [...]struct { - duration uint64 - times uint64 - err error - }{ - { - duration: 20, - times: 4, - err: nil, - }, - { - duration: 6, - times: 6, - err: nil, - }, - { - duration: 10, - times: 6, - err: nil, - }, - { - duration: 5, - times: 6, - err: errors.New("impossible to tick 6 times in 5 blocks"), - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("duration:%d,times:%d", test.duration, test.times), func(t *testing.T) { - counter := uint64(0) - - timer, err 
:= NewIterationsTicker(test.duration, test.times, func() { - counter++ - }) - if test.err != nil { - require.EqualError(t, err, test.err.Error()) - return - } - - require.NoError(t, err) - - for i := 0; i < int(test.duration); i++ { - if !timer.Tick() { - break - } - } - - require.Equal(t, false, timer.Tick()) - require.Equal(t, test.times, counter) - }) - } -} - -func TestFixedTimer_RareCalls(t *testing.T) { - tests := [...]struct { - duration uint64 - times uint64 - firstCall uint64 - period uint64 - }{ - { - duration: 11, - times: 6, - firstCall: 1, - period: 2, - }, - { - duration: 11, - times: 4, - firstCall: 2, - period: 3, - }, - { - duration: 20, - times: 3, - firstCall: 4, - period: 7, - }, - } - - for _, test := range tests { - t.Run(fmt.Sprintf("duration:%d,times:%d", test.duration, test.times), func(t *testing.T) { - var counter uint64 - - timer, err := NewIterationsTicker(test.duration, test.times, func() { - counter++ - }) - require.NoError(t, err) - - checked := false - - for i := 1; i <= int(test.duration); i++ { - if !timer.Tick() { - break - } - - if !checked && counter == 1 { - require.Equal(t, test.firstCall, uint64(i)) - checked = true - } - } - - require.Equal(t, false, timer.Tick()) - require.Equal(t, test.times, counter) - }) - } -} diff --git a/cmd/frostfs-node/timers.go b/cmd/frostfs-node/timers.go deleted file mode 100644 index 2ee2e8656..000000000 --- a/cmd/frostfs-node/timers.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker" -) - -type eigenTrustTickers struct { - m sync.Mutex - - timers map[uint64]*ticker.IterationsTicker -} - -func (e *eigenTrustTickers) addEpochTimer(epoch uint64, timer *ticker.IterationsTicker) { - e.m.Lock() - defer e.m.Unlock() - - e.timers[epoch] = timer -} - -func (e *eigenTrustTickers) tick() { - e.m.Lock() - defer e.m.Unlock() - - for epoch, t := range e.timers { - if !t.Tick() { - delete(e.timers, epoch) - } 
- } -} - -func tickBlockTimers(c *cfg) { - c.cfgMorph.eigenTrustTicker.tick() -} - -func newEigenTrustIterTimer(c *cfg) { - c.cfgMorph.eigenTrustTicker = &eigenTrustTickers{ - // it is expected to have max 2 concurrent epoch - // in normal mode work - timers: make(map[uint64]*ticker.IterationsTicker, 2), - } -} diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go index bbdb71c64..d963ba866 100644 --- a/cmd/frostfs-node/tracing.go +++ b/cmd/frostfs-node/tracing.go @@ -6,6 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "go.uber.org/zap" ) @@ -14,7 +15,7 @@ func initTracing(ctx context.Context, c *cfg) { _, err := tracing.Setup(ctx, *conf) if err != nil { - c.log.Error("failed init tracing", zap.Error(err)) + c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err)) } c.closers = append(c.closers, closer{ @@ -24,7 +25,7 @@ func initTracing(ctx context.Context, c *cfg) { defer cancel() err := tracing.Shutdown(ctx) //cfg context cancels before close if err != nil { - c.log.Error("failed shutdown tracing", zap.Error(err)) + c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err)) } }, }) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index 93a364471..b4f43acac 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -6,6 +6,7 @@ import ( "time" treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" @@ -37,7 +38,7 @@ func (c cnrSource) List() ([]cid.ID, error) { func initTreeService(c *cfg) { treeConfig := 
treeconfig.Tree(c.appCfg) if !treeConfig.Enabled() { - c.log.Info("tree service is not enabled, skip initialization") + c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization) return } @@ -68,7 +69,7 @@ func initTreeService(c *cfg) { addNewEpochNotificationHandler(c, func(_ event.Event) { err := c.treeService.SynchronizeAll() if err != nil { - c.log.Error("could not synchronize Tree Service", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) } }) } else { @@ -79,7 +80,7 @@ func initTreeService(c *cfg) { for range tick.C { err := c.treeService.SynchronizeAll() if err != nil { - c.log.Error("could not synchronize Tree Service", zap.Error(err)) + c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) if errors.Is(err, tree.ErrShuttingDown) { return } @@ -92,11 +93,11 @@ func initTreeService(c *cfg) { ev := e.(containerEvent.DeleteSuccess) // This is executed asynchronously, so we don't care about the operation taking some time. - c.log.Debug("removing all trees for container", zap.Stringer("cid", ev.ID)) + c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID)) err := c.treeService.DropTree(context.Background(), ev.ID, "") if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. 
- c.log.Error("container removal event received, but trees weren't removed", + c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, zap.Stringer("cid", ev.ID), zap.String("error", err.Error())) } diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go index e35fc1afd..d9c0f167f 100644 --- a/cmd/frostfs-node/validate_test.go +++ b/cmd/frostfs-node/validate_test.go @@ -26,13 +26,13 @@ func TestValidate(t *testing.T) { t.Run("mainnet", func(t *testing.T) { os.Clearenv() // ENVs have priority over config files, so we do this in tests p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml") - c := config.New(config.Prm{}, config.WithConfigFile(p)) + c := config.New(p, "", config.EnvPrefix) require.NoError(t, validateConfig(c)) }) t.Run("testnet", func(t *testing.T) { os.Clearenv() // ENVs have priority over config files, so we do this in tests p := filepath.Join(exampleConfigPrefix, "testnet/config.yml") - c := config.New(config.Prm{}, config.WithConfigFile(p)) + c := config.New(p, "", config.EnvPrefix) require.NoError(t, validateConfig(c)) }) } diff --git a/cmd/frostfs-node/config/opts.go b/cmd/internal/common/config/opts.go similarity index 57% rename from cmd/frostfs-node/config/opts.go rename to cmd/internal/common/config/opts.go index 009c6efef..46e565639 100644 --- a/cmd/frostfs-node/config/opts.go +++ b/cmd/internal/common/config/opts.go @@ -1,8 +1,12 @@ package config +import "github.com/spf13/viper" + type opts struct { path string configDir string + envPrefix string + v *viper.Viper } func defaultOpts() *opts { @@ -27,3 +31,19 @@ func WithConfigDir(path string) Option { o.configDir = path } } + +// WithEnvPrefix returns an option to defines +// a prefix that ENVIRONMENT variables will use. +func WithEnvPrefix(envPrefix string) Option { + return func(o *opts) { + o.envPrefix = envPrefix + } +} + +// WithViper returns an option to defines +// a predefined viper.Viper. 
+func WithViper(v *viper.Viper) Option { + return func(o *opts) { + o.v = v + } +} diff --git a/cmd/internal/common/config/viper.go b/cmd/internal/common/config/viper.go new file mode 100644 index 000000000..41b8831ff --- /dev/null +++ b/cmd/internal/common/config/viper.go @@ -0,0 +1,79 @@ +package config + +import ( + "fmt" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" + "github.com/spf13/viper" +) + +const ( + Separator = "." + + // EnvSeparator is a section separator in ENV variables. + EnvSeparator = "_" +) + +func CreateViper(opts ...Option) (*viper.Viper, error) { + o := defaultOpts() + for i := range opts { + opts[i](o) + } + + var v *viper.Viper + if o.v != nil { + v = o.v + } else { + v = viper.New() + } + + if o.envPrefix != "" { + v.SetEnvPrefix(o.envPrefix) + v.AutomaticEnv() + v.SetEnvKeyReplacer(strings.NewReplacer(Separator, EnvSeparator)) + } + + if o.path != "" { + v.SetConfigFile(o.path) + + err := v.ReadInConfig() + if err != nil { + return nil, fmt.Errorf("failed to read config: %w", err) + } + } + + if o.configDir != "" { + if err := config.ReadConfigDir(v, o.configDir); err != nil { + return nil, fmt.Errorf("failed to read config dir: %w", err) + } + } + + return v, nil +} + +func ReloadViper(opts ...Option) error { + o := defaultOpts() + for i := range opts { + opts[i](o) + } + + if o.v == nil { + return fmt.Errorf("provide viper in opts") + } + + if o.path != "" { + err := o.v.ReadInConfig() + if err != nil { + return fmt.Errorf("rereading configuration file: %w", err) + } + } + + if o.configDir != "" { + if err := config.ReadConfigDir(o.v, o.configDir); err != nil { + return fmt.Errorf("rereading configuration dir: %w", err) + } + } + + return nil +} diff --git a/config/example/ir.env b/config/example/ir.env index 2ec821932..7b8f8a89d 100644 --- a/config/example/ir.env +++ b/config/example/ir.env @@ -49,18 +49,6 @@ FROSTFS_IR_WORKERS_BALANCE=10 FROSTFS_IR_WORKERS_CONTAINER=10 FROSTFS_IR_WORKERS_NEOFS=10 
FROSTFS_IR_WORKERS_NETMAP=10 -FROSTFS_IR_WORKERS_REPUTATION=10 -FROSTFS_IR_WORKERS_SUBNET=10 - -FROSTFS_IR_AUDIT_TIMEOUT_GET=5s -FROSTFS_IR_AUDIT_TIMEOUT_HEAD=5s -FROSTFS_IR_AUDIT_TIMEOUT_RANGEHASH=5s -FROSTFS_IR_AUDIT_TIMEOUT_SEARCH=10s -FROSTFS_IR_AUDIT_TASK_EXEC_POOL_SIZE=10 -FROSTFS_IR_AUDIT_TASK_QUEUE_CAPACITY=100 -FROSTFS_IR_AUDIT_PDP_PAIRS_POOL_SIZE=10 -FROSTFS_IR_AUDIT_PDP_MAX_SLEEP_INTERVAL=5s -FROSTFS_IR_AUDIT_POR_POOL_SIZE=10 FROSTFS_IR_INDEXER_CACHE_TIMEOUT=15s @@ -69,14 +57,11 @@ FROSTFS_IR_NETMAP_CLEANER_THRESHOLD=3 FROSTFS_IR_CONTRACTS_NEOFS=ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62 FROSTFS_IR_CONTRACTS_PROCESSING=597f5894867113a41e192801709c02497f611de8 -FROSTFS_IR_CONTRACTS_AUDIT=219e37aed2180b87e7fe945dbf97d67125e8d73f FROSTFS_IR_CONTRACTS_BALANCE=d2aa48d14b17b11bc4c68205027884a96706dd16 FROSTFS_IR_CONTRACTS_CONTAINER=ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 FROSTFS_IR_CONTRACTS_NEOFSID=9f5866decbc751a099e74c7c7bc89f609201755a FROSTFS_IR_CONTRACTS_NETMAP=83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 FROSTFS_IR_CONTRACTS_PROXY=abc8794bb40a21f2db5f21ae62741eb46c8cad1c -FROSTFS_IR_CONTRACTS_REPUTATION=d793b842ff0c103fe89e385069e82a27602135ff -FROSTFS_IR_CONTRACTS_SUBNET=e9266864d3c562c6e17f2bb9cb1392aaa293d93a FROSTFS_IR_CONTRACTS_ALPHABET_AMOUNT=7 FROSTFS_IR_CONTRACTS_ALPHABET_AZ=c1d211fceeb4b1dc76b8e4054d11fdf887e418ea FROSTFS_IR_CONTRACTS_ALPHABET_BUKY=e2ba789320899658b100f331bdebb74474757920 @@ -89,10 +74,9 @@ FROSTFS_IR_CONTRACTS_ALPHABET_ZHIVETE=f584699bc2ff457d339fb09f16217042c1a42101 FROSTFS_IR_PPROF_ENABLED=true FROSTFS_IR_PPROF_ADDRESS=localhost:6060 FROSTFS_IR_PPROF_SHUTDOWN_TIMEOUT=30s +FROSTFS_IR_PPROF_BLOCK_RATE=10000 +FROSTFS_IR_PPROF_MUTEX_RATE=10000 FROSTFS_IR_PROMETHEUS_ENABLED=true FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090 FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s - -FROSTFS_IR_SETTLEMENT_BASIC_INCOME_RATE=100 -FROSTFS_IR_SETTLEMENT_AUDIT_FEE=100 diff --git a/config/example/ir.yaml b/config/example/ir.yaml index 3dca0017a..1130a840b 
100644 --- a/config/example/ir.yaml +++ b/config/example/ir.yaml @@ -83,23 +83,6 @@ workers: container: 10 # Number of workers to process events from container contract in parallel frostfs: 10 # Number of workers to process events from frostfs contracts in parallel netmap: 10 # Number of workers to process events from netmap contract in parallel - reputation: 10 # Number of workers to process events from reputation contract in parallel - subnet: 10 # Number of workers to process events from subnet contract in parallel - -audit: - timeout: - get: 5s # Timeout for object.Get operation during data audit - head: 5s # Timeout for object.Head operation during data audit - rangehash: 5s # Timeout for object.RangeHash operation during data audit - search: 10s # Timeout for object.Search operation during data audit - task: - exec_pool_size: 10 # Number of workers to process audit routine in parallel - queue_capacity: 100 # Maximum amount of simultaneous audit jobs - pdp: - pairs_pool_size: 10 # Number of workers to process PDP part of data audit in parallel - max_sleep_interval: 5s # Maximum timeout between object.RangeHash requests to the storage node - por: - pool_size: 10 # Number of workers to process PoR part of data audit in parallel indexer: cache_timeout: 15s # Duration between internal state update about current list of inner ring nodes @@ -111,14 +94,11 @@ netmap_cleaner: contracts: frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62 # Address of FrostFS contract in mainchain; ignore if mainchain is disabled processing: 597f5894867113a41e192801709c02497f611de8 # Address of processing contract in mainchain; ignore if mainchain is disabled or notary is disabled in mainchain - audit: 219e37aed2180b87e7fe945dbf97d67125e8d73f # Optional: override address of audit contract in sidechain balance: d2aa48d14b17b11bc4c68205027884a96706dd16 # Optional: override address of balance contract in sidechain container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 # Optional: override 
address of container contract in sidechain frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a # Optional: override address of frostfsid contract in sidechain netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 # Optional: override address of netmap contract in sidechain proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c # Optional: override address of proxy contract in sidechain; ignore if notary is disabled in sidechain - reputation: d793b842ff0c103fe89e385069e82a27602135ff # Optional: override address of reputation contract in sidechain - subnet: e9266864d3c562c6e17f2bb9cb1392aaa293d93a # Optional: override address of subnet contract in sidechain alphabet: amount: 7 # Optional: override amount of alphabet contracts az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea # Optional: override address of az alphabet contract in sidechain @@ -133,12 +113,10 @@ pprof: enabled: true address: localhost:6060 # Endpoint for application pprof profiling; disabled by default shutdown_timeout: 30s # Timeout for profiling HTTP server graceful shutdown + block_rate: 10000 # sampling rate: an average of one blocking event per rate nanoseconds spent blocked is reported; "1" reports every blocking event; "0" disables profiler + mutex_rate: 10000 # sampling rate: on average 1/rate events are reported; "0" disables profiler prometheus: enabled: true address: localhost:9090 # Endpoint for application prometheus metrics; disabled by default shutdown_timeout: 30s # Timeout for metrics HTTP server graceful shutdown - -settlement: - basic_income_rate: 100 # Optional: override basic income rate value from network config; applied only in debug mode - audit_fee: 100 # Optional: override audit fee value from network config; applied only in debug mode diff --git a/config/example/node.env b/config/example/node.env index 9a1a8b052..143bf0388 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -3,6 +3,8 @@ FROSTFS_LOGGER_LEVEL=debug FROSTFS_PPROF_ENABLED=true 
FROSTFS_PPROF_ADDRESS=localhost:6060 FROSTFS_PPROF_SHUTDOWN_TIMEOUT=15s +FROSTFS_PPROF_BLOCK_RATE=10000 +FROSTFS_PPROF_MUTEX_RATE=10000 FROSTFS_PROMETHEUS_ENABLED=true FROSTFS_PROMETHEUS_ADDRESS=localhost:9090 @@ -19,8 +21,6 @@ FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK" FROSTFS_NODE_RELAY=true FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions FROSTFS_NODE_PERSISTENT_STATE_PATH=/state -FROSTFS_NODE_SUBNET_EXIT_ZERO=true -FROSTFS_NODE_SUBNET_ENTRIES=123 456 789 FROSTFS_NODE_NOTIFICATION_ENABLED=true FROSTFS_NODE_NOTIFICATION_ENDPOINT=tls://localhost:4222 FROSTFS_NODE_NOTIFICATION_TIMEOUT=6s @@ -58,7 +58,6 @@ FROSTFS_CONTROL_GRPC_ENDPOINT=localhost:8090 FROSTFS_CONTRACTS_BALANCE=5263abba1abedbf79bb57f3e40b50b4425d2d6cd FROSTFS_CONTRACTS_CONTAINER=5d084790d7aa36cea7b53fe897380dab11d2cd3c FROSTFS_CONTRACTS_NETMAP=0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca -FROSTFS_CONTRACTS_REPUTATION=441995f631c1da2b133462b71859494a5cd45e90 FROSTFS_CONTRACTS_PROXY=ad7c6b55b737b696e5c82c85445040964a03e97f # Morph chain section @@ -187,4 +186,4 @@ FROSTFS_STORAGE_SHARD_1_GC_REMOVER_SLEEP_INTERVAL=5m FROSTFS_TRACING_ENABLED=true FROSTFS_TRACING_ENDPOINT="localhost" -FROSTFS_TRACING_EXPORTER="otlp_grpc" \ No newline at end of file +FROSTFS_TRACING_EXPORTER="otlp_grpc" diff --git a/config/example/node.json b/config/example/node.json index 8cfb5bb69..04aabdd42 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -5,7 +5,9 @@ "pprof": { "enabled": true, "address": "localhost:6060", - "shutdown_timeout": "15s" + "shutdown_timeout": "15s", + "block_rate": 10000, + "mutex_rate": 10000 }, "prometheus": { "enabled": true, @@ -34,14 +36,6 @@ "persistent_state": { "path": "/state" }, - "subnet": { - "exit_zero": true, - "entries": [ - "123", - "456", - "789" - ] - }, "notification": { "enabled": true, "endpoint": "tls://localhost:4222", @@ -96,7 +90,6 @@ "balance": "5263abba1abedbf79bb57f3e40b50b4425d2d6cd", "container": "5d084790d7aa36cea7b53fe897380dab11d2cd3c", "netmap": 
"0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca", - "reputation": "441995f631c1da2b133462b71859494a5cd45e90", "proxy": "ad7c6b55b737b696e5c82c85445040964a03e97f" }, "morph": { diff --git a/config/example/node.yaml b/config/example/node.yaml index e3b41d413..bc665a688 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -5,6 +5,8 @@ pprof: enabled: true address: localhost:6060 # endpoint for Node profiling shutdown_timeout: 15s # timeout for profiling HTTP server graceful shutdown + block_rate: 10000 # sampling rate: an average of one blocking event per rate nanoseconds spent blocked is reported; "1" reports every blocking event; "0" disables profiler + mutex_rate: 10000 # sampling rate: on average 1/rate events are reported; "0" disables profiler prometheus: enabled: true @@ -29,12 +31,6 @@ node: path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions) persistent_state: path: /state # path to persistent state file of Storage node - subnet: - exit_zero: true # toggle entrance to zero subnet (overrides corresponding attribute and occurrence in `entries`) - entries: # list of IDs of subnets to enter in a text format of FrostFS API protocol (overrides corresponding attributes) - - 123 - - 456 - - 789 notification: enabled: true # turn on object notification service endpoint: "tls://localhost:4222" # notification server endpoint @@ -78,7 +74,6 @@ contracts: # side chain NEOFS contract script hashes; optional, override values balance: 5263abba1abedbf79bb57f3e40b50b4425d2d6cd container: 5d084790d7aa36cea7b53fe897380dab11d2cd3c netmap: 0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca - reputation: 441995f631c1da2b133462b71859494a5cd45e90 proxy: ad7c6b55b737b696e5c82c85445040964a03e97f morph: @@ -219,4 +214,3 @@ tracing: enabled: true exporter: "otlp_grpc" endpoint: "localhost" - \ No newline at end of file diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml index 2b240e0b2..7db476e55 100644 --- 
a/config/mainnet/config.yml +++ b/config/mainnet/config.yml @@ -68,4 +68,3 @@ contracts: balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55 container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5 netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1 - reputation: 7ad824fd1eeb1565be2cee3889214b9aa605d2fc diff --git a/config/testnet/config.yml b/config/testnet/config.yml index 416ba35e4..76b36cdf6 100644 --- a/config/testnet/config.yml +++ b/config/testnet/config.yml @@ -16,7 +16,6 @@ contracts: balance: e0420c216003747626670d1424569c17c79015bf container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0 netmap: d4b331639799e2958d4bc5b711b469d79de94e01 - reputation: 376c23a2ae1fad088c82046abb59984e3c4519d9 node: key: /node.key diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 6ecd15907..2c78cf6b1 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -75,13 +75,23 @@ element. Contains configuration for the `pprof` profiler. -| Parameter | Type | Default value | Description | -|--------------------|------------|---------------|-----------------------------------------| -| `enabled` | `bool` | `false` | Flag to enable the service. | -| `address` | `string` | | Address that service listener binds to. | -| `shutdown_timeout` | `duration` | `30s` | Time to wait for a graceful shutdown. | +| Parameter | Type | Default value | Description | +|--------------------|-----------------------------------|---------------|-----------------------------------------| +| `enabled` | `bool` | `false` | Flag to enable the service. | +| `address` | `string` | | Address that service listener binds to. | +| `shutdown_timeout` | `duration` | `30s` | Time to wait for a graceful shutdown. | +| `debug` | [Debug config](#debug-subsection) | | Optional profiles configuration | +## `debug` subsection + +Contains optional profiles configuration. 
+ +| Parameter | Type | Default value | Description | +|--------------|-------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `block_rate` | `int` | `0` | Controls the block profiler. Non-positive values disable profiler reports. For more information: https://pkg.go.dev/runtime@go1.20.3#SetBlockProfileRate. | +| `mutex_rate` | `int` | `0` | Controls the mutex profiler. Non-positive values disable profiler reports. For more information: https://pkg.go.dev/runtime@go1.20.3#SetMutexProfileFraction. | + # `prometheus` section Contains configuration for the `prometheus` metrics service. @@ -113,18 +123,14 @@ contracts: balance: 5263abba1abedbf79bb57f3e40b50b4425d2d6cd container: 5d084790d7aa36cea7b53fe897380dab11d2cd3c netmap: 0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca - reputation: 441995f631c1da2b133462b71859494a5cd45e90 proxy: ad7c6b55b737b696e5c82c85445040964a03e97f ``` | Parameter | Type | Default value | Description | |--------------|-----------|---------------|---------------------------| -| `audit` | `hash160` | | Audit contract hash. | | `balance` | `hash160` | | Balance contract hash. | | `container` | `hash160` | | Container contract hash. | | `netmap` | `hash160` | | Netmap contract hash. | -| `reputation` | `hash160` | | Reputation contract hash. | -| `subnet` | `hash160` | | Subnet contract hash. | # `morph` section @@ -302,10 +308,6 @@ node: path: /sessions persistent_state: path: /state - subnet: - exit_zero: false - entries: - - 123 notification: enabled: true endpoint: tls://localhost:4222 @@ -325,7 +327,6 @@ node: | `relay` | `bool` | | Enable relay mode. | | `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | | `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. 
| -| `subnet` | [Subnet config](#subnet-subsection) | | Subnet configuration. | | `notification` | [Notification config](#notification-subsection) | | NATS configuration. | @@ -354,14 +355,6 @@ It is used to correctly handle node restarts or crashes. |-----------|----------|------------------------|------------------------| | `path` | `string` | `.frostfs-storage-state` | Path to the database. | -## `subnet` subsection -This is an advanced section, use with caution. - -| Parameter | Type | Default value | Description | -|-------------|------------|---------------|------------------------------------------------------| -| `exit_zero` | `bool` | `false` | Exit from the default subnet. | -| `entries` | `[]uint32` | | List of non-default subnet ID this node belongs to. | - ## `notification` subsection This is an advanced section, use with caution. diff --git a/go.mod b/go.mod index 301be6934..2e9286a05 100644 --- a/go.mod +++ b/go.mod @@ -3,42 +3,42 @@ module git.frostfs.info/TrueCloudLab/frostfs-node go 1.18 require ( - git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.0 + git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230418080822-bd44a3f47b85 git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230316081442-bec77f280a85 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230505094539-15b4287092bd git.frostfs.info/TrueCloudLab/hrw v1.2.0 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 github.com/cheggaaa/pb v1.0.29 github.com/chzyer/readline v1.5.1 github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 - github.com/google/go-github/v39 v39.2.0 github.com/google/uuid v1.3.0 github.com/hashicorp/golang-lru/v2 v2.0.1 - github.com/klauspost/compress v1.15.13 + github.com/klauspost/compress v1.16.5 github.com/mitchellh/go-homedir v1.1.0 github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.8.0 - github.com/nats-io/nats.go v1.22.1 + 
github.com/multiformats/go-multiaddr v0.9.0 + github.com/nats-io/nats.go v1.25.0 github.com/nspcc-dev/neo-go v0.100.1 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.4.0 - github.com/paulmach/orb v0.2.2 - github.com/prometheus/client_golang v1.13.0 + github.com/paulmach/orb v0.9.1 + github.com/prometheus/client_golang v1.15.0 + github.com/prometheus/client_model v0.3.0 github.com/spf13/cast v1.5.0 - github.com/spf13/cobra v1.6.1 + github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 github.com/stretchr/testify v1.8.2 go.etcd.io/bbolt v1.3.6 go.opentelemetry.io/otel v1.14.0 go.opentelemetry.io/otel/trace v1.14.0 - go.uber.org/atomic v1.10.0 + go.uber.org/atomic v1.11.0 go.uber.org/zap v1.24.0 golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 - golang.org/x/sync v0.1.0 - golang.org/x/term v0.5.0 + golang.org/x/sync v0.2.0 + golang.org/x/term v0.8.0 google.golang.org/grpc v1.53.0 - google.golang.org/protobuf v1.28.1 + google.golang.org/protobuf v1.30.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -56,38 +56,36 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.3 // indirect - github.com/google/go-querystring v1.1.0 // indirect github.com/gorilla/websocket v1.4.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/uint256 v1.2.0 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/ipfs/go-cid v0.3.2 // indirect - github.com/klauspost/cpuid/v2 v2.2.2 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ipfs/go-cid v0.4.1 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect 
github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multihash v0.2.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/nats-io/nats-server/v2 v2.7.4 // indirect - github.com/nats-io/nkeys v0.3.0 // indirect + github.com/nats-io/nkeys v0.4.4 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb // indirect github.com/nspcc-dev/rfc6979 v0.2.0 // indirect github.com/pelletier/go-toml/v2 v2.0.7 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.9.5 // indirect @@ -96,17 +94,18 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 // indirect github.com/twmb/murmur3 v1.1.5 // indirect github.com/urfave/cli v1.22.5 // indirect + go.mongodb.org/mongo-driver v1.11.4 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.14.0 // indirect go.opentelemetry.io/otel/sdk v1.14.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.4.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect gopkg.in/ini.v1 v1.67.0 // indirect lukechampine.com/blake3 v1.1.7 // indirect diff --git a/go.sum b/go.sum index f65a7aeab..29fef2509 100644 Binary files a/go.sum and b/go.sum differ diff --git a/internal/logs/logs.go b/internal/logs/logs.go new file mode 100644 index 000000000..742f6a8f7 --- /dev/null +++ b/internal/logs/logs.go @@ -0,0 +1,488 @@ +package logs + +// Common service logs. +const ( + ServingRequest = "serving request..." + OperationFinishedSuccessfully = "operation finished successfully" + OperationFinishedWithError = "operation finished with error" + + TryingToExecuteInContainer = "trying to execute in container..." + CouldNotGetCurrentEpochNumber = "could not get current epoch number" + ProcessEpoch = "process epoch" + ProcessingNode = "processing node..." 
+ NoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" + InterruptPlacementIterationByContext = "interrupt placement iteration by context" + + Notification = "notification" +) + +const ( + InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations" // Debug in ../node/pkg/innerring/blocktimer.go + InnerringCantStopEpochEstimation = "can't stop epoch estimation" // Warn in ../node/pkg/innerring/blocktimer.go + InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain" // Error in ../node/pkg/innerring/notary.go + InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain" // Error in ../node/pkg/innerring/notary.go + InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/innerring/notary.go + InnerringCantGetInnerRingIndex = "can't get inner ring index" // Error in ../node/pkg/innerring/state.go + InnerringCantGetInnerRingSize = "can't get inner ring size" // Error in ../node/pkg/innerring/state.go + InnerringCantGetAlphabetIndex = "can't get alphabet index" // Error in ../node/pkg/innerring/state.go + InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range" // Info in ../node/pkg/innerring/state.go + InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list" // Info in ../node/pkg/innerring/state.go + InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract" // Warn in ../node/pkg/innerring/state.go + InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number" // Warn in ../node/pkg/innerring/initialization.go + InnerringNotarySupport = "notary support" // Info in ../node/pkg/innerring/initialization.go + InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled" // Debug in 
../node/pkg/innerring/initialization.go + InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled" // Info in ../node/pkg/innerring/initialization.go + InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/pkg/innerring/initialization.go + InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global" // Info in ../node/pkg/innerring/initialization.go + InnerringCantVoteForPreparedValidators = "can't vote for prepared validators" // Warn in ../node/pkg/innerring/innerring.go + InnerringNewBlock = "new block" // Debug in ../node/pkg/innerring/innerring.go + InnerringCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/pkg/innerring/innerring.go + InnerringCloserError = "closer error" // Warn in ../node/pkg/innerring/innerring.go + InnerringReadConfigFromBlockchain = "read config from blockchain" // Debug in ../node/pkg/innerring/innerring.go + NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications" // Debug in ../node/pkg/services/notificator/service.go + NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification" // Debug in ../node/pkg/services/notificator/service.go + PolicerCouldNotGetContainer = "could not get container" // Error in ../node/pkg/services/policer/check.go + PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container" // Error in ../node/pkg/services/policer/check.go + PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object" // Error in ../node/pkg/services/policer/check.go + PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected" // Info in ../node/pkg/services/policer/check.go + PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance" // 
Error in ../node/pkg/services/policer/check.go + PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK" // Debug in ../node/pkg/services/policer/check.go + PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected" // Debug in ../node/pkg/services/policer/check.go + PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy" // Debug in ../node/pkg/services/policer/check.go + PolicerRoutineStopped = "routine stopped" // Info in ../node/pkg/services/policer/process.go + PolicerFailureAtObjectSelectForReplication = "failure at object select for replication" // Warn in ../node/pkg/services/policer/process.go + PolicerPoolSubmission = "pool submission" // Warn in ../node/pkg/services/policer/process.go + PolicerTuneReplicationCapacity = "tune replication capacity" // Debug in ../node/pkg/services/policer/process.go + ReplicatorFinishWork = "finish work" // Debug in ../node/pkg/services/replicator/process.go + ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go + ReplicatorCouldNotReplicateObject = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go + ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" // Debug in ../node/pkg/services/replicator/process.go + TreeRedirectingTreeServiceQuery = "redirecting tree service query" // Debug in ../node/pkg/services/tree/redirect.go + TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL" // Debug in ../node/pkg/services/tree/signature.go + TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go + TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go + 
TreeSynchronizeTree = "synchronize tree" // Debug in ../node/pkg/services/tree/sync.go + TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes" // Warn in ../node/pkg/services/tree/sync.go + TreeSyncingTrees = "syncing trees..." // Debug in ../node/pkg/services/tree/sync.go + TreeCouldNotFetchContainers = "could not fetch containers" // Error in ../node/pkg/services/tree/sync.go + TreeTreesHaveBeenSynchronized = "trees have been synchronized" // Debug in ../node/pkg/services/tree/sync.go + TreeSyncingContainerTrees = "syncing container trees..." // Debug in ../node/pkg/services/tree/sync.go + TreeCouldNotSyncTrees = "could not sync trees" // Error in ../node/pkg/services/tree/sync.go + TreeContainerTreesHaveBeenSynced = "container trees have been synced" // Debug in ../node/pkg/services/tree/sync.go + TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization" // Error in ../node/pkg/services/tree/sync.go + TreeRemovingRedundantTrees = "removing redundant trees..." 
// Debug in ../node/pkg/services/tree/sync.go + TreeCouldNotRemoveRedundantTree = "could not remove redundant tree" // Error in ../node/pkg/services/tree/sync.go + TreeCouldNotCalculateContainerNodes = "could not calculate container nodes" // Error in ../node/pkg/services/tree/sync.go + TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation" // Error in ../node/pkg/services/tree/replicator.go + TreeDoNotSendUpdateToTheNode = "do not send update to the node" // Debug in ../node/pkg/services/tree/replicator.go + TreeFailedToSentUpdateToTheNode = "failed to sent update to the node" // Warn in ../node/pkg/services/tree/replicator.go + TreeErrorDuringReplication = "error during replication" // Error in ../node/pkg/services/tree/replicator.go + PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage" // Error in ../node/pkg/services/session/storage/persistent/storage.go + PersistentCouldNotDeleteSToken = "could not delete token" // Error in ../node/pkg/services/session/storage/persistent/storage.go + PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens" // Error in ../node/pkg/services/session/storage/persistent/storage.go + ControllerReportIsAlreadyStarted = "report is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go + DeleteRequestIsNotRolledOverToTheContainer = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go + DeleteCouldNotComposeSplitInfo = "could not compose split info" // Debug in ../node/pkg/services/object/delete/exec.go + DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY" // Debug in ../node/pkg/services/object/delete/exec.go + DeleteAssemblingChain = "assembling chain..." 
// Debug in ../node/pkg/services/object/delete/exec.go + DeleteCouldNotGetPreviousSplitElement = "could not get previous split element" // Debug in ../node/pkg/services/object/delete/exec.go + DeleteCollectingChildren = "collecting children..." // Debug in ../node/pkg/services/object/delete/exec.go + DeleteCouldNotCollectObjectChildren = "could not collect object children" // Debug in ../node/pkg/services/object/delete/exec.go + DeleteSupplementBySplitID = "supplement by split ID" // Debug in ../node/pkg/services/object/delete/exec.go + DeleteCouldNotSearchForSplitChainMembers = "could not search for split chain members" // Debug in ../node/pkg/services/object/delete/exec.go + DeleteCouldNotMarshalTombstoneStructure = "could not marshal tombstone structure" // Debug in ../node/pkg/services/object/delete/exec.go + DeleteCouldNotSaveTheTombstone = "could not save the tombstone" // Debug in ../node/pkg/services/object/delete/exec.go + DeleteFormingTombstoneStructure = "forming tombstone structure..." // Debug in ../node/pkg/services/object/delete/local.go + DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..." // Debug in ../node/pkg/services/object/delete/local.go + DeleteCouldNotReadTombstoneLifetimeConfig = "could not read tombstone lifetime config" // Debug in ../node/pkg/services/object/delete/local.go + DeleteFormingSplitInfo = "forming split info..." // Debug in ../node/pkg/services/object/delete/local.go + DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..." 
// Debug in ../node/pkg/services/object/delete/local.go + DeleteMembersSuccessfullyCollected = "members successfully collected" // Debug in ../node/pkg/services/object/delete/local.go + GetRemoteCallFailed = "remote call failed" // Debug in ../node/pkg/services/object/get/remote.go + GetCanNotAssembleTheObject = "can not assemble the object" // Debug in ../node/pkg/services/object/get/assemble.go + GetTryingToAssembleTheObject = "trying to assemble the object..." // Debug in ../node/pkg/services/object/get/assemble.go + GetAssemblingSplittedObject = "assembling splitted object..." // Debug in ../node/pkg/services/object/get/assemble.go + GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" // Debug in ../node/pkg/services/object/get/assemble.go + GetFailedToAssembleSplittedObject = "failed to assemble splitted object" // Warn in ../node/pkg/services/object/get/assemble.go + GetCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/get/exec.go + GetCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/get/exec.go + GetCouldNotWriteHeader = "could not write header" // Debug in ../node/pkg/services/object/get/exec.go + GetCouldNotWritePayloadChunk = "could not write payload chunk" // Debug in ../node/pkg/services/object/get/exec.go + GetLocalGetFailed = "local get failed" // Debug in ../node/pkg/services/object/get/local.go + GetReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/get/container.go + GetCompletingTheOperation = "completing the operation" // Debug in ../node/pkg/services/object/get/container.go + GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed" // Debug in ../node/pkg/services/object/get/get.go + GetRequestedObjectIsVirtual = "requested object is virtual" // Debug in ../node/pkg/services/object/get/get.go + 
GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds" // Debug in ../node/pkg/services/object/get/get.go + PutAdditionalContainerBroadcastFailure = "additional container broadcast failure" // Error in ../node/pkg/services/object/put/distributed.go + SearchReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/search/container.go + SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/search/container.go + SearchRemoteOperationFailed = "remote operation failed" // Debug in ../node/pkg/services/object/search/container.go + SearchCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/search/exec.go + SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" // Debug in ../node/pkg/services/object/search/exec.go + SearchLocalOperationFailed = "local operation failed" // Debug in ../node/pkg/services/object/search/local.go + UtilObjectServiceError = "object service error" // Error in ../node/pkg/services/object/util/log.go + UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" // Error in ../node/pkg/services/object/util/log.go + V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" // Debug in ../node/pkg/services/object/acl/v2/classifier.go + V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" // Debug in ../node/pkg/services/object/acl/v2/classifier.go + NatsNatsConnectionWasLost = "nats: connection was lost" // Error in ../node/pkg/services/notificator/nats/service.go + NatsNatsReconnectedToTheServer = "nats: reconnected to the server" // Warn in ../node/pkg/services/notificator/nats/service.go + NatsNatsClosingConnectionAsTheContextIsDone = "nats: closing connection as the context is done" // Info in ../node/pkg/services/notificator/nats/service.go + 
ControllerStartingToAnnounceTheValuesOfTheMetrics = "starting to announce the values of the metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics = "could not initialize iterator over locally collected metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerCouldNotInitializeAnnouncementAccumulator = "could not initialize announcement accumulator" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerIteratorOverLocallyCollectedMetricsAborted = "iterator over locally collected metrics aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerCouldNotFinishWritingLocalAnnouncements = "could not finish writing local announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerTrustAnnouncementSuccessfullyFinished = "trust announcement successfully finished" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerAnnouncementIsAlreadyStarted = "announcement is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerAnnouncementSuccessfullyInterrupted = "announcement successfully interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerAnnouncementIsNotStartedOrAlreadyInterrupted = "announcement is not started or already interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements = "could not initialize iterator over locally accumulated announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerCouldNotInitializeResultTarget = "could not initialize result target" // Debug in 
../node/pkg/services/container/announcement/load/controller/calls.go + ControllerIteratorOverLocalAnnouncementsAborted = "iterator over local announcements aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + ControllerCouldNotFinishWritingLoadEstimations = "could not finish writing load estimations" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go + RouteCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go + RouteCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go + RouteCouldNotPutTheValue = "could not put the value" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go + RouteCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go + ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go + ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go + ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go + ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node" // Warn in ../node/pkg/morph/client/multi.go + ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established" // Info in ../node/pkg/morph/client/multi.go + ClientSwitchingToTheNextRPCNode = "switching to the next RPC node" // Warn in ../node/pkg/morph/client/multi.go + 
ClientCouldNotEstablishConnectionToAnyRPCNode = "could not establish connection to any RPC node" // Error in ../node/pkg/morph/client/multi.go + ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node" // Warn in ../node/pkg/morph/client/multi.go + ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC" // Info in ../node/pkg/morph/client/multi.go + ClientCouldNotRestoreSideChainSubscriptionsUsingNode = "could not restore side chain subscriptions using node" // Warn in ../node/pkg/morph/client/multi.go + ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/morph/client/notary.go + ClientNotaryDepositInvoke = "notary deposit invoke" // Info in ../node/pkg/morph/client/notary.go + ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked" // Debug in ../node/pkg/morph/client/notary.go + ClientNotaryRequestInvoked = "notary request invoked" // Debug in ../node/pkg/morph/client/notary.go + ClientNeoClientInvoke = "neo client invoke" // Debug in ../node/pkg/morph/client/client.go + ClientNativeGasTransferInvoke = "native gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go + ClientBatchGasTransferInvoke = "batch gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go + ClientCantGetBlockchainHeight = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go + ClientCantGetBlockchainHeight243 = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go + EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" // Warn in ../node/pkg/morph/event/utils.go + EventCouldNotStartListenToEvents = "could not start listen to events" // Error in ../node/pkg/morph/event/listener.go + EventStopEventListenerByError = "stop event listener by error" // Error in ../node/pkg/morph/event/listener.go + EventStopEventListenerByContext = "stop event 
listener by context" // Info in ../node/pkg/morph/event/listener.go + EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" // Warn in ../node/pkg/morph/event/listener.go + EventNilNotificationEventWasCaught = "nil notification event was caught" // Warn in ../node/pkg/morph/event/listener.go + EventStopEventListenerByNotaryChannel = "stop event listener by notary channel" // Warn in ../node/pkg/morph/event/listener.go + EventNilNotaryEventWasCaught = "nil notary event was caught" // Warn in ../node/pkg/morph/event/listener.go + EventStopEventListenerByBlockChannel = "stop event listener by block channel" // Warn in ../node/pkg/morph/event/listener.go + EventNilBlockWasCaught = "nil block was caught" // Warn in ../node/pkg/morph/event/listener.go + EventListenerWorkerPoolDrained = "listener worker pool drained" // Warn in ../node/pkg/morph/event/listener.go + EventEventParserNotSet = "event parser not set" // Debug in ../node/pkg/morph/event/listener.go + EventCouldNotParseNotificationEvent = "could not parse notification event" // Warn in ../node/pkg/morph/event/listener.go + EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go + EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event" // Warn in ../node/pkg/morph/event/listener.go + EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event" // Warn in ../node/pkg/morph/event/listener.go + EventNotaryParserNotSet = "notary parser not set" // Debug in ../node/pkg/morph/event/listener.go + EventCouldNotParseNotaryEvent = "could not parse notary event" // Warn in ../node/pkg/morph/event/listener.go + EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go + 
EventIgnoreNilEventParser = "ignore nil event parser" // Info in ../node/pkg/morph/event/listener.go + EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser" // Warn in ../node/pkg/morph/event/listener.go + EventRegisteredNewEventParser = "registered new event parser" // Debug in ../node/pkg/morph/event/listener.go + EventIgnoreNilEventHandler = "ignore nil event handler" // Warn in ../node/pkg/morph/event/listener.go + EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" // Warn in ../node/pkg/morph/event/listener.go + EventRegisteredNewEventHandler = "registered new event handler" // Debug in ../node/pkg/morph/event/listener.go + EventIgnoreNilNotaryEventParser = "ignore nil notary event parser" // Info in ../node/pkg/morph/event/listener.go + EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser" // Warn in ../node/pkg/morph/event/listener.go + EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler" // Warn in ../node/pkg/morph/event/listener.go + EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" // Warn in ../node/pkg/morph/event/listener.go + EventIgnoreNilBlockHandler = "ignore nil block handler" // Warn in ../node/pkg/morph/event/listener.go + SubscriberRemoteNotificationChannelHasBeenClosed = "remote notification channel has been closed" // Warn in ../node/pkg/morph/subscriber/subscriber.go + SubscriberCantCastNotifyEventValueToTheNotifyStruct = "can't cast notify event value to the notify struct" // Error in ../node/pkg/morph/subscriber/subscriber.go + SubscriberNewNotificationEventFromSidechain = "new notification event from sidechain" // Debug in ../node/pkg/morph/subscriber/subscriber.go + SubscriberCantCastBlockEventValueToBlock = "can't cast block event value to block" // Error in ../node/pkg/morph/subscriber/subscriber.go + SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct 
= "can't cast notify event value to the notary request struct" // Error in ../node/pkg/morph/subscriber/subscriber.go + SubscriberUnsupportedNotificationFromTheChain = "unsupported notification from the chain" // Debug in ../node/pkg/morph/subscriber/subscriber.go + BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go + BlobovniczaOpeningBoltDB = "opening BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go + BlobovniczaInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go + BlobovniczaAlreadyInitialized = "already initialized" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go + BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go + BlobovniczaClosingBoltDB = "closing BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go + BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket" // Debug in ../node/pkg/local_object_storage/blobovnicza/delete.go + BlobstorOpening = "opening..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go + BlobstorInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go + BlobstorClosing = "closing..." 
// Debug in ../node/pkg/local_object_storage/blobstor/control.go + BlobstorCouldntCloseStorage = "couldn't close storage" // Info in ../node/pkg/local_object_storage/blobstor/control.go + BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking" // Warn in ../node/pkg/local_object_storage/blobstor/exists.go + BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration" // Warn in ../node/pkg/local_object_storage/blobstor/iterate.go + EngineShardHasBeenRemoved = "shard has been removed" // Info in ../node/pkg/local_object_storage/engine/shards.go + EngineCouldNotCloseRemovedShard = "could not close removed shard" // Error in ../node/pkg/local_object_storage/engine/shards.go + EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go + EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard" // Error in ../node/pkg/local_object_storage/engine/control.go + EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go + EngineCouldNotCloseShard = "could not close shard" // Debug in ../node/pkg/local_object_storage/engine/control.go + EngineCouldNotReloadAShard = "could not reload a shard" // Error in ../node/pkg/local_object_storage/engine/control.go + EngineAddedNewShard = "added new shard" // Info in ../node/pkg/local_object_storage/engine/control.go + EngineCouldNotMarkObjectForShardRelocation = "could not mark object for shard relocation" // Warn in ../node/pkg/local_object_storage/engine/put.go + EngineCouldNotPutObjectToShard = "could not put object to shard" // Warn in ../node/pkg/local_object_storage/engine/put.go + EngineErrorDuringSearchingForObjectChildren = "error during searching for object children" // Warn in ../node/pkg/local_object_storage/engine/delete.go + 
EngineCouldNotInhumeObjectInShard = "could not inhume object in shard" // Debug in ../node/pkg/local_object_storage/engine/delete.go + EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies" // Info in ../node/pkg/local_object_storage/engine/remove_copies.go + EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine" // Debug in ../node/pkg/local_object_storage/engine/remove_copies.go + EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies" // Error in ../node/pkg/local_object_storage/engine/remove_copies.go + EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check" // Warn in ../node/pkg/local_object_storage/engine/inhume.go + EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go + EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go + EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" // Error in ../node/pkg/local_object_storage/engine/engine.go + EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" // Error in ../node/pkg/local_object_storage/engine/engine.go + EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go + EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go + EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request" // Debug in ../node/pkg/local_object_storage/engine/engine.go + EngineStartedShardsEvacuation = "started shards evacuation" // 
Info in ../node/pkg/local_object_storage/engine/evacuate.go + EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully" // Info in ../node/pkg/local_object_storage/engine/evacuate.go + EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error" // Error in ../node/pkg/local_object_storage/engine/evacuate.go + EngineObjectIsMovedToAnotherShard = "object is moved to another shard" // Debug in ../node/pkg/local_object_storage/engine/evacuate.go + MetabaseMissingMatcher = "missing matcher" // Debug in ../node/pkg/local_object_storage/metabase/select.go + MetabaseErrorInFKBTSelection = "error in FKBT selection" // Debug in ../node/pkg/local_object_storage/metabase/select.go + MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf" // Debug in ../node/pkg/local_object_storage/metabase/select.go + MetabaseUnknownOperation = "unknown operation" // Debug in ../node/pkg/local_object_storage/metabase/select.go + MetabaseCantIterateOverTheBucket = "can't iterate over the bucket" // Debug in ../node/pkg/local_object_storage/metabase/select.go + MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets" // Debug in ../node/pkg/local_object_storage/metabase/select.go + MetabaseCreatedDirectoryForMetabase = "created directory for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go + MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go + MetabaseCheckingMetabaseVersion = "checking metabase version" // Debug in ../node/pkg/local_object_storage/metabase/control.go + ShardCantSelectAllObjects = "can't select all objects" // Debug in ../node/pkg/local_object_storage/shard/list.go + ShardSettingShardMode = "setting shard mode" // Info in ../node/pkg/local_object_storage/shard/mode.go + ShardShardModeSetSuccessfully = "shard mode set successfully" // Info in 
../node/pkg/local_object_storage/shard/mode.go + ShardCouldNotMarkObjectForShardRelocationInMetabase = "could not mark object for shard relocation in metabase" // Debug in ../node/pkg/local_object_storage/shard/move.go + ShardCantDeleteObjectFromWriteCache = "can't delete object from write cache" // Warn in ../node/pkg/local_object_storage/shard/delete.go + ShardCantGetStorageIDFromMetabase = "can't get storage ID from metabase" // Debug in ../node/pkg/local_object_storage/shard/delete.go + ShardCantRemoveObjectFromBlobStor = "can't remove object from blobStor" // Debug in ../node/pkg/local_object_storage/shard/delete.go + ShardFetchingObjectWithoutMeta = "fetching object without meta" // Warn in ../node/pkg/local_object_storage/shard/get.go + ShardObjectIsMissingInWritecache = "object is missing in write-cache" // Debug in ../node/pkg/local_object_storage/shard/get.go + ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache" // Error in ../node/pkg/local_object_storage/shard/get.go + ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor" // Debug in ../node/pkg/local_object_storage/shard/put.go + ShardMetaObjectCounterRead = "meta: object counter read" // Warn in ../node/pkg/local_object_storage/shard/shard.go + ShardMetaCantReadContainerList = "meta: can't read container list" // Warn in ../node/pkg/local_object_storage/shard/shard.go + ShardMetaCantReadContainerSize = "meta: can't read container size" // Warn in ../node/pkg/local_object_storage/shard/shard.go + ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode" // Error in ../node/pkg/local_object_storage/shard/control.go + ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode" // Error in ../node/pkg/local_object_storage/shard/control.go + ShardCouldNotUnmarshalObject = "could not unmarshal object" // Warn in ../node/pkg/local_object_storage/shard/control.go + 
ShardCouldNotCloseShardComponent = "could not close shard component" // Error in ../node/pkg/local_object_storage/shard/control.go + ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode" // Error in ../node/pkg/local_object_storage/shard/control.go + ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode" // Error in ../node/pkg/local_object_storage/shard/control.go + ShardTryingToRestoreReadwriteMode = "trying to restore read-write mode" // Info in ../node/pkg/local_object_storage/shard/control.go + ShardStopEventListenerByClosedChannel = "stop event listener by closed channel" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardGCIsStopped = "GC is stopped" // Debug in ../node/pkg/local_object_storage/shard/gc.go + ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..." 
// Info in ../node/pkg/local_object_storage/shard/gc.go + ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardCouldNotDeleteTheObjects = "could not delete the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardCouldNotInhumeTheObjects = "could not inhume the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardStartedExpiredTombstonesHandling = "started expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go + ShardIteratingTombstones = "iterating tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go + ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go + ShardIteratorOverGraveyardFailed = "iterator over graveyard failed" // Error in ../node/pkg/local_object_storage/shard/gc.go + ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch" // Debug in ../node/pkg/local_object_storage/shard/gc.go + ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go + ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardFailureToUnlockObjects = "failure to unlock objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as 
garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go + ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" // Debug in ../node/pkg/local_object_storage/shard/inhume.go + WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache" // Debug in ../node/pkg/local_object_storage/writecache/flush.go + WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" // Info in ../node/pkg/local_object_storage/writecache/mode.go + WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree" // Info in ../node/pkg/local_object_storage/writecache/init.go + WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go + WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database" // Info in ../node/pkg/local_object_storage/writecache/init.go + WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go + WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database" // Error in ../node/pkg/local_object_storage/writecache/storage.go + WritecacheCantParseAddress = "can't parse address" // Error in ../node/pkg/local_object_storage/writecache/storage.go + WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" // Error in ../node/pkg/local_object_storage/writecache/storage.go + BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go + BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza" // Debug in 
../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go + BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go + BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza" // Error in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go + BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go + BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go + BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go + BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go + BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go + BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go + BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go + BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go + BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go + 
BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go + BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go + BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go + BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go + BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go + BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go + BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..." 
// Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go + BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go + AlphabetTick = "tick" // Info in ../node/pkg/innerring/processors/alphabet/handlers.go + AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained" // Warn in ../node/pkg/innerring/processors/alphabet/handlers.go + AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go + AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go + AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go + AlphabetStorageNodeEmissionIsOff = "storage node emission is off" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go + AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go + AlphabetGasEmission = "gas emission" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go + AlphabetCantParseNodePublicKey = "can't parse node public key" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go + AlphabetCantTransferGas = "can't transfer gas" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go + AlphabetCantTransferGasToWallet = "can't transfer gas to wallet" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go + AlphabetAlphabetWorkerPool = "alphabet worker pool" // Debug in ../node/pkg/innerring/processors/alphabet/processor.go + BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // 
Warn in ../node/pkg/innerring/processors/balance/handlers.go + BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go + BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go + BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go + ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go + ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go + ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go + ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go + ContainerCouldNotApprovePutContainer = "could not approve put container" // Error in ../node/pkg/innerring/processors/container/process_container.go + ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" // Info in ../node/pkg/innerring/processors/container/process_container.go + ContainerDeleteContainerCheckFailed = "delete container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go + ContainerCouldNotApproveDeleteContainer = "could not approve delete container" // Error in ../node/pkg/innerring/processors/container/process_container.go + ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL" // Info in ../node/pkg/innerring/processors/container/process_eacl.go + ContainerSetEACLCheckFailed = "set EACL check failed" // Error in ../node/pkg/innerring/processors/container/process_eacl.go + ContainerCouldNotApproveSetEACL = "could not 
approve set EACL" // Error in ../node/pkg/innerring/processors/container/process_eacl.go + FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind" // Info in ../node/pkg/innerring/processors/frostfs/process_bind.go + FrostFSInvalidManageKeyEvent = "invalid manage key event" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go + FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go + FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go + FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go + FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go + FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go + FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go + FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go + FrostFSDoubleMintEmissionDeclined = "double mint emission declined" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go + FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go + FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go + FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go + 
FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go + FrostFSCantCreateLockAccount = "can't create lock account" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go + FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go + FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go + FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go + GovernanceNewEvent = "new event" // Info in ../node/pkg/innerring/processors/governance/handlers.go + GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained" // Warn in ../node/pkg/innerring/processors/governance/handlers.go + GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync" // Info in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net" // Error in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed" // Info in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update" // Info in 
../node/pkg/innerring/processors/governance/process_update.go + GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee" // Error in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceFinishedAlphabetListUpdate = "finished alphabet list update" // Info in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceUpdateOfTheInnerRingList = "update of the inner ring list" // Info in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go + GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract" // Error in ../node/pkg/innerring/processors/governance/process_update.go + NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go + NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go + NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go + NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go + NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" 
// Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go + NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go + NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go + NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_cleanup.go + NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go + NetmapCantGetEpochDuration = "can't get epoch duration" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go + NetmapCantGetTransactionHeight = "can't get transaction height" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go + NetmapCantResetEpochTimer = "can't reset epoch timer" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go + NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go + NetmapCantStartContainerSizeEstimation = "can't start container size estimation" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go + NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" // Info in ../node/pkg/innerring/processors/netmap/process_epoch.go + NetmapNextEpoch = "next epoch" // Debug in ../node/pkg/innerring/processors/netmap/process_epoch.go + NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" // Error in ../node/pkg/innerring/processors/netmap/process_epoch.go + NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go + NetmapNonhaltNotaryTransaction = "non-halt notary transaction" // Warn in 
../node/pkg/innerring/processors/netmap/process_peers.go + NetmapCantParseNetworkMapCandidate = "can't parse network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go + NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go + NetmapApprovingNetworkMapCandidate = "approving network map candidate" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go + NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go + NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go + NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go + NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go // Debug in ../node/pkg/innerring/processors/reputation/processor.go + FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go + FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go + FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go + FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint" // Debug in ../node/pkg/morph/client/constructor.go + FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint" // Info in ../node/pkg/morph/client/constructor.go + FrostFSIRReloadExtraWallets = "reload extra wallets" // Info in ../node/cmd/frostfs-ir/config.go + FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in 
../node/cmd/frostfs-node/grpc.go + FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go + FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go + FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." 
// Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go + FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." 
// Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeTracingConfigationUpdated = "tracing configuration updated" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go + FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go + FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go + FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go + FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go + FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in 
../node/cmd/frostfs-node/notificator.go + FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go + FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go + FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go + FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go + FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go + FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go + FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go + FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeClosingMorphComponents = "closing morph components..." 
// Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go + FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go + FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go + FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go + FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go + FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go + FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go + FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go + FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go + FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go + FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go + FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go + FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal 
event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go + FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go + CommonApplicationStarted = "application started" // Info in ../node/cmd/frostfs-ir/main.go + ShardGCCollectingExpiredObjectsStarted = "collecting expired objects started" + ShardGCCollectingExpiredObjectsCompleted = "collecting expired objects completed" + ShardGCCollectingExpiredLocksStarted = "collecting expired locks started" + ShardGCCollectingExpiredLocksCompleted = "collecting expired locks completed" + ShardGCRemoveGarbageStarted = "garbage remove started" + ShardGCRemoveGarbageCompleted = "garbage remove completed" + ShardGCFailedToGetExpiredWithLinked = "failed to get expired objects with linked" +) diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go index d74adddcc..bd186006e 100644 --- a/pkg/core/client/client.go +++ b/pkg/core/client/client.go @@ -19,8 +19,6 @@ type Client interface { ObjectSearchInit(context.Context, client.PrmObjectSearch) (*client.ObjectListReader, error) ObjectRangeInit(context.Context, client.PrmObjectRange) (*client.ObjectRangeReader, error) ObjectHash(context.Context, client.PrmObjectHash) (*client.ResObjectHash, error) - AnnounceLocalTrust(context.Context, client.PrmAnnounceLocalTrust) (*client.ResAnnounceLocalTrust, error) - AnnounceIntermediateTrust(context.Context, client.PrmAnnounceIntermediateTrust) (*client.ResAnnounceIntermediateTrust, error) ExecRaw(f func(client *rawclient.Client) error) error Close() error } @@ -32,7 +30,7 @@ type MultiAddressClient interface { // RawForAddress must return rawclient.Client // for the passed network.Address. 
- RawForAddress(network.Address, func(cli *rawclient.Client) error) error + RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error ReportError(error) } diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go index 33373b7cc..946cfc462 100644 --- a/pkg/core/object/fmt.go +++ b/pkg/core/object/fmt.go @@ -1,6 +1,7 @@ package object import ( + "context" "crypto/ecdsa" "errors" "fmt" @@ -13,7 +14,6 @@ import ( frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) @@ -42,7 +42,7 @@ type DeleteHandler interface { // LockSource is a source of lock relations between the objects. type LockSource interface { // IsLocked must clarify object's lock status. - IsLocked(address oid.Address) (bool, error) + IsLocked(ctx context.Context, address oid.Address) (bool, error) } // Locker is an object lock storage interface. @@ -64,8 +64,6 @@ var errNoExpirationEpoch = errors.New("missing expiration epoch attribute") var errTombstoneExpiration = errors.New("tombstone body and header contain different expiration values") -var errEmptySGMembers = errors.New("storage group with empty members list") - func defaultCfg() *cfg { return new(cfg) } @@ -89,7 +87,7 @@ func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator { // If unprepared is true, only fields set by user are validated. // // Returns nil error if the object has valid structure. 
-func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error { +func (v *FormatValidator) Validate(ctx context.Context, obj *object.Object, unprepared bool) error { if obj == nil { return errNilObject } @@ -117,7 +115,7 @@ func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error { return fmt.Errorf("(%T) could not validate signature key: %w", v, err) } - if err := v.checkExpiration(obj); err != nil { + if err := v.checkExpiration(ctx, obj); err != nil { return fmt.Errorf("object did not pass expiration check: %w", err) } @@ -128,7 +126,7 @@ func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error { if obj = obj.Parent(); obj != nil { // Parent object already exists. - return v.Validate(obj, false) + return v.Validate(ctx, obj, false) } return nil @@ -179,7 +177,6 @@ func (v *FormatValidator) checkOwnerKey(id user.ID, key frostfsecdsa.PublicKey) // ContentMeta describes FrostFS meta information that brings object's payload if the object // is one of: // - object.TypeTombstone; -// - object.TypeStorageGroup; // - object.TypeLock. type ContentMeta struct { typ object.Type @@ -195,7 +192,6 @@ func (i ContentMeta) Type() object.Type { // Objects returns objects that the original object's payload affects: // - inhumed objects, if the original object is a Tombstone; // - locked objects, if the original object is a Lock; -// - members of a storage group, if the original object is a Storage group; // - nil, if the original object is a Regular object. 
func (i ContentMeta) Objects() []oid.ID { return i.objs @@ -212,10 +208,6 @@ func (v *FormatValidator) ValidateContent(o *object.Object) (ContentMeta, error) if err := v.fillAndValidateTombstoneMeta(o, &meta); err != nil { return ContentMeta{}, err } - case object.TypeStorageGroup: - if err := v.fillAndValidateStorageGroupMeta(o, &meta); err != nil { - return ContentMeta{}, err - } case object.TypeLock: if err := v.fillAndValidateLockMeta(o, &meta); err != nil { return ContentMeta{}, err @@ -265,37 +257,6 @@ func (v *FormatValidator) fillAndValidateLockMeta(o *object.Object, meta *Conten return nil } -func (v *FormatValidator) fillAndValidateStorageGroupMeta(o *object.Object, meta *ContentMeta) error { - if len(o.Payload()) == 0 { - return fmt.Errorf("(%T) empty payload in storage group", v) - } - - var sg storagegroup.StorageGroup - - if err := sg.Unmarshal(o.Payload()); err != nil { - return fmt.Errorf("(%T) could not unmarshal storage group content: %w", v, err) - } - - mm := sg.Members() - meta.objs = mm - - lenMM := len(mm) - if lenMM == 0 { - return errEmptySGMembers - } - - uniqueFilter := make(map[oid.ID]struct{}, lenMM) - - for i := 0; i < lenMM; i++ { - if _, alreadySeen := uniqueFilter[mm[i]]; alreadySeen { - return fmt.Errorf("storage group contains non-unique member: %s", mm[i]) - } - - uniqueFilter[mm[i]] = struct{}{} - } - return nil -} - func (v *FormatValidator) fillAndValidateTombstoneMeta(o *object.Object, meta *ContentMeta) error { if len(o.Payload()) == 0 { return fmt.Errorf("(%T) empty payload in tombstone", v) @@ -327,7 +288,7 @@ func (v *FormatValidator) fillAndValidateTombstoneMeta(o *object.Object, meta *C var errExpired = errors.New("object has expired") -func (v *FormatValidator) checkExpiration(obj *object.Object) error { +func (v *FormatValidator) checkExpiration(ctx context.Context, obj *object.Object) error { exp, err := expirationEpochAttribute(obj) if err != nil { if errors.Is(err, errNoExpirationEpoch) { @@ -348,7 +309,7 @@ func 
(v *FormatValidator) checkExpiration(obj *object.Object) error { addr.SetContainer(cID) addr.SetObject(oID) - locked, err := v.e.IsLocked(addr) + locked, err := v.e.IsLocked(ctx, addr) if err != nil { return fmt.Errorf("locking status check for an expired object: %w", err) } diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index 563c7827d..2cf5099ba 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -1,6 +1,7 @@ package object import ( + "context" "crypto/ecdsa" "strconv" "testing" @@ -11,7 +12,6 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/stretchr/testify/require" @@ -40,7 +40,7 @@ type testLockSource struct { m map[oid.Address]bool } -func (t testLockSource) IsLocked(address oid.Address) (bool, error) { +func (t testLockSource) IsLocked(_ context.Context, address oid.Address) (bool, error) { return t.m[address], nil } @@ -62,20 +62,20 @@ func TestFormatValidator_Validate(t *testing.T) { require.NoError(t, err) t.Run("nil input", func(t *testing.T) { - require.Error(t, v.Validate(nil, true)) + require.Error(t, v.Validate(context.Background(), nil, true)) }) t.Run("nil identifier", func(t *testing.T) { obj := object.New() - require.ErrorIs(t, v.Validate(obj, false), errNilID) + require.ErrorIs(t, v.Validate(context.Background(), obj, false), errNilID) }) t.Run("nil container identifier", func(t *testing.T) { obj := object.New() obj.SetID(oidtest.ID()) - require.ErrorIs(t, v.Validate(obj, true), errNilCID) + require.ErrorIs(t, v.Validate(context.Background(), obj, true), errNilCID) }) t.Run("unsigned object", func(t *testing.T) { @@ -83,7 +83,7 @@ func 
TestFormatValidator_Validate(t *testing.T) { obj.SetContainerID(cidtest.ID()) obj.SetID(oidtest.ID()) - require.Error(t, v.Validate(obj, false)) + require.Error(t, v.Validate(context.Background(), obj, false)) }) t.Run("correct w/ session token", func(t *testing.T) { @@ -101,7 +101,7 @@ func TestFormatValidator_Validate(t *testing.T) { require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj)) - require.NoError(t, v.Validate(obj, false)) + require.NoError(t, v.Validate(context.Background(), obj, false)) }) t.Run("correct w/o session token", func(t *testing.T) { @@ -109,7 +109,7 @@ func TestFormatValidator_Validate(t *testing.T) { require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj)) - require.NoError(t, v.Validate(obj, false)) + require.NoError(t, v.Validate(context.Background(), obj, false)) }) t.Run("tombstone content", func(t *testing.T) { @@ -166,59 +166,6 @@ func TestFormatValidator_Validate(t *testing.T) { require.Equal(t, object.TypeTombstone, contentGot.Type()) }) - t.Run("storage group content", func(t *testing.T) { - obj := object.New() - obj.SetType(object.TypeStorageGroup) - - t.Run("empty payload", func(t *testing.T) { - _, err := v.ValidateContent(obj) - require.Error(t, err) - }) - - var content storagegroup.StorageGroup - content.SetExpirationEpoch(1) // some non-default value - - t.Run("empty members", func(t *testing.T) { - data, err := content.Marshal() - require.NoError(t, err) - - obj.SetPayload(data) - - _, err = v.ValidateContent(obj) - require.ErrorIs(t, err, errEmptySGMembers) - }) - - t.Run("non-unique members", func(t *testing.T) { - id := oidtest.ID() - - content.SetMembers([]oid.ID{id, id}) - - data, err := content.Marshal() - require.NoError(t, err) - - obj.SetPayload(data) - - _, err = v.ValidateContent(obj) - require.Error(t, err) - }) - - t.Run("correct SG", func(t *testing.T) { - ids := []oid.ID{oidtest.ID(), oidtest.ID()} - content.SetMembers(ids) - - data, err := content.Marshal() - 
require.NoError(t, err) - - obj.SetPayload(data) - - content, err := v.ValidateContent(obj) - require.NoError(t, err) - - require.EqualValues(t, ids, content.Objects()) - require.Equal(t, object.TypeStorageGroup, content.Type()) - }) - }) - t.Run("expiration", func(t *testing.T) { fn := func(val string) *object.Object { obj := blankValidObject(&ownerKey.PrivateKey) @@ -236,7 +183,7 @@ func TestFormatValidator_Validate(t *testing.T) { t.Run("invalid attribute value", func(t *testing.T) { val := "text" - err := v.Validate(fn(val), false) + err := v.Validate(context.Background(), fn(val), false) require.Error(t, err) }) @@ -245,7 +192,7 @@ func TestFormatValidator_Validate(t *testing.T) { obj := fn(val) t.Run("non-locked", func(t *testing.T) { - err := v.Validate(obj, false) + err := v.Validate(context.Background(), obj, false) require.ErrorIs(t, err, errExpired) }) @@ -258,14 +205,14 @@ func TestFormatValidator_Validate(t *testing.T) { addr.SetObject(oID) ls.m[addr] = true - err := v.Validate(obj, false) + err := v.Validate(context.Background(), obj, false) require.NoError(t, err) }) }) t.Run("alive object", func(t *testing.T) { val := strconv.FormatUint(curEpoch, 10) - err := v.Validate(fn(val), true) + err := v.Validate(context.Background(), fn(val), true) require.NoError(t, err) }) }) diff --git a/pkg/core/storagegroup/storagegroup.go b/pkg/core/storagegroup/storagegroup.go deleted file mode 100644 index b16e5c61a..000000000 --- a/pkg/core/storagegroup/storagegroup.go +++ /dev/null @@ -1,70 +0,0 @@ -package storagegroup - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup" -) - -// SearchSGPrm groups the parameters which are formed by Processor to search the storage group objects. 
-type SearchSGPrm struct { - Container cid.ID - - NodeInfo client.NodeInfo -} - -// SearchSGDst groups the target values which Processor expects from SG searching to process. -type SearchSGDst struct { - Objects []oid.ID -} - -// GetSGPrm groups parameter of GetSG operation. -type GetSGPrm struct { - OID oid.ID - CID cid.ID - - NetMap netmap.NetMap - Container [][]netmap.NodeInfo -} - -// SGSource is a storage group information source interface. -type SGSource interface { - // ListSG must list storage group objects in the container. Formed list must be written to destination. - // - // Must return any error encountered which did not allow to form the list. - ListSG(context.Context, *SearchSGDst, SearchSGPrm) error - - // GetSG must return storage group object for the provided CID, OID, - // container and netmap state. - GetSG(context.Context, GetSGPrm) (*storagegroup.StorageGroup, error) -} - -// StorageGroup combines storage group object ID and its structure. -type StorageGroup struct { - id oid.ID - sg storagegroup.StorageGroup -} - -// ID returns object ID of the storage group. -func (s StorageGroup) ID() oid.ID { - return s.id -} - -// SetID sets an object ID of the storage group. -func (s *StorageGroup) SetID(id oid.ID) { - s.id = id -} - -// StorageGroup returns the storage group descriptor. -func (s StorageGroup) StorageGroup() storagegroup.StorageGroup { - return s.sg -} - -// SetStorageGroup sets a storage group descriptor. 
-func (s *StorageGroup) SetStorageGroup(sg storagegroup.StorageGroup) { - s.sg = sg -} diff --git a/pkg/innerring/alphabet.go b/pkg/innerring/alphabet.go index 78930db7d..ddb344403 100644 --- a/pkg/innerring/alphabet.go +++ b/pkg/innerring/alphabet.go @@ -104,13 +104,13 @@ func (l GlagoliticLetter) String() string { return "unknown" } -type alphabetContracts map[GlagoliticLetter]util.Uint160 +type AlphabetContracts map[GlagoliticLetter]util.Uint160 -func newAlphabetContracts() alphabetContracts { +func NewAlphabetContracts() AlphabetContracts { return make(map[GlagoliticLetter]util.Uint160, lastLetterNum) } -func (a alphabetContracts) GetByIndex(ind int) (util.Uint160, bool) { +func (a AlphabetContracts) GetByIndex(ind int) (util.Uint160, bool) { if ind < 0 || ind >= int(lastLetterNum) { return util.Uint160{}, false } @@ -120,16 +120,16 @@ func (a alphabetContracts) GetByIndex(ind int) (util.Uint160, bool) { return contract, ok } -func (a alphabetContracts) indexOutOfRange(ind int) bool { +func (a AlphabetContracts) indexOutOfRange(ind int) bool { return ind < 0 && ind >= len(a) } -func (a alphabetContracts) iterate(f func(GlagoliticLetter, util.Uint160)) { +func (a AlphabetContracts) iterate(f func(GlagoliticLetter, util.Uint160)) { for letter, contract := range a { f(letter, contract) } } -func (a *alphabetContracts) set(l GlagoliticLetter, h util.Uint160) { +func (a *AlphabetContracts) set(l GlagoliticLetter, h util.Uint160) { (*a)[l] = h } diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go index 0e10125c3..c4de07a5f 100644 --- a/pkg/innerring/bindings.go +++ b/pkg/innerring/bindings.go @@ -12,7 +12,6 @@ type ( ListenerNotificationHandlers() []event.NotificationHandlerInfo ListenerNotaryParsers() []event.NotaryParserInfo ListenerNotaryHandlers() []event.NotaryHandlerInfo - TimersHandlers() []event.NotificationHandlerInfo } ) diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go index eb74e44d4..ad69f207b 100644 --- 
a/pkg/innerring/blocktimer.go +++ b/pkg/innerring/blocktimer.go @@ -3,11 +3,10 @@ package innerring import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/nspcc-dev/neo-go/pkg/util" @@ -24,14 +23,12 @@ type ( IsAlphabet() bool } - subEpochEventHandler struct { - handler event.Handler // handle to execute - durationMul uint32 // X: X/Y of epoch in blocks - durationDiv uint32 // Y: X/Y of epoch in blocks - } - newEpochHandler func() + containerEstimationStopper interface { + StopEstimation(p container.StopEstimationPrm) error + } + epochTimerArgs struct { l *logger.Logger @@ -39,14 +36,11 @@ type ( newEpochHandlers []newEpochHandler - cnrWrapper *container.Client // to invoke stop container estimation - epoch epochState // to specify which epoch to stop, and epoch duration + cnrWrapper containerEstimationStopper // to invoke stop container estimation + epoch epochState // to specify which epoch to stop, and epoch duration stopEstimationDMul uint32 // X: X/Y of epoch in blocks stopEstimationDDiv uint32 // Y: X/Y of epoch in blocks - - collectBasicIncome subEpochEventHandler - distributeBasicIncome subEpochEventHandler } emitTimerArgs struct { @@ -98,7 +92,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { args.stopEstimationDDiv, func() { if !args.alphabetState.IsAlphabet() { - args.l.Debug("non-alphabet mode, do not stop container estimations") + args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations) return } @@ 
-112,40 +106,12 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { err := args.cnrWrapper.StopEstimation(prm) if err != nil { - args.l.Warn("can't stop epoch estimation", + args.l.Warn(logs.InnerringCantStopEpochEstimation, zap.Uint64("epoch", epochN), zap.String("error", err.Error())) } }) - epochTimer.OnDelta( - args.collectBasicIncome.durationMul, - args.collectBasicIncome.durationDiv, - func() { - epochN := args.epoch.EpochCounter() - if epochN == 0 { // estimates are invalid in genesis epoch - return - } - - args.collectBasicIncome.handler( - settlement.NewBasicIncomeCollectEvent(epochN - 1), - ) - }) - - epochTimer.OnDelta( - args.distributeBasicIncome.durationMul, - args.distributeBasicIncome.durationDiv, - func() { - epochN := args.epoch.EpochCounter() - if epochN == 0 { // estimates are invalid in genesis epoch - return - } - - args.distributeBasicIncome.handler( - settlement.NewBasicIncomeDistributeEvent(epochN - 1), - ) - }) - return epochTimer } diff --git a/pkg/innerring/blocktimer_test.go b/pkg/innerring/blocktimer_test.go new file mode 100644 index 000000000..224aa5c58 --- /dev/null +++ b/pkg/innerring/blocktimer_test.go @@ -0,0 +1,122 @@ +package innerring + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "github.com/stretchr/testify/require" +) + +func TestEpochTimer(t *testing.T) { + t.Parallel() + alphaState := &testAlphabetState{isAlphabet: true} + neh := &testNewEpochHandler{} + cnrStopper := &testContainerEstStopper{} + epochState := &testEpochState{ + counter: 99, + duration: 10, + } + + args := &epochTimerArgs{ + l: test.NewLogger(t, true), + alphabetState: alphaState, + newEpochHandlers: []newEpochHandler{neh.Handle}, + cnrWrapper: cnrStopper, + epoch: epochState, + stopEstimationDMul: 2, + stopEstimationDDiv: 10, + } + et := newEpochTimer(args) + err := et.Reset() + require.NoError(t, err, "failed to reset 
timer") + + et.Tick(100) + require.Equal(t, 0, neh.called, "invalid new epoch handler calls") + require.Equal(t, 0, cnrStopper.called, "invalid container stop handler calls") + + et.Tick(101) + require.Equal(t, 0, neh.called, "invalid new epoch handler calls") + require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") + + et.Tick(102) + require.Equal(t, 0, neh.called, "invalid new epoch handler calls") + require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") + + et.Tick(103) + require.Equal(t, 0, neh.called, "invalid new epoch handler calls") + require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") + + var h uint32 + for h = 104; h < 109; h++ { + et.Tick(h) + require.Equal(t, 0, neh.called, "invalid new epoch handler calls") + require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") + } + + et.Tick(109) + require.Equal(t, 1, neh.called, "invalid new epoch handler calls") + require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") + + et.Tick(110) + require.Equal(t, 1, neh.called, "invalid new epoch handler calls") + require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") + + et.Tick(111) + require.Equal(t, 1, neh.called, "invalid new epoch handler calls") + require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") + + et.Tick(112) + require.Equal(t, 1, neh.called, "invalid new epoch handler calls") + require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") + + et.Tick(113) + require.Equal(t, 1, neh.called, "invalid new epoch handler calls") + require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") + + for h = 114; h < 119; h++ { + et.Tick(h) + require.Equal(t, 1, neh.called, "invalid new epoch handler calls") + require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") + } + et.Tick(120) + require.Equal(t, 2, neh.called, "invalid new epoch handler 
calls") + require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") +} + +type testAlphabetState struct { + isAlphabet bool +} + +func (s *testAlphabetState) IsAlphabet() bool { + return s.isAlphabet +} + +type testNewEpochHandler struct { + called int +} + +func (h *testNewEpochHandler) Handle() { + h.called++ +} + +type testContainerEstStopper struct { + called int +} + +func (s *testContainerEstStopper) StopEstimation(_ container.StopEstimationPrm) error { + s.called++ + return nil +} + +type testEpochState struct { + counter uint64 + duration uint64 +} + +func (s *testEpochState) EpochCounter() uint64 { + return s.counter +} +func (s *testEpochState) EpochDuration() uint64 { + return s.duration +} diff --git a/pkg/innerring/config/fee_test.go b/pkg/innerring/config/fee_test.go new file mode 100644 index 000000000..a0c56aac1 --- /dev/null +++ b/pkg/innerring/config/fee_test.go @@ -0,0 +1,68 @@ +package config + +import ( + "strings" + "testing" + + "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" +) + +func TestConfig(t *testing.T) { + t.Parallel() + t.Run("all set", func(t *testing.T) { + t.Parallel() + file := strings.NewReader( + ` +fee: + main_chain: 50000000 + side_chain: 200000000 + named_container_register: 2500000000 +`, + ) + v := viper.New() + v.SetConfigType("yaml") + err := v.ReadConfig(file) + require.NoError(t, err, "read config file failed") + + config := NewFeeConfig(v) + require.Equal(t, fixedn.Fixed8(50000000), config.MainChainFee(), "main chain fee invalid") + require.Equal(t, fixedn.Fixed8(200000000), config.SideChainFee(), "side chain fee invalid") + require.Equal(t, fixedn.Fixed8(2500000000), config.NamedContainerRegistrationFee(), "named container register fee invalid") + }) + + t.Run("nothing set", func(t *testing.T) { + t.Parallel() + file := strings.NewReader("") + v := viper.New() + v.SetConfigType("yaml") + err := v.ReadConfig(file) + 
require.NoError(t, err, "read config file failed") + + config := NewFeeConfig(v) + require.Equal(t, fixedn.Fixed8(0), config.MainChainFee(), "main chain fee invalid") + require.Equal(t, fixedn.Fixed8(0), config.SideChainFee(), "side chain fee invalid") + require.Equal(t, fixedn.Fixed8(0), config.NamedContainerRegistrationFee(), "named container register fee invalid") + }) + + t.Run("partially set", func(t *testing.T) { + t.Parallel() + file := strings.NewReader( + ` +fee: + main_chain: 10 +`, + ) + v := viper.New() + v.SetConfigType("yaml") + err := v.ReadConfig(file) + require.NoError(t, err, "read config file failed") + + config := NewFeeConfig(v) + require.Equal(t, fixedn.Fixed8(10), config.MainChainFee(), "main chain fee invalid") + require.Equal(t, fixedn.Fixed8(0), config.SideChainFee(), "side chain fee invalid") + require.Equal(t, fixedn.Fixed8(0), config.NamedContainerRegistrationFee(), "named container register fee invalid") + }) + +} diff --git a/pkg/innerring/contracts.go b/pkg/innerring/contracts.go index c280eb4bf..55c2ff582 100644 --- a/pkg/innerring/contracts.go +++ b/pkg/innerring/contracts.go @@ -15,17 +15,14 @@ type contracts struct { netmap util.Uint160 // in morph balance util.Uint160 // in morph container util.Uint160 // in morph - audit util.Uint160 // in morph proxy util.Uint160 // in morph processing util.Uint160 // in mainnet - reputation util.Uint160 // in morph - subnet util.Uint160 // in morph frostfsID util.Uint160 // in morph - alphabet alphabetContracts // in morph + alphabet AlphabetContracts // in morph } -func parseContracts(cfg *viper.Viper, morph *client.Client, withoutMainNet, withoutMainNotary, withoutSideNotary bool) (*contracts, error) { +func parseContracts(cfg *viper.Viper, morph nnsResolver, withoutMainNet, withoutMainNotary, withoutSideNotary bool) (*contracts, error) { var ( result = new(contracts) err error @@ -60,9 +57,6 @@ func parseContracts(cfg *viper.Viper, morph *client.Client, withoutMainNet, with 
{"contracts.netmap", client.NNSNetmapContractName, &result.netmap}, {"contracts.balance", client.NNSBalanceContractName, &result.balance}, {"contracts.container", client.NNSContainerContractName, &result.container}, - {"contracts.audit", client.NNSAuditContractName, &result.audit}, - {"contracts.reputation", client.NNSReputationContractName, &result.reputation}, - {"contracts.subnet", client.NNSSubnetworkContractName, &result.subnet}, {"contracts.frostfsid", client.NNSFrostFSIDContractName, &result.frostfsID}, } @@ -82,9 +76,9 @@ func parseContracts(cfg *viper.Viper, morph *client.Client, withoutMainNet, with return result, nil } -func parseAlphabetContracts(cfg *viper.Viper, morph *client.Client) (alphabetContracts, error) { +func parseAlphabetContracts(cfg *viper.Viper, morph nnsResolver) (AlphabetContracts, error) { num := GlagoliticLetter(cfg.GetUint("contracts.alphabet.amount")) - alpha := newAlphabetContracts() + alpha := NewAlphabetContracts() if num > lastLetterNum { return nil, fmt.Errorf("amount of alphabet contracts overflows glagolitsa %d > %d", num, lastLetterNum) @@ -121,7 +115,7 @@ func parseAlphabetContracts(cfg *viper.Viper, morph *client.Client) (alphabetCon return alpha, nil } -func parseContract(cfg *viper.Viper, morph *client.Client, cfgName, nnsName string) (res util.Uint160, err error) { +func parseContract(cfg *viper.Viper, morph nnsResolver, cfgName, nnsName string) (res util.Uint160, err error) { contractStr := cfg.GetString(cfgName) if len(contractStr) == 0 { return morph.NNSContractAddress(nnsName) @@ -129,3 +123,7 @@ func parseContract(cfg *viper.Viper, morph *client.Client, cfgName, nnsName stri return util.Uint160DecodeStringLE(contractStr) } + +type nnsResolver interface { + NNSContractAddress(name string) (sh util.Uint160, err error) +} diff --git a/pkg/innerring/contracts_test.go b/pkg/innerring/contracts_test.go new file mode 100644 index 000000000..e04e22f06 --- /dev/null +++ b/pkg/innerring/contracts_test.go @@ -0,0 +1,213 @@ 
+package innerring + +import ( + "strings" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" +) + +func TestParseContractsSuccess(t *testing.T) { + t.Parallel() + file := strings.NewReader(` +contracts: + frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62 + processing: 597f5894867113a41e192801709c02497f611de8 + audit: 219e37aed2180b87e7fe945dbf97d67125e8d73f + balance: d2aa48d14b17b11bc4c68205027884a96706dd16 + container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 + frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a + netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 + proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c + alphabet: + amount: 2 + az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea + buky: e2ba789320899658b100f331bdebb74474757920 +`) + + v := viper.New() + v.SetConfigType("yaml") + err := v.ReadConfig(file) + require.NoError(t, err, "read config file failed") + + t.Run("all enabled", func(t *testing.T) { + t.Parallel() + c, err := parseContracts(v, nil, false, false, false) + require.NoError(t, err, "failed to parse contracts") + + frostfsExp, _ := util.Uint160DecodeStringLE("ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62") + require.Equal(t, frostfsExp, c.frostfs, "invalid frostfs") + + processingExp, _ := util.Uint160DecodeStringLE("597f5894867113a41e192801709c02497f611de8") + require.Equal(t, processingExp, c.processing, "invalid processing") + + balanceExp, _ := util.Uint160DecodeStringLE("d2aa48d14b17b11bc4c68205027884a96706dd16") + require.Equal(t, balanceExp, c.balance, "invalid balance") + + containerExp, _ := util.Uint160DecodeStringLE("ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6") + require.Equal(t, containerExp, c.container, "invalid container") + + frostfsIDExp, _ := util.Uint160DecodeStringLE("9f5866decbc751a099e74c7c7bc89f609201755a") + require.Equal(t, frostfsIDExp, c.frostfsID, "invalid frostfsID") + + netmapIDExp, _ := 
util.Uint160DecodeStringLE("83c600c81d47a1b1b7cf58eb49ae7ee7240dc742") + require.Equal(t, netmapIDExp, c.netmap, "invalid netmap") + + proxyExp, _ := util.Uint160DecodeStringLE("abc8794bb40a21f2db5f21ae62741eb46c8cad1c") + require.Equal(t, proxyExp, c.proxy, "invalid proxy") + + require.Equal(t, 2, len(c.alphabet), "invalid alphabet contracts length") + + azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea") + require.Equal(t, azExp, c.alphabet[az], "invalid az") + + bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920") + require.Equal(t, bukyExp, c.alphabet[buky], "invalid buky") + }) + + t.Run("all disabled", func(t *testing.T) { + t.Parallel() + c, err := parseContracts(v, nil, true, true, true) + require.NoError(t, err, "failed to parse contracts") + + require.Equal(t, util.Uint160{}, c.frostfs, "invalid frostfs") + + require.Equal(t, util.Uint160{}, c.processing, "invalid processing") + + balanceExp, _ := util.Uint160DecodeStringLE("d2aa48d14b17b11bc4c68205027884a96706dd16") + require.Equal(t, balanceExp, c.balance, "invalid balance") + + containerExp, _ := util.Uint160DecodeStringLE("ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6") + require.Equal(t, containerExp, c.container, "invalid container") + + frostfsIDExp, _ := util.Uint160DecodeStringLE("9f5866decbc751a099e74c7c7bc89f609201755a") + require.Equal(t, frostfsIDExp, c.frostfsID, "invalid frostfsID") + + netmapIDExp, _ := util.Uint160DecodeStringLE("83c600c81d47a1b1b7cf58eb49ae7ee7240dc742") + require.Equal(t, netmapIDExp, c.netmap, "invalid netmap") + + require.Equal(t, util.Uint160{}, c.proxy, "invalid proxy") + + require.Equal(t, 2, len(c.alphabet), "invalid alphabet contracts length") + + azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea") + require.Equal(t, azExp, c.alphabet[az], "invalid az") + + bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920") + require.Equal(t, bukyExp, 
c.alphabet[buky], "invalid buky") + }) + + t.Run("main notary & side notary disabled", func(t *testing.T) { + t.Parallel() + c, err := parseContracts(v, nil, false, true, true) + require.NoError(t, err, "failed to parse contracts") + + frostfsExp, _ := util.Uint160DecodeStringLE("ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62") + require.Equal(t, frostfsExp, c.frostfs, "invalid frostfs") + + require.Equal(t, util.Uint160{}, c.processing, "invalid processing") + + balanceExp, _ := util.Uint160DecodeStringLE("d2aa48d14b17b11bc4c68205027884a96706dd16") + require.Equal(t, balanceExp, c.balance, "invalid balance") + + containerExp, _ := util.Uint160DecodeStringLE("ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6") + require.Equal(t, containerExp, c.container, "invalid container") + + frostfsIDExp, _ := util.Uint160DecodeStringLE("9f5866decbc751a099e74c7c7bc89f609201755a") + require.Equal(t, frostfsIDExp, c.frostfsID, "invalid frostfsID") + + netmapIDExp, _ := util.Uint160DecodeStringLE("83c600c81d47a1b1b7cf58eb49ae7ee7240dc742") + require.Equal(t, netmapIDExp, c.netmap, "invalid netmap") + + require.Equal(t, util.Uint160{}, c.proxy, "invalid proxy") + + require.Equal(t, 2, len(c.alphabet), "invalid alphabet contracts length") + + azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea") + require.Equal(t, azExp, c.alphabet[az], "invalid az") + + bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920") + require.Equal(t, bukyExp, c.alphabet[buky], "invalid buky") + }) +} + +func TestParseContractsInvalid(t *testing.T) { + t.Parallel() + t.Run("invalid audit contract", func(t *testing.T) { + t.Parallel() + file := strings.NewReader(` +contracts: + frostfs: invalid_data + processing: 597f5894867113a41e192801709c02497f611de8 + audit: 219e37aed2180b87e7fe945dbf97d67125e8d73f + balance: d2aa48d14b17b11bc4c68205027884a96706dd16 + container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 + frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a + 
netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 + proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c + alphabet: + amount: 2 + az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea + buky: e2ba789320899658b100f331bdebb74474757920 +`) + + v := viper.New() + v.SetConfigType("yaml") + err := v.ReadConfig(file) + require.NoError(t, err, "read config file failed") + + _, err = parseContracts(v, nil, false, false, false) + require.Error(t, err, "unexpected success") + }) + + t.Run("invalid alphabet count", func(t *testing.T) { + t.Parallel() + file := strings.NewReader(` +contracts: + frostfs: ee3dee6d05dc79c24a5b8f6985e10d68b7cacc62 + processing: 597f5894867113a41e192801709c02497f611de8 + audit: 219e37aed2180b87e7fe945dbf97d67125e8d73f + balance: d2aa48d14b17b11bc4c68205027884a96706dd16 + container: ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6 + frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a + netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 + proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c + alphabet: + amount: 3 + az: c1d211fceeb4b1dc76b8e4054d11fdf887e418ea + buky: e2ba789320899658b100f331bdebb74474757920 +`) + + v := viper.New() + v.SetConfigType("yaml") + err := v.ReadConfig(file) + require.NoError(t, err, "read config file failed") + + azExp, _ := util.Uint160DecodeStringLE("c1d211fceeb4b1dc76b8e4054d11fdf887e418ea") + bukyExp, _ := util.Uint160DecodeStringLE("e2ba789320899658b100f331bdebb74474757920") + + morph := &testParserMorph{ + values: map[string]util.Uint160{ + "az": azExp, + "buky": bukyExp, + }, + } + + _, err = parseContracts(v, morph, false, false, false) + require.ErrorContains(t, err, "could not read all contracts: required 3, read 2", "unexpected success") + }) +} + +type testParserMorph struct { + values map[string]util.Uint160 +} + +func (m *testParserMorph) NNSContractAddress(name string) (sh util.Uint160, err error) { + if value, found := m.values[name]; found { + return value, nil + } + return util.Uint160{}, client.ErrNNSRecordNotFound +} diff --git 
a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go new file mode 100644 index 000000000..493ae92de --- /dev/null +++ b/pkg/innerring/indexer_test.go @@ -0,0 +1,225 @@ +package innerring + +import ( + "fmt" + "testing" + "time" + + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" +) + +func TestIndexerReturnsIndexes(t *testing.T) { + t.Parallel() + commiteeKeys, err := keys.NewPublicKeysFromStrings([]string{ + "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae", + "022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131", + }) + require.NoError(t, err, "convert string to commitee public keys failed") + cf := &testCommiteeFetcher{ + keys: commiteeKeys, + } + + irKeys, err := keys.NewPublicKeysFromStrings([]string{ + "038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35", + "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3", + "022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131", + }) + require.NoError(t, err, "convert string to IR public keys failed") + irf := &testIRFetcher{ + keys: irKeys, + } + + t.Run("success", func(t *testing.T) { + t.Parallel() + key := irKeys[2] + + indexer := newInnerRingIndexer(cf, irf, key, time.Second) + + idx, err := indexer.AlphabetIndex() + require.NoError(t, err, "failed to get alphabet index") + require.Equal(t, int32(1), idx, "invalid alphabet index") + + idx, err = indexer.InnerRingIndex() + require.NoError(t, err, "failed to get IR index") + require.Equal(t, int32(2), idx, "invalid IR index") + + size, err := indexer.InnerRingSize() + require.NoError(t, err, "failed to get IR size") + require.Equal(t, int32(3), size, "invalid IR size") + }) + + t.Run("not found alphabet", func(t *testing.T) { + t.Parallel() + key := irKeys[0] + + indexer := newInnerRingIndexer(cf, irf, key, time.Second) + + idx, err := indexer.AlphabetIndex() + require.NoError(t, err, "failed to get alphabet index") + 
require.Equal(t, int32(-1), idx, "invalid alphabet index") + + idx, err = indexer.InnerRingIndex() + require.NoError(t, err, "failed to get IR index") + require.Equal(t, int32(0), idx, "invalid IR index") + }) + + t.Run("not found IR", func(t *testing.T) { + t.Parallel() + key := commiteeKeys[0] + + indexer := newInnerRingIndexer(cf, irf, key, time.Second) + + idx, err := indexer.AlphabetIndex() + require.NoError(t, err, "failed to get alphabet index") + require.Equal(t, int32(0), idx, "invalid alphabet index") + + idx, err = indexer.InnerRingIndex() + require.NoError(t, err, "failed to get IR index") + require.Equal(t, int32(-1), idx, "invalid IR index") + }) +} + +func TestIndexerCachesIndexes(t *testing.T) { + t.Parallel() + commiteeKeys, err := keys.NewPublicKeysFromStrings([]string{}) + require.NoError(t, err, "convert string to commitee public keys failed") + cf := &testCommiteeFetcher{ + keys: commiteeKeys, + } + + irKeys, err := keys.NewPublicKeysFromStrings([]string{}) + require.NoError(t, err, "convert string to IR public keys failed") + irf := &testIRFetcher{ + keys: irKeys, + } + + key, err := keys.NewPublicKeyFromString("022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131") + require.NoError(t, err, "convert string to public key failed") + + indexer := newInnerRingIndexer(cf, irf, key, time.Second) + + idx, err := indexer.AlphabetIndex() + require.NoError(t, err, "failed to get alphabet index") + require.Equal(t, int32(-1), idx, "invalid alphabet index") + + idx, err = indexer.InnerRingIndex() + require.NoError(t, err, "failed to get IR index") + require.Equal(t, int32(-1), idx, "invalid IR index") + + size, err := indexer.InnerRingSize() + require.NoError(t, err, "failed to get IR size") + require.Equal(t, int32(0), size, "invalid IR size") + + require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count") + require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count") + + idx, err = indexer.AlphabetIndex() + 
require.NoError(t, err, "failed to get alphabet index") + require.Equal(t, int32(-1), idx, "invalid alphabet index") + + idx, err = indexer.InnerRingIndex() + require.NoError(t, err, "failed to get IR index") + require.Equal(t, int32(-1), idx, "invalid IR index") + + size, err = indexer.InnerRingSize() + require.NoError(t, err, "failed to get IR size") + require.Equal(t, int32(0), size, "invalid IR size") + + require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count") + require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count") + + time.Sleep(2 * time.Second) + + idx, err = indexer.AlphabetIndex() + require.NoError(t, err, "failed to get alphabet index") + require.Equal(t, int32(-1), idx, "invalid alphabet index") + + idx, err = indexer.InnerRingIndex() + require.NoError(t, err, "failed to get IR index") + require.Equal(t, int32(-1), idx, "invalid IR index") + + size, err = indexer.InnerRingSize() + require.NoError(t, err, "failed to get IR size") + require.Equal(t, int32(0), size, "invalid IR size") + + require.Equal(t, int32(2), cf.calls.Load(), "invalid commitee calls count") + require.Equal(t, int32(2), irf.calls.Load(), "invalid IR calls count") +} + +func TestIndexerThrowsErrors(t *testing.T) { + t.Parallel() + cf := &testCommiteeFetcher{ + err: fmt.Errorf("test commitee error"), + } + + irKeys, err := keys.NewPublicKeysFromStrings([]string{}) + require.NoError(t, err, "convert string to IR public keys failed") + irf := &testIRFetcher{ + keys: irKeys, + } + + key, err := keys.NewPublicKeyFromString("022bb4041c50d607ff871dec7e4cd7778388e0ea6849d84ccbd9aa8f32e16a8131") + require.NoError(t, err, "convert string to public key failed") + + indexer := newInnerRingIndexer(cf, irf, key, time.Second) + + idx, err := indexer.AlphabetIndex() + require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed") + require.Equal(t, int32(0), idx, "invalid alphabet index") + + idx, err = indexer.InnerRingIndex() + 
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") + require.Equal(t, int32(0), idx, "invalid IR index") + + size, err := indexer.InnerRingSize() + require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") + require.Equal(t, int32(0), size, "invalid IR size") + + commiteeKeys, err := keys.NewPublicKeysFromStrings([]string{}) + require.NoError(t, err, "convert string to commitee public keys failed") + cf = &testCommiteeFetcher{ + keys: commiteeKeys, + } + + irf = &testIRFetcher{ + err: fmt.Errorf("test IR error"), + } + + indexer = newInnerRingIndexer(cf, irf, key, time.Second) + + idx, err = indexer.AlphabetIndex() + require.ErrorContains(t, err, "test IR error", "error from commitee not throwed") + require.Equal(t, int32(0), idx, "invalid alphabet index") + + idx, err = indexer.InnerRingIndex() + require.ErrorContains(t, err, "test IR error", "error from IR not throwed") + require.Equal(t, int32(0), idx, "invalid IR index") + + size, err = indexer.InnerRingSize() + require.ErrorContains(t, err, "test IR error", "error from IR not throwed") + require.Equal(t, int32(0), size, "invalid IR size") +} + +type testCommiteeFetcher struct { + keys keys.PublicKeys + err error + calls atomic.Int32 +} + +func (f *testCommiteeFetcher) Committee() (keys.PublicKeys, error) { + f.calls.Inc() + return f.keys, f.err +} + +type testIRFetcher struct { + keys keys.PublicKeys + err error + calls atomic.Int32 +} + +func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { + f.calls.Inc() + return f.keys, f.err +} diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 8db6328a2..89269d50d 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -6,8 +6,8 @@ import ( "fmt" "net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/balance" cont "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/frostfs" @@ -16,30 +16,18 @@ import ( nodevalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation" addrvalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/maddress" statevalidation "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" - subnetvalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/subnet" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement" - auditSettlement "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit" balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" frostfsClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation" - morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - audittask "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/taskmanager" control 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" controlsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" - reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - util2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/panjf2000/ants/v2" "github.com/spf13/viper" "go.uber.org/zap" "google.golang.org/grpc" @@ -47,20 +35,12 @@ import ( func (s *Server) initNetmapProcessor(cfg *viper.Viper, cnrClient *container.Client, - alphaSync event.Handler, - subnetClient *morphsubnet.Client, - auditProcessor *audit.Processor, - settlementProcessor *settlement.Processor) error { + alphaSync event.Handler) error { locodeValidator, err := s.newLocodeValidator(cfg) if err != nil { return err } - subnetValidator, err := subnetvalidator.New( - subnetvalidator.Prm{ - SubnetClient: subnetClient, - }, - ) if err != nil { return err } @@ -73,22 +53,16 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper, s.netmapProcessor, err = netmap.New(&netmap.Params{ Log: s.log, PoolSize: cfg.GetInt("workers.netmap"), - NetmapClient: s.netmapClient, + NetmapClient: netmap.NewNetmapClient(s.netmapClient), EpochTimer: s, EpochState: s, AlphabetState: s, CleanupEnabled: cfg.GetBool("netmap_cleaner.enabled"), CleanupThreshold: cfg.GetUint64("netmap_cleaner.threshold"), ContainerWrapper: cnrClient, - HandleAudit: s.onlyActiveEventHandler( - auditProcessor.StartAuditHandler(), - ), NotaryDepositHandler: s.onlyAlphabetEventHandler( s.notaryHandler, ), - AuditSettlementsHandler: s.onlyAlphabetEventHandler( - settlementProcessor.HandleAuditEvent, - ), AlphabetSyncHandler: s.onlyAlphabetEventHandler( alphaSync, ), @@ -96,10 +70,8 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper, &netMapCandidateStateValidator, 
addrvalidator.New(), locodeValidator, - subnetValidator, ), NotaryDisabled: s.sideNotaryConfig.disabled, - SubnetContract: &s.contracts.subnet, NodeStateSettings: netSettings, }) @@ -129,7 +101,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain * fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey) if err != nil { fromMainChainBlock = 0 - s.log.Warn("can't get last processed main chain block number", zap.String("error", err.Error())) + s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error())) } mainnetChain.from = fromMainChainBlock @@ -171,111 +143,24 @@ func (s *Server) enableNotarySupport() error { return nil } -func (s *Server) initNotaryConfig(cfg *viper.Viper) { +func (s *Server) initNotaryConfig() { s.mainNotaryConfig, s.sideNotaryConfig = notaryConfigs( s.morphClient.ProbeNotary(), !s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too ) - s.log.Info("notary support", + s.log.Info(logs.InnerringNotarySupport, zap.Bool("sidechain_enabled", !s.sideNotaryConfig.disabled), zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled), ) } -func (s *Server) createAuditProcessor(cfg *viper.Viper, clientCache *ClientCache, cnrClient *container.Client) (*audit.Processor, error) { - auditPool, err := ants.NewPool(cfg.GetInt("audit.task.exec_pool_size")) - if err != nil { - return nil, err - } - - pdpPoolSize := cfg.GetInt("audit.pdp.pairs_pool_size") - porPoolSize := cfg.GetInt("audit.por.pool_size") - - // create audit processor dependencies - auditTaskManager := audittask.New( - audittask.WithQueueCapacity(cfg.GetUint32("audit.task.queue_capacity")), - audittask.WithWorkerPool(auditPool), - audittask.WithLogger(s.log), - audittask.WithContainerCommunicator(clientCache), - audittask.WithMaxPDPSleepInterval(cfg.GetDuration("audit.pdp.max_sleep_interval")), - audittask.WithPDPWorkerPoolGenerator(func() 
(util2.WorkerPool, error) { - return ants.NewPool(pdpPoolSize) - }), - audittask.WithPoRWorkerPoolGenerator(func() (util2.WorkerPool, error) { - return ants.NewPool(porPoolSize) - }), - ) - - s.workers = append(s.workers, auditTaskManager.Listen) - - // create audit processor - return audit.New(&audit.Params{ - Log: s.log, - NetmapClient: s.netmapClient, - ContainerClient: cnrClient, - IRList: s, - EpochSource: s, - SGSource: clientCache, - Key: &s.key.PrivateKey, - RPCSearchTimeout: cfg.GetDuration("audit.timeout.search"), - TaskManager: auditTaskManager, - Reporter: s, - }) -} - -func (s *Server) createSettlementProcessor(clientCache *ClientCache, cnrClient *container.Client) *settlement.Processor { - // create settlement processor dependencies - settlementDeps := settlementDeps{ - log: s.log, - cnrSrc: container.AsContainerSource(cnrClient), - auditClient: s.auditClient, - nmClient: s.netmapClient, - clientCache: clientCache, - balanceClient: s.balanceClient, - } - - settlementDeps.settlementCtx = auditSettlementContext - auditCalcDeps := &auditSettlementDeps{ - settlementDeps: settlementDeps, - } - - settlementDeps.settlementCtx = basicIncomeSettlementContext - basicSettlementDeps := &basicIncomeSettlementDeps{ - settlementDeps: settlementDeps, - cnrClient: cnrClient, - } - - auditSettlementCalc := auditSettlement.NewCalculator( - &auditSettlement.CalculatorPrm{ - ResultStorage: auditCalcDeps, - ContainerStorage: auditCalcDeps, - PlacementCalculator: auditCalcDeps, - SGStorage: auditCalcDeps, - AccountStorage: auditCalcDeps, - Exchanger: auditCalcDeps, - AuditFeeFetcher: s.netmapClient, - }, - auditSettlement.WithLogger(s.log), - ) - - // create settlement processor - return settlement.New( - settlement.Prm{ - AuditProcessor: (*auditSettlementCalculator)(auditSettlementCalc), - BasicIncome: &basicSettlementConstructor{dep: basicSettlementDeps}, - State: s, - }, - settlement.WithLogger(s.log), - ) -} - func (s *Server) createAlphaSync(cfg *viper.Viper, 
frostfsCli *frostfsClient.Client, irf irFetcher) (event.Handler, error) { var alphaSync event.Handler if s.withoutMainNet || cfg.GetBool("governance.disable") { alphaSync = func(event.Event) { - s.log.Debug("alphabet keys sync is disabled") + s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled) } } else { // create governance processor @@ -319,7 +204,7 @@ func (s *Server) createIRFetcher() irFetcher { return irf } -func (s *Server) initTimers(cfg *viper.Viper, processors *serverProcessors, morphClients *serverMorphClients) { +func (s *Server) initTimers(cfg *viper.Viper, morphClients *serverMorphClients) { s.epochTimer = newEpochTimer(&epochTimerArgs{ l: s.log, alphabetState: s, @@ -328,58 +213,27 @@ func (s *Server) initTimers(cfg *viper.Viper, processors *serverProcessors, morp epoch: s, stopEstimationDMul: cfg.GetUint32("timers.stop_estimation.mul"), stopEstimationDDiv: cfg.GetUint32("timers.stop_estimation.div"), - collectBasicIncome: subEpochEventHandler{ - handler: processors.SettlementProcessor.HandleIncomeCollectionEvent, - durationMul: cfg.GetUint32("timers.collect_basic_income.mul"), - durationDiv: cfg.GetUint32("timers.collect_basic_income.div"), - }, - distributeBasicIncome: subEpochEventHandler{ - handler: processors.SettlementProcessor.HandleIncomeDistributionEvent, - durationMul: cfg.GetUint32("timers.distribute_basic_income.mul"), - durationDiv: cfg.GetUint32("timers.distribute_basic_income.div"), - }, }) s.addBlockTimer(s.epochTimer) // initialize emission timer emissionTimer := newEmissionTimer(&emitTimerArgs{ - ap: processors.AlphabetProcessor, + ap: s.alphabetProcessor, emitDuration: cfg.GetUint32("timers.emit"), }) s.addBlockTimer(emissionTimer) } -func (s *Server) createMorphSubnetClient() (*morphsubnet.Client, error) { - // initialize morph client of Subnet contract - clientMode := morphsubnet.NotaryAlphabet - - if s.sideNotaryConfig.disabled { - clientMode = morphsubnet.NonNotary - } - - subnetInitPrm := morphsubnet.InitPrm{} - 
subnetInitPrm.SetBaseClient(s.morphClient) - subnetInitPrm.SetContractAddress(s.contracts.subnet) - subnetInitPrm.SetMode(clientMode) - - subnetClient := &morphsubnet.Client{} - err := subnetClient.Init(subnetInitPrm) - if err != nil { - return nil, fmt.Errorf("could not initialize subnet client: %w", err) - } - return subnetClient, nil -} - -func (s *Server) initAlphabetProcessor(cfg *viper.Viper) (*alphabet.Processor, error) { +func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error { parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets")) if err != nil { - return nil, err + return err } // create alphabet processor - alphabetProcessor, err := alphabet.New(&alphabet.Params{ + s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ ParsedWallets: parsedWallets, Log: s.log, PoolSize: cfg.GetInt("workers.alphabet"), @@ -390,19 +244,19 @@ func (s *Server) initAlphabetProcessor(cfg *viper.Viper) (*alphabet.Processor, e StorageEmission: cfg.GetUint64("emit.storage.amount"), }) if err != nil { - return nil, err + return err } - err = bindMorphProcessor(alphabetProcessor, s) + err = bindMorphProcessor(s.alphabetProcessor, s) if err != nil { - return nil, err + return err } - return alphabetProcessor, nil + return nil } func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client, - frostfsIDClient *frostfsid.Client, subnetClient *morphsubnet.Client) error { + frostfsIDClient *frostfsid.Client) error { // container processor containerProcessor, err := cont.New(&cont.Params{ Log: s.log, @@ -412,7 +266,6 @@ func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.C FrostFSIDClient: frostfsIDClient, NetworkState: s.netmapClient, NotaryDisabled: s.sideNotaryConfig.disabled, - SubnetClient: subnetClient, }) if err != nil { return err @@ -466,37 +319,10 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper, frostfsIDClient * return bindMainnetProcessor(frostfsProcessor, s) 
} -func (s *Server) initReputationProcessor(cfg *viper.Viper, sidechainFee fixedn.Fixed8) error { - repClient, err := repClient.NewFromMorph(s.morphClient, s.contracts.reputation, sidechainFee, repClient.TryNotary(), repClient.AsAlphabet()) - if err != nil { - return err - } - - // create reputation processor - reputationProcessor, err := reputation.New(&reputation.Params{ - Log: s.log, - PoolSize: cfg.GetInt("workers.reputation"), - EpochState: s, - AlphabetState: s, - ReputationWrapper: repClient, - ManagerBuilder: reputationcommon.NewManagerBuilder( - reputationcommon.ManagersPrm{ - NetMapSource: s.netmapClient, - }, - ), - NotaryDisabled: s.sideNotaryConfig.disabled, - }) - if err != nil { - return err - } - - return bindMorphProcessor(reputationProcessor, s) -} - func (s *Server) initGRPCServer(cfg *viper.Viper) error { controlSvcEndpoint := cfg.GetString("control.grpc.endpoint") if controlSvcEndpoint == "" { - s.log.Info("no Control server endpoint specified, service is disabled") + s.log.Info(logs.InnerringNoControlServerEndpointSpecified) return nil } @@ -520,7 +346,7 @@ func (s *Server) initGRPCServer(cfg *viper.Viper) error { p.SetPrivateKey(*s.key) p.SetHealthChecker(s) - controlSvc := controlsrv.New(p, + controlSvc := controlsrv.New(p, s.netmapClient, controlsrv.WithAllowedKeys(authKeys), ) @@ -544,10 +370,9 @@ func (s *Server) initGRPCServer(cfg *viper.Viper) error { } type serverMorphClients struct { - CnrClient *container.Client - FrostFSIDClient *frostfsid.Client - FrostFSClient *frostfsClient.Client - MorphSubnetClient *morphsubnet.Client + CnrClient *container.Client + FrostFSIDClient *frostfsid.Client + FrostFSClient *frostfsClient.Client } func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { @@ -555,12 +380,6 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { var err error fee := s.feeConfig.SideChainFee() - // do not use TryNotary() in audit wrapper - // audit operations do not require multisignatures - 
s.auditClient, err = auditClient.NewFromMorph(s.morphClient, s.contracts.audit, fee) - if err != nil { - return nil, err - } // form morph container client's options morphCnrOpts := make([]container.Option, 0, 3) @@ -603,24 +422,10 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { return nil, err } - result.MorphSubnetClient, err = s.createMorphSubnetClient() - if err != nil { - return nil, err - } - return result, nil } -type serverProcessors struct { - AlphabetProcessor *alphabet.Processor - SettlementProcessor *settlement.Processor -} - -func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) (*serverProcessors, error) { - result := &serverProcessors{} - - fee := s.feeConfig.SideChainFee() - +func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error { irf := s.createIRFetcher() s.statusIndex = newInnerRingIndexer( @@ -630,69 +435,44 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien cfg.GetDuration("indexer.cache_timeout"), ) - clientCache := newClientCache(&clientCacheParams{ - Log: s.log, - Key: &s.key.PrivateKey, - SGTimeout: cfg.GetDuration("audit.timeout.get"), - HeadTimeout: cfg.GetDuration("audit.timeout.head"), - RangeTimeout: cfg.GetDuration("audit.timeout.rangehash"), - AllowExternal: cfg.GetBool("audit.allow_external"), - }) - - s.registerNoErrCloser(clientCache.cache.CloseAll) - - // create audit processor - auditProcessor, err := s.createAuditProcessor(cfg, clientCache, morphClients.CnrClient) + alphaSync, err := s.createAlphaSync(cfg, morphClients.FrostFSClient, irf) if err != nil { - return nil, err + return err } - result.SettlementProcessor = s.createSettlementProcessor(clientCache, morphClients.CnrClient) - - var alphaSync event.Handler - alphaSync, err = s.createAlphaSync(cfg, morphClients.FrostFSClient, irf) + err = s.initNetmapProcessor(cfg, morphClients.CnrClient, alphaSync) if err != nil { - return nil, err + return err } - 
err = s.initNetmapProcessor(cfg, morphClients.CnrClient, alphaSync, morphClients.MorphSubnetClient, auditProcessor, result.SettlementProcessor) + err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient) if err != nil { - return nil, err - } - - err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient, morphClients.MorphSubnetClient) - if err != nil { - return nil, err + return err } err = s.initBalanceProcessor(cfg, morphClients.FrostFSClient) if err != nil { - return nil, err + return err } err = s.initFrostFSMainnetProcessor(cfg, morphClients.FrostFSIDClient) if err != nil { - return nil, err + return err } - result.AlphabetProcessor, err = s.initAlphabetProcessor(cfg) + err = s.initAlphabetProcessor(cfg) if err != nil { - return nil, err + return err } - err = s.initReputationProcessor(cfg, fee) - if err != nil { - return nil, err - } - - return result, nil + return nil } func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- error) (*chainParams, error) { fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - s.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error())) + s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) } morphChain := &chainParams{ @@ -715,7 +495,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- return nil, err } if err := s.morphClient.SetGroupSignerScope(); err != nil { - morphChain.log.Info("failed to set group signer scope, continue with Global", zap.Error(err)) + morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err)) } return morphChain, nil @@ -748,11 +528,3 @@ func (s *Server) initKey(cfg *viper.Viper) error { s.key = acc.PrivateKey() return nil } - -func (s *Server) initMetrics(cfg *viper.Viper) { - if cfg.GetString("prometheus.address") 
== "" { - return - } - m := metrics.NewInnerRingMetrics() - s.metrics = &m -} diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index a91d2fd0d..8c8c13dc3 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -6,13 +6,14 @@ import ( "fmt" "io" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit" balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -51,7 +52,6 @@ type ( epochDuration atomic.Uint64 statusIndex *innerRingIndexer precision precision.Fixed8Converter - auditClient *auditClient.Client healthStatus atomic.Value balanceClient *balanceClient.Client netmapClient *nmClient.Client @@ -74,7 +74,8 @@ type ( withoutMainNet bool // runtime processors - netmapProcessor *netmap.Processor + netmapProcessor *netmap.Processor + alphabetProcessor *alphabet.Processor workers []func(context.Context) @@ -99,8 +100,6 @@ type ( // should report start errors // to the application. 
runners []func(chan<- error) error - - subnetHandler } chainParams struct { @@ -168,7 +167,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { err = s.voteForSidechainValidator(prm) if err != nil { // we don't stop inner ring execution on this error - s.log.Warn("can't vote for prepared validators", + s.log.Warn(logs.InnerringCantVoteForPreparedValidators, zap.String("error", err.Error())) } @@ -210,13 +209,13 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { func (s *Server) registerMorphNewBlockEventHandler() { s.morphListener.RegisterBlockHandler(func(b *block.Block) { - s.log.Debug("new block", + s.log.Debug(logs.InnerringNewBlock, zap.Uint32("index", b.Index), ) err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index) if err != nil { - s.log.Warn("can't update persistent state", + s.log.Warn(logs.InnerringCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", b.Index)) } @@ -230,7 +229,7 @@ func (s *Server) registerMainnetNewBlockEventHandler() { s.mainnetListener.RegisterBlockHandler(func(b *block.Block) { err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index) if err != nil { - s.log.Warn("can't update persistent state", + s.log.Warn(logs.InnerringCantUpdatePersistentState, zap.String("chain", "main"), zap.Uint32("block_index", b.Index)) } @@ -302,7 +301,7 @@ func (s *Server) Stop() { for _, c := range s.closers { if err := c(); err != nil { - s.log.Warn("closer error", + s.log.Warn(logs.InnerringCloserError, zap.String("error", err.Error()), ) } @@ -360,7 +359,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - server.initNotaryConfig(cfg) + server.initNotaryConfig() err = server.initContracts(cfg) if err != nil { @@ -386,24 +385,19 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - var processors *serverProcessors - processors, 
err = server.initProcessors(cfg, morphClients) + err = server.initProcessors(cfg, morphClients) if err != nil { return nil, err } - server.initTimers(cfg, processors, morphClients) + server.initTimers(cfg, morphClients) err = server.initGRPCServer(cfg) if err != nil { return nil, err } - server.initSubnet(subnetConfig{ - queueSize: cfg.GetUint32("workers.subnet"), - }) - - server.initMetrics(cfg) + server.metrics = metrics.NewInnerRingMetrics() return server, nil } @@ -547,7 +541,7 @@ func (s *Server) initConfigFromBlockchain() error { return err } - s.log.Debug("read config from blockchain", + s.log.Debug(logs.InnerringReadConfigFromBlockchain, zap.Bool("active", s.IsActive()), zap.Bool("alphabet", s.IsAlphabet()), zap.Uint64("epoch", epoch), @@ -577,16 +571,6 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) { return delta - blockHeight, nil } -// onlyActiveHandler wrapper around event handler that executes it -// only if inner ring node state is active. -func (s *Server) onlyActiveEventHandler(f event.Handler) event.Handler { - return func(ev event.Event) { - if s.IsActive() { - f(ev) - } - } -} - // onlyAlphabet wrapper around event handler that executes it // only if inner ring node is alphabet node. 
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler { @@ -606,3 +590,12 @@ func (s *Server) newEpochTickHandlers() []newEpochHandler { return newEpochHandlers } + +func (s *Server) SetExtraWallets(cfg *viper.Viper) error { + parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets")) + if err != nil { + return err + } + s.alphabetProcessor.SetParsedWallets(parsedWallets) + return nil +} diff --git a/pkg/innerring/internal/client/client.go b/pkg/innerring/internal/client/client.go deleted file mode 100644 index 3e95e9766..000000000 --- a/pkg/innerring/internal/client/client.go +++ /dev/null @@ -1,339 +0,0 @@ -package frostfsapiclient - -import ( - "context" - "crypto/ecdsa" - "errors" - "fmt" - "io" - - clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/storagegroup" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// Client represents FrostFS API client cut down to the needs of a purely IR application. -type Client struct { - key *ecdsa.PrivateKey - - c clientcore.Client -} - -// WrapBasicClient wraps a client.Client instance to use it for FrostFS API RPC. -func (x *Client) WrapBasicClient(c clientcore.Client) { - x.c = c -} - -// SetPrivateKey sets a private key to sign RPC requests. -func (x *Client) SetPrivateKey(key *ecdsa.PrivateKey) { - x.key = key -} - -// SearchSGPrm groups parameters of SearchSG operation. -type SearchSGPrm struct { - cnrID cid.ID -} - -// SetContainerID sets the ID of the container to search for storage groups. 
-func (x *SearchSGPrm) SetContainerID(id cid.ID) { - x.cnrID = id -} - -// SearchSGRes groups the resulting values of SearchSG operation. -type SearchSGRes struct { - cliRes []oid.ID -} - -// IDList returns a list of IDs of storage groups in the container. -func (x SearchSGRes) IDList() []oid.ID { - return x.cliRes -} - -var sgFilter = storagegroup.SearchQuery() - -// SearchSG lists objects of storage group type in the container. -// -// Returns any error which prevented the operation from completing correctly in error return. -func (x Client) SearchSG(ctx context.Context, prm SearchSGPrm) (*SearchSGRes, error) { - var cliPrm client.PrmObjectSearch - cliPrm.InContainer(prm.cnrID) - cliPrm.SetFilters(sgFilter) - cliPrm.UseKey(*x.key) - - rdr, err := x.c.ObjectSearchInit(ctx, cliPrm) - if err != nil { - return nil, fmt.Errorf("init object search: %w", err) - } - - buf := make([]oid.ID, 10) - var list []oid.ID - var n int - var ok bool - - for { - n, ok = rdr.Read(buf) - for i := 0; i < n; i++ { - list = append(list, buf[i]) - } - if !ok { - break - } - } - - res, err := rdr.Close() - if err == nil { - // pull out an error from status - err = apistatus.ErrFromStatus(res.Status()) - } - - if err != nil { - return nil, fmt.Errorf("read object list: %w", err) - } - - return &SearchSGRes{ - cliRes: list, - }, nil -} - -// GetObjectPrm groups parameters of GetObject operation. -type GetObjectPrm struct { - getObjectPrm -} - -// GetObjectRes groups the resulting values of GetObject operation. -type GetObjectRes struct { - obj *object.Object -} - -// Object returns the received object. -func (x GetObjectRes) Object() *object.Object { - return x.obj -} - -// GetObject reads the object by address. -// -// Returns any error which prevented the operation from completing correctly in error return. 
-func (x Client) GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) { - var cliPrm client.PrmObjectGet - cliPrm.FromContainer(prm.objAddr.Container()) - cliPrm.ByID(prm.objAddr.Object()) - cliPrm.UseKey(*x.key) - - rdr, err := x.c.ObjectGetInit(ctx, cliPrm) - if err != nil { - return nil, fmt.Errorf("init object search: %w", err) - } - - var obj object.Object - - if !rdr.ReadHeader(&obj) { - res, err := rdr.Close() - if err == nil { - // pull out an error from status - err = apistatus.ErrFromStatus(res.Status()) - } - - return nil, fmt.Errorf("read object header: %w", err) - } - - buf := make([]byte, obj.PayloadSize()) - - _, err = rdr.Read(buf) - if err != nil && !errors.Is(err, io.EOF) { - return nil, fmt.Errorf("read payload: %w", err) - } - - obj.SetPayload(buf) - - return &GetObjectRes{ - obj: &obj, - }, nil -} - -// HeadObjectPrm groups parameters of HeadObject operation. -type HeadObjectPrm struct { - getObjectPrm - - raw bool - - local bool -} - -// SetRawFlag sets flag of raw request. -func (x *HeadObjectPrm) SetRawFlag() { - x.raw = true -} - -// SetTTL sets request TTL value. -func (x *HeadObjectPrm) SetTTL(ttl uint32) { - x.local = ttl < 2 -} - -// HeadObjectRes groups the resulting values of HeadObject operation. -type HeadObjectRes struct { - hdr *object.Object -} - -// Header returns the received object header. -func (x HeadObjectRes) Header() *object.Object { - return x.hdr -} - -// HeadObject reads short object header by address. -// -// Returns any error which prevented the operation from completing correctly in error return. -// For raw requests, returns *object.SplitInfoError error if the requested object is virtual. 
-func (x Client) HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) { - var cliPrm client.PrmObjectHead - - if prm.raw { - cliPrm.MarkRaw() - } - - if prm.local { - cliPrm.MarkLocal() - } - - cliPrm.FromContainer(prm.objAddr.Container()) - cliPrm.ByID(prm.objAddr.Object()) - cliPrm.UseKey(*x.key) - - cliRes, err := x.c.ObjectHead(ctx, cliPrm) - if err == nil { - // pull out an error from status - err = apistatus.ErrFromStatus(cliRes.Status()) - } - - if err != nil { - return nil, fmt.Errorf("read object header from FrostFS: %w", err) - } - - var hdr object.Object - - if !cliRes.ReadHeader(&hdr) { - return nil, errors.New("missing object header in the response") - } - - return &HeadObjectRes{ - hdr: &hdr, - }, nil -} - -// GetObjectPayload reads an object by address from FrostFS via Client and returns its payload. -// -// Returns any error which prevented the operation from completing correctly in error return. -func GetObjectPayload(ctx context.Context, c Client, addr oid.Address) ([]byte, error) { - var prm GetObjectPrm - - prm.SetAddress(addr) - - obj, err := c.GetObject(ctx, prm) - if err != nil { - return nil, err - } - - return obj.Object().Payload(), nil -} - -func headObject(ctx context.Context, c Client, addr oid.Address, raw bool, ttl uint32) (*object.Object, error) { - var prm HeadObjectPrm - - prm.SetAddress(addr) - prm.SetTTL(ttl) - - if raw { - prm.SetRawFlag() - } - - obj, err := c.HeadObject(ctx, prm) - if err != nil { - return nil, err - } - - return obj.Header(), nil -} - -// GetRawObjectHeaderLocally reads the raw short object header from the server's local storage by address via Client. -func GetRawObjectHeaderLocally(ctx context.Context, c Client, addr oid.Address) (*object.Object, error) { - return headObject(ctx, c, addr, true, 1) -} - -// GetObjectHeaderFromContainer reads the short object header by address via Client with TTL = 10 -// for deep traversal of the container. 
-func GetObjectHeaderFromContainer(ctx context.Context, c Client, addr oid.Address) (*object.Object, error) { - return headObject(ctx, c, addr, false, 10) -} - -// HashPayloadRangePrm groups parameters of HashPayloadRange operation. -type HashPayloadRangePrm struct { - getObjectPrm - - rng *object.Range -} - -// SetRange sets payload range to calculate the hash. -func (x *HashPayloadRangePrm) SetRange(rng *object.Range) { - x.rng = rng -} - -// HashPayloadRangeRes groups the resulting values of HashPayloadRange operation. -type HashPayloadRangeRes struct { - h []byte -} - -// Hash returns the hash of the object payload range. -func (x HashPayloadRangeRes) Hash() []byte { - return x.h -} - -// HashPayloadRange requests to calculate Tillich-Zemor hash of the payload range of the object -// from the remote server's local storage. -// -// Returns any error which prevented the operation from completing correctly in error return. -func (x Client) HashPayloadRange(ctx context.Context, prm HashPayloadRangePrm) (res HashPayloadRangeRes, err error) { - var cliPrm client.PrmObjectHash - cliPrm.FromContainer(prm.objAddr.Container()) - cliPrm.ByID(prm.objAddr.Object()) - cliPrm.SetRangeList(prm.rng.GetOffset(), prm.rng.GetLength()) - cliPrm.TillichZemorAlgo() - - cliRes, err := x.c.ObjectHash(ctx, cliPrm) - if err == nil { - // pull out an error from status - err = apistatus.ErrFromStatus(cliRes.Status()) - if err != nil { - return - } - - hs := cliRes.Checksums() - if ln := len(hs); ln != 1 { - err = fmt.Errorf("wrong number of checksums %d", ln) - } else { - res.h = hs[0] - } - } - - return -} - -// HashObjectRange reads Tillich-Zemor hash of the object payload range by address -// from the remote server's local storage via Client. -// -// Returns any error which prevented the operation from completing correctly in error return. 
-func HashObjectRange(ctx context.Context, c Client, addr oid.Address, rng *object.Range) ([]byte, error) { - var prm HashPayloadRangePrm - - prm.SetAddress(addr) - prm.SetRange(rng) - - res, err := c.HashPayloadRange(ctx, prm) - if err != nil { - return nil, err - } - - return res.Hash(), nil -} diff --git a/pkg/innerring/internal/client/doc.go b/pkg/innerring/internal/client/doc.go deleted file mode 100644 index a04b0627b..000000000 --- a/pkg/innerring/internal/client/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package frostfsapiclient provides functionality for IR application communication with FrostFS network. -// -// The basic client for accessing remote nodes via FrostFS API is a FrostFS SDK Go API client. -// However, although it encapsulates a useful piece of business logic (e.g. the signature mechanism), -// the IR application does not fully use the client's flexible interface. -// -// In this regard, this package represents an abstraction -- a type-wrapper over the base client. -// The type provides the minimum interface necessary for the application and also allows you to concentrate -// the entire spectrum of the client's use in one place (this will be convenient both when updating the base client -// and for evaluating the UX of SDK library). So, it is expected that all application packages will be limited -// to this package for the development of functionality requiring FrostFS API communication. -package frostfsapiclient diff --git a/pkg/innerring/internal/client/prm.go b/pkg/innerring/internal/client/prm.go deleted file mode 100644 index 9b5872434..000000000 --- a/pkg/innerring/internal/client/prm.go +++ /dev/null @@ -1,18 +0,0 @@ -package frostfsapiclient - -import ( - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type objectAddressPrm struct { - objAddr oid.Address -} - -// SetAddress sets address of the object. 
-func (x *objectAddressPrm) SetAddress(addr oid.Address) { - x.objAddr = addr -} - -type getObjectPrm struct { - objectAddressPrm -} diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go index 50353b574..30916cb99 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/util" @@ -52,14 +53,14 @@ func (s *Server) notaryHandler(_ event.Event) { if !s.mainNotaryConfig.disabled { _, err := s.depositMainNotary() if err != nil { - s.log.Error("can't make notary deposit in main chain", zap.Error(err)) + s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) } } if !s.sideNotaryConfig.disabled { _, err := s.depositSideNotary() if err != nil { - s.log.Error("can't make notary deposit in side chain", zap.Error(err)) + s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) } } } @@ -82,7 +83,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite // non-error deposit with an empty TX hash means // that the deposit has already been made; no // need to wait it. 
- s.log.Info("notary deposit has already been made") + s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade) return nil } diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go index 9d61aa812..c0668a4f9 100644 --- a/pkg/innerring/processors/alphabet/handlers.go +++ b/pkg/innerring/processors/alphabet/handlers.go @@ -1,6 +1,7 @@ package alphabet import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "go.uber.org/zap" @@ -8,14 +9,14 @@ import ( func (ap *Processor) HandleGasEmission(ev event.Event) { _ = ev.(timers.NewAlphabetEmitTick) - ap.log.Info("tick", zap.String("type", "alphabet gas emit")) + ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit")) // send event to the worker pool err := ap.pool.Submit(func() { ap.processEmit() }) if err != nil { // there system can be moved into controlled degradation stage - ap.log.Warn("alphabet processor worker pool drained", + ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained, zap.Int("capacity", ap.pool.Cap())) } } diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go new file mode 100644 index 000000000..c098ca27d --- /dev/null +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ -0,0 +1,281 @@ +package alphabet_test + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/nspcc-dev/neo-go/pkg/util" + 
"github.com/stretchr/testify/require" +) + +func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) { + t.Parallel() + var emission uint64 = 100_000 + var index int = 5 + var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}} + + alphabetContracts := innerring.NewAlphabetContracts() + for i := 0; i <= index; i++ { + alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} + } + + morphClient := &testMorphClient{} + + var node1 netmap.NodeInfo + key1, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") + require.NoError(t, err, "failed to parse key1") + node1.SetPublicKey(key1.Bytes()) + + var node2 netmap.NodeInfo + key2, err := keys.NewPublicKeyFromString("02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3") + require.NoError(t, err, "failed to parse key2") + node2.SetPublicKey(key2.Bytes()) + + nodes := []netmap.NodeInfo{node1, node2} + + network := &netmap.NetMap{} + network.SetNodes(nodes) + + netmapClient := &testNetmapClient{ + netmap: network, + } + + params := &alphabet.Params{ + ParsedWallets: parsedWallets, + Log: test.NewLogger(t, true), + PoolSize: 2, + StorageEmission: emission, + IRList: &testIndexer{index: index}, + AlphabetContracts: alphabetContracts, + MorphClient: morphClient, + NetmapClient: netmapClient, + } + + processor, err := alphabet.New(params) + require.NoError(t, err, "failed to create processor instance") + + processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + + processor.WaitPoolRunning() + + require.EqualValues(t, []invokedMethod{ + { + contract: alphabetContracts[innerring.GlagoliticLetter(index)], + fee: 0, + method: "emit", + }, + }, morphClient.invokedMethods, "invalid invoked morph methods") + + require.EqualValues(t, []transferGas{ + { + receiver: key1.GetScriptHash(), + amount: fixedn.Fixed8(25_000), + }, + { + receiver: key2.GetScriptHash(), + amount: fixedn.Fixed8(25_000), + }, + }, morphClient.transferedGas, "invalid 
transfered Gas") + + require.EqualValues(t, []batchTransferGas{ + { + receivers: parsedWallets, + amount: fixedn.Fixed8(25_000), + }, + }, morphClient.batchTransferedGas, "invalid batch transfered Gas") +} + +func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) { + t.Parallel() + var emission uint64 = 100_000 + var index int = 5 + var parsedWallets []util.Uint160 = []util.Uint160{} + + alphabetContracts := innerring.NewAlphabetContracts() + for i := 0; i <= index; i++ { + alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} + } + + morphClient := &testMorphClient{} + + var node1 netmap.NodeInfo + key1, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") + require.NoError(t, err, "failed to parse key1") + node1.SetPublicKey(key1.Bytes()) + + var node2 netmap.NodeInfo + key2, err := keys.NewPublicKeyFromString("02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3") + require.NoError(t, err, "failed to parse key2") + node2.SetPublicKey(key2.Bytes()) + + nodes := []netmap.NodeInfo{node1, node2} + + network := &netmap.NetMap{} + network.SetNodes(nodes) + + netmapClient := &testNetmapClient{ + netmap: network, + } + + params := &alphabet.Params{ + ParsedWallets: parsedWallets, + Log: test.NewLogger(t, true), + PoolSize: 2, + StorageEmission: emission, + IRList: &testIndexer{index: index}, + AlphabetContracts: alphabetContracts, + MorphClient: morphClient, + NetmapClient: netmapClient, + } + + processor, err := alphabet.New(params) + require.NoError(t, err, "failed to create processor instance") + + processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + + processor.WaitPoolRunning() + + require.EqualValues(t, []invokedMethod{ + { + contract: alphabetContracts[innerring.GlagoliticLetter(index)], + fee: 0, + method: "emit", + }, + }, morphClient.invokedMethods, "invalid invoked morph methods") + + require.EqualValues(t, []transferGas{ + { + receiver: 
key1.GetScriptHash(), + amount: fixedn.Fixed8(50_000), + }, + { + receiver: key2.GetScriptHash(), + amount: fixedn.Fixed8(50_000), + }, + }, morphClient.transferedGas, "invalid transfered Gas") + + require.Equal(t, 0, len(morphClient.batchTransferedGas), "invalid batch transfered Gas") +} + +func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { + t.Parallel() + var emission uint64 = 100_000 + var index int = 5 + var parsedWallets []util.Uint160 = []util.Uint160{} + + alphabetContracts := innerring.NewAlphabetContracts() + for i := 0; i <= index; i++ { + alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} + } + + morphClient := &testMorphClient{} + + nodes := []netmap.NodeInfo{} + network := &netmap.NetMap{} + network.SetNodes(nodes) + + netmapClient := &testNetmapClient{ + netmap: network, + } + + params := &alphabet.Params{ + ParsedWallets: parsedWallets, + Log: test.NewLogger(t, true), + PoolSize: 2, + StorageEmission: emission, + IRList: &testIndexer{index: index}, + AlphabetContracts: alphabetContracts, + MorphClient: morphClient, + NetmapClient: netmapClient, + } + + processor, err := alphabet.New(params) + require.NoError(t, err, "failed to create processor instance") + + processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + + processor.WaitPoolRunning() + + require.EqualValues(t, []invokedMethod{ + { + contract: alphabetContracts[innerring.GlagoliticLetter(index)], + fee: 0, + method: "emit", + }, + }, morphClient.invokedMethods, "invalid invoked morph methods") + + require.Equal(t, 0, len(morphClient.transferedGas), "invalid transfered Gas") + + require.Equal(t, 0, len(morphClient.batchTransferedGas), "invalid batch transfered Gas") +} + +type testIndexer struct { + index int +} + +func (i *testIndexer) AlphabetIndex() int { + return i.index +} + +type invokedMethod struct { + contract util.Uint160 + fee fixedn.Fixed8 + method string + args []any +} + +type transferGas struct { + receiver util.Uint160 + amount 
fixedn.Fixed8 +} + +type batchTransferGas struct { + receivers []util.Uint160 + amount fixedn.Fixed8 +} + +type testMorphClient struct { + invokedMethods []invokedMethod + transferedGas []transferGas + batchTransferedGas []batchTransferGas +} + +func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) error { + c.invokedMethods = append(c.invokedMethods, + invokedMethod{ + contract: contract, + fee: fee, + method: method, + args: args, + }) + return nil +} +func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error { + c.transferedGas = append(c.transferedGas, transferGas{ + receiver: receiver, + amount: amount, + }) + return nil +} + +func (c *testMorphClient) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error { + c.batchTransferedGas = append(c.batchTransferedGas, batchTransferGas{ + receivers: receivers, + amount: amount, + }) + return nil +} + +type testNetmapClient struct { + netmap *netmap.NetMap +} + +func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { + return c.netmap, nil +} diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index 90c484b88..7a268ac52 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -3,9 +3,11 @@ package alphabet import ( "crypto/elliptic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/nspcc-dev/neo-go/pkg/util" "go.uber.org/zap" ) @@ -14,14 +16,14 @@ const emitMethod = "emit" func (ap *Processor) processEmit() { index := ap.irList.AlphabetIndex() if index < 0 { - ap.log.Info("non alphabet mode, ignore gas emission event") + ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) return } contract, ok 
:= ap.alphabetContracts.GetByIndex(index) if !ok { - ap.log.Debug("node is out of alphabet range, ignore gas emission event", + ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, zap.Int("index", index)) return @@ -30,20 +32,20 @@ func (ap *Processor) processEmit() { // there is no signature collecting, so we don't need extra fee err := ap.morphClient.Invoke(contract, 0, emitMethod) if err != nil { - ap.log.Warn("can't invoke alphabet emit method", zap.String("error", err.Error())) + ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) return } if ap.storageEmission == 0 { - ap.log.Info("storage node emission is off") + ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff) return } networkMap, err := ap.netmapClient.NetMap() if err != nil { - ap.log.Warn("can't get netmap snapshot to emit gas to storage nodes", + ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, zap.String("error", err.Error())) return @@ -51,9 +53,12 @@ func (ap *Processor) processEmit() { nmNodes := networkMap.Nodes() nmLen := len(nmNodes) - extraLen := len(ap.parsedWallets) + ap.pwLock.RLock() + pw := ap.parsedWallets + ap.pwLock.RUnlock() + extraLen := len(pw) - ap.log.Debug("gas emission", + ap.log.Debug(logs.AlphabetGasEmission, zap.Int("network_map", nmLen), zap.Int("extra_wallets", extraLen)) @@ -65,7 +70,7 @@ func (ap *Processor) processEmit() { ap.transferGasToNetmapNodes(nmNodes, gasPerNode) - ap.transferGasToExtraNodes(extraLen, gasPerNode) + ap.transferGasToExtraNodes(pw, gasPerNode) } func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) { @@ -74,7 +79,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) if err != nil { - ap.log.Warn("can't parse node public key", + ap.log.Warn(logs.AlphabetCantParseNodePublicKey, zap.String("error", err.Error())) continue @@ -82,7 
+87,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode) if err != nil { - ap.log.Warn("can't transfer gas", + ap.log.Warn(logs.AlphabetCantTransferGas, zap.String("receiver", key.Address()), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), @@ -91,15 +96,15 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN } } -func (ap *Processor) transferGasToExtraNodes(extraLen int, gasPerNode fixedn.Fixed8) { - if extraLen != 0 { - err := ap.morphClient.BatchTransferGas(ap.parsedWallets, gasPerNode) +func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) { + if len(pw) > 0 { + err := ap.morphClient.BatchTransferGas(pw, gasPerNode) if err != nil { - receiversLog := make([]string, extraLen) - for i, addr := range ap.parsedWallets { + receiversLog := make([]string, len(pw)) + for i, addr := range pw { receiversLog[i] = addr.StringLE() } - ap.log.Warn("can't transfer gas to wallet", + ap.log.Warn(logs.AlphabetCantTransferGasToWallet, zap.Strings("receivers", receiversLog), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index 980158132..cd9088e03 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -3,11 +3,14 @@ package alphabet import ( "errors" "fmt" + "sync" + "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" 
"github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" "go.uber.org/zap" @@ -31,14 +34,26 @@ type ( GetByIndex(int) (util.Uint160, bool) } + netmapClient interface { + NetMap() (*netmap.NetMap, error) + } + + morphClient interface { + Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) error + TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error + BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error + } + // Processor of events produced for alphabet contracts in the sidechain. Processor struct { - parsedWallets []util.Uint160 + parsedWallets []util.Uint160 + // protects parsedWallets from concurrent change + pwLock *sync.RWMutex log *logger.Logger pool *ants.Pool alphabetContracts Contracts - netmapClient *nmClient.Client - morphClient *client.Client + netmapClient netmapClient + morphClient morphClient irList Indexer storageEmission uint64 } @@ -49,8 +64,8 @@ type ( Log *logger.Logger PoolSize int AlphabetContracts Contracts - NetmapClient *nmClient.Client - MorphClient *client.Client + NetmapClient netmapClient + MorphClient morphClient IRList Indexer StorageEmission uint64 } @@ -67,7 +82,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/alphabet: global state is not set") } - p.Log.Debug("alphabet worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { @@ -76,6 +91,7 @@ func New(p *Params) (*Processor, error) { return &Processor{ parsedWallets: p.ParsedWallets, + pwLock: new(sync.RWMutex), log: p.Log, pool: pool, alphabetContracts: p.AlphabetContracts, @@ -86,6 +102,12 @@ func New(p *Params) (*Processor, error) { }, nil } +func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) { + ap.pwLock.Lock() + ap.parsedWallets = parsedWallets + ap.pwLock.Unlock() +} + // ListenerNotificationParsers for the 'event.Listener' 
event producer. func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { return nil @@ -106,7 +128,10 @@ func (ap *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { return nil } -// TimersHandlers for the 'Timers' event producer. -func (ap *Processor) TimersHandlers() []event.NotificationHandlerInfo { - return nil +// WaitPoolRunning waits while pool has running tasks +// For use in test only. +func (ap *Processor) WaitPoolRunning() { + for ap.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } } diff --git a/pkg/innerring/processors/audit/events.go b/pkg/innerring/processors/audit/events.go deleted file mode 100644 index 4fb106122..000000000 --- a/pkg/innerring/processors/audit/events.go +++ /dev/null @@ -1,19 +0,0 @@ -package audit - -// Start is an event to start a new round of data audit. -type Start struct { - epoch uint64 -} - -// MorphEvent implements the Event interface. -func (a Start) MorphEvent() {} - -func NewAuditStartEvent(epoch uint64) Start { - return Start{ - epoch: epoch, - } -} - -func (a Start) Epoch() uint64 { - return a.epoch -} diff --git a/pkg/innerring/processors/audit/handlers.go b/pkg/innerring/processors/audit/handlers.go deleted file mode 100644 index 8b2354bb8..000000000 --- a/pkg/innerring/processors/audit/handlers.go +++ /dev/null @@ -1,21 +0,0 @@ -package audit - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "go.uber.org/zap" -) - -func (ap *Processor) handleNewAuditRound(ev event.Event) { - auditEvent := ev.(Start) - - epoch := auditEvent.Epoch() - - ap.log.Info("new round of audit", zap.Uint64("epoch", epoch)) - - // send an event to the worker pool - - err := ap.pool.Submit(func() { ap.processStartAudit(epoch) }) - if err != nil { - ap.log.Warn("previous round of audit prepare hasn't finished yet") - } -} diff --git a/pkg/innerring/processors/audit/process.go b/pkg/innerring/processors/audit/process.go deleted file mode 100644 index 
656927816..000000000 --- a/pkg/innerring/processors/audit/process.go +++ /dev/null @@ -1,216 +0,0 @@ -package audit - -import ( - "context" - "crypto/sha256" - - clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -func (ap *Processor) processStartAudit(epoch uint64) { - log := ap.log.With(zap.Uint64("epoch", epoch)) - - ap.prevAuditCanceler() - - skipped := ap.taskManager.Reset() - if skipped > 0 { - ap.log.Info("some tasks from previous epoch are skipped", - zap.Int("amount", skipped), - ) - } - - containers, err := ap.selectContainersToAudit(epoch) - if err != nil { - log.Error("container selection failure", zap.String("error", err.Error())) - - return - } - - log.Info("select containers for audit", zap.Int("amount", len(containers))) - - nm, err := ap.netmapClient.GetNetMap(0) - if err != nil { - ap.log.Error("can't fetch network map", - zap.String("error", err.Error())) - - return - } - - cancelChannel := make(chan struct{}) - ap.prevAuditCanceler = func() { - select { - case <-cancelChannel: // already closed - default: - close(cancelChannel) - } - } - - pivot := make([]byte, sha256.Size) - - ap.startAuditTasksOnContainers(cancelChannel, containers, log, pivot, nm, epoch) -} - -func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{}, containers []cid.ID, log *zap.Logger, pivot 
[]byte, nm *netmap.NetMap, epoch uint64) { - for i := range containers { - cnr, err := cntClient.Get(ap.containerClient, containers[i]) // get container structure - if err != nil { - log.Error("can't get container info, ignore", - zap.Stringer("cid", containers[i]), - zap.String("error", err.Error())) - - continue - } - - containers[i].Encode(pivot) - - // find all container nodes for current epoch - nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), pivot) - if err != nil { - log.Info("can't build placement for container, ignore", - zap.Stringer("cid", containers[i]), - zap.String("error", err.Error())) - - continue - } - - n := placement.FlattenNodes(nodes) - - // shuffle nodes to ask a random one - rand.Shuffle(len(n), func(i, j int) { - n[i], n[j] = n[j], n[i] - }) - - // search storage groups - storageGroupsIDs := ap.findStorageGroups(containers[i], n) - log.Info("select storage groups for audit", - zap.Stringer("cid", containers[i]), - zap.Int("amount", len(storageGroupsIDs))) - - // filter expired storage groups - storageGroups := ap.filterExpiredSG(containers[i], storageGroupsIDs, nodes, *nm) - log.Info("filter expired storage groups for audit", - zap.Stringer("cid", containers[i]), - zap.Int("amount", len(storageGroups))) - - // skip audit for containers without - // non-expired storage groups - if len(storageGroupsIDs) == 0 { - continue - } - - auditTask := new(audit.Task). - WithReporter(&epochAuditReporter{ - epoch: epoch, - rep: ap.reporter, - }). - WithCancelChannel(cancelChannel). - WithContainerID(containers[i]). - WithStorageGroupList(storageGroups). - WithContainerStructure(cnr.Value). - WithContainerNodes(nodes). 
- WithNetworkMap(nm) - - ap.taskManager.PushTask(auditTask) - } -} - -func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []oid.ID { - var sg []oid.ID - - ln := len(shuffled) - - var ( - info clientcore.NodeInfo - prm storagegroup.SearchSGPrm - ) - - prm.Container = cnr - - for i := range shuffled { // consider iterating over some part of container - log := ap.log.With( - zap.Stringer("cid", cnr), - zap.String("key", netmap.StringifyPublicKey(shuffled[0])), - zap.Int("try", i), - zap.Int("total_tries", ln), - ) - - err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(shuffled[i])) - if err != nil { - log.Warn("parse client node info", zap.String("error", err.Error())) - - continue - } - - ctx, cancel := context.WithTimeout(context.Background(), ap.searchTimeout) - - prm.NodeInfo = info - - var dst storagegroup.SearchSGDst - - err = ap.sgSrc.ListSG(ctx, &dst, prm) - - cancel() - - if err != nil { - log.Warn("error in storage group search", zap.String("error", err.Error())) - continue - } - - sg = append(sg, dst.Objects...) 
- - break // we found storage groups, so break loop - } - - return sg -} - -func (ap *Processor) filterExpiredSG(cid cid.ID, sgIDs []oid.ID, - cnr [][]netmap.NodeInfo, nm netmap.NetMap) []storagegroup.StorageGroup { - sgs := make([]storagegroup.StorageGroup, 0, len(sgIDs)) - var coreSG storagegroup.StorageGroup - - var getSGPrm storagegroup.GetSGPrm - getSGPrm.CID = cid - getSGPrm.Container = cnr - getSGPrm.NetMap = nm - - for _, sgID := range sgIDs { - ctx, cancel := context.WithTimeout(context.Background(), ap.searchTimeout) - - getSGPrm.OID = sgID - - sg, err := ap.sgSrc.GetSG(ctx, getSGPrm) - - cancel() - - if err != nil { - ap.log.Error( - "could not get storage group object for audit, skipping", - zap.Stringer("cid", cid), - zap.Stringer("oid", sgID), - zap.Error(err), - ) - continue - } - - // filter expired epochs - if sg.ExpirationEpoch() >= ap.epochSrc.EpochCounter() { - coreSG.SetID(sgID) - coreSG.SetStorageGroup(*sg) - - sgs = append(sgs, coreSG) - } - } - - return sgs -} diff --git a/pkg/innerring/processors/audit/processor.go b/pkg/innerring/processors/audit/processor.go deleted file mode 100644 index 31e8a8c55..000000000 --- a/pkg/innerring/processors/audit/processor.go +++ /dev/null @@ -1,148 +0,0 @@ -package audit - -import ( - "context" - "crypto/ecdsa" - "errors" - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/panjf2000/ants/v2" -) - -type ( - // Indexer is a callback interface for inner ring global state. 
- Indexer interface { - InnerRingIndex() int - InnerRingSize() int - } - - TaskManager interface { - PushTask(*audit.Task) - - // Must skip all tasks planned for execution and - // return their number. - Reset() int - } - - // EpochSource is an interface that provides actual - // epoch information. - EpochSource interface { - // EpochCounter must return current epoch number. - EpochCounter() uint64 - } - - // Processor of events related to data audit. - Processor struct { - log *logger.Logger - pool *ants.Pool - irList Indexer - sgSrc storagegroup.SGSource - epochSrc EpochSource - searchTimeout time.Duration - - containerClient *cntClient.Client - netmapClient *nmClient.Client - - taskManager TaskManager - reporter audit.Reporter - prevAuditCanceler context.CancelFunc - } - - // Params of the processor constructor. - Params struct { - Log *logger.Logger - NetmapClient *nmClient.Client - ContainerClient *cntClient.Client - IRList Indexer - SGSource storagegroup.SGSource - RPCSearchTimeout time.Duration - TaskManager TaskManager - Reporter audit.Reporter - Key *ecdsa.PrivateKey - EpochSource EpochSource - } -) - -type epochAuditReporter struct { - epoch uint64 - - rep audit.Reporter -} - -// ProcessorPoolSize limits pool size for audit Processor. Processor manages -// audit tasks and fills queue for the next epoch. This process must not be interrupted -// by a new audit epoch, so we limit the pool size for the processor to one. -const ProcessorPoolSize = 1 - -// New creates audit processor instance. 
-func New(p *Params) (*Processor, error) { - switch { - case p.Log == nil: - return nil, errors.New("ir/audit: logger is not set") - case p.IRList == nil: - return nil, errors.New("ir/audit: global state is not set") - case p.SGSource == nil: - return nil, errors.New("ir/audit: SG source is not set") - case p.TaskManager == nil: - return nil, errors.New("ir/audit: audit task manager is not set") - case p.Reporter == nil: - return nil, errors.New("ir/audit: audit result reporter is not set") - case p.Key == nil: - return nil, errors.New("ir/audit: signing key is not set") - case p.EpochSource == nil: - return nil, errors.New("ir/audit: epoch source is not set") - } - - pool, err := ants.NewPool(ProcessorPoolSize, ants.WithNonblocking(true)) - if err != nil { - return nil, fmt.Errorf("ir/audit: can't create worker pool: %w", err) - } - - return &Processor{ - log: p.Log, - pool: pool, - containerClient: p.ContainerClient, - irList: p.IRList, - sgSrc: p.SGSource, - epochSrc: p.EpochSource, - searchTimeout: p.RPCSearchTimeout, - netmapClient: p.NetmapClient, - taskManager: p.TaskManager, - reporter: p.Reporter, - prevAuditCanceler: func() {}, - }, nil -} - -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - return nil -} - -// ListenerNotificationHandlers for the 'event.Listener' event producer. -func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - return nil -} - -// TimersHandlers for the 'Timers' event producer. -func (ap *Processor) TimersHandlers() []event.NotificationHandlerInfo { - return nil -} - -// StartAuditHandler for the internal event producer. 
-func (ap *Processor) StartAuditHandler() event.Handler { - return ap.handleNewAuditRound -} - -func (r *epochAuditReporter) WriteReport(rep *audit.Report) error { - res := rep.Result() - res.ForEpoch(r.epoch) - - return r.rep.WriteReport(rep) -} diff --git a/pkg/innerring/processors/audit/scheduler.go b/pkg/innerring/processors/audit/scheduler.go deleted file mode 100644 index e1a521bad..000000000 --- a/pkg/innerring/processors/audit/scheduler.go +++ /dev/null @@ -1,64 +0,0 @@ -package audit - -import ( - "errors" - "fmt" - "sort" - "strings" - - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.uber.org/zap" -) - -var ErrInvalidIRNode = errors.New("node is not in the inner ring list") - -func (ap *Processor) selectContainersToAudit(epoch uint64) ([]cid.ID, error) { - containers, err := ap.containerClient.ContainersOf(nil) - if err != nil { - return nil, fmt.Errorf("can't get list of containers to start audit: %w", err) - } - - // consider getting extra information about container complexity from - // audit contract there - ap.log.Debug("container listing finished", - zap.Int("total amount", len(containers)), - ) - - sort.Slice(containers, func(i, j int) bool { - return strings.Compare(containers[i].EncodeToString(), containers[j].EncodeToString()) < 0 - }) - - ind := ap.irList.InnerRingIndex() - irSize := ap.irList.InnerRingSize() - - if ind < 0 || ind >= irSize { - return nil, ErrInvalidIRNode - } - - return Select(containers, epoch, uint64(ind), uint64(irSize)), nil -} - -func Select(ids []cid.ID, epoch, index, size uint64) []cid.ID { - if index >= size { - return nil - } - - var a, b uint64 - - ln := uint64(len(ids)) - pivot := ln % size - delta := ln / size - - index = (index + epoch) % size - if index < pivot { - a = delta + 1 - } else { - a = delta - b = pivot - } - - from := a*index + b - to := a*(index+1) + b - - return ids[from:to] -} diff --git a/pkg/innerring/processors/audit/scheduler_test.go 
b/pkg/innerring/processors/audit/scheduler_test.go deleted file mode 100644 index 51461beb7..000000000 --- a/pkg/innerring/processors/audit/scheduler_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package audit_test - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/stretchr/testify/require" -) - -func TestSelect(t *testing.T) { - cids := generateContainers(10) - - t.Run("invalid input", func(t *testing.T) { - require.Empty(t, audit.Select(cids, 0, 0, 0)) - }) - - t.Run("even split", func(t *testing.T) { - const irSize = 5 // every node takes two audit nodes - - m := hitMap(cids) - - for i := 0; i < irSize; i++ { - s := audit.Select(cids, 0, uint64(i), irSize) - require.Equal(t, len(cids)/irSize, len(s)) - - for _, id := range s { - n, ok := m[id.EncodeToString()] - require.True(t, ok) - require.Equal(t, 0, n) - m[id.EncodeToString()] = 1 - } - } - - require.True(t, allHit(m)) - }) - - t.Run("odd split", func(t *testing.T) { - const irSize = 3 - - m := hitMap(cids) - - for i := 0; i < irSize; i++ { - s := audit.Select(cids, 0, uint64(i), irSize) - - for _, id := range s { - n, ok := m[id.EncodeToString()] - require.True(t, ok) - require.Equal(t, 0, n) - m[id.EncodeToString()] = 1 - } - } - - require.True(t, allHit(m)) - }) - - t.Run("epoch shift", func(t *testing.T) { - const irSize = 4 - - m := hitMap(cids) - - for i := 0; i < irSize; i++ { - s := audit.Select(cids, uint64(i), 0, irSize) - - for _, id := range s { - n, ok := m[id.EncodeToString()] - require.True(t, ok) - require.Equal(t, 0, n) - m[id.EncodeToString()] = 1 - } - } - - require.True(t, allHit(m)) - }) -} - -func generateContainers(n int) []cid.ID { - result := make([]cid.ID, n) - - for i := 0; i < n; i++ { - result[i] = cidtest.ID() - } - - return result -} - -func hitMap(ids []cid.ID) map[string]int { 
- result := make(map[string]int, len(ids)) - - for _, id := range ids { - result[id.EncodeToString()] = 0 - } - - return result -} - -func allHit(m map[string]int) bool { - for _, v := range m { - if v == 0 { - return false - } - } - - return true -} diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go index 4c5a2ddc6..e325da1f9 100644 --- a/pkg/innerring/processors/balance/handlers.go +++ b/pkg/innerring/processors/balance/handlers.go @@ -3,6 +3,7 @@ package balance import ( "encoding/hex" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance" "go.uber.org/zap" @@ -10,7 +11,7 @@ import ( func (bp *Processor) handleLock(ev event.Event) { lock := ev.(balanceEvent.Lock) - bp.log.Info("notification", + bp.log.Info(logs.Notification, zap.String("type", "lock"), zap.String("value", hex.EncodeToString(lock.ID()))) @@ -19,7 +20,7 @@ func (bp *Processor) handleLock(ev event.Event) { err := bp.pool.Submit(func() { bp.processLock(&lock) }) if err != nil { // there system can be moved into controlled degradation stage - bp.log.Warn("balance worker pool drained", + bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained, zap.Int("capacity", bp.pool.Cap())) } } diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go new file mode 100644 index 000000000..3470fba2d --- /dev/null +++ b/pkg/innerring/processors/balance/handlers_test.go @@ -0,0 +1,90 @@ +package balance + +import ( + "testing" + "time" + + frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" + balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/stretchr/testify/require" +) + +func 
TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) { + t.Parallel() + as := &testAlphabetState{ + isAlphabet: true, + } + conv := &testPresicionConverter{} + cl := &testFrostFSContractClient{} + bsc := util.Uint160{100} + + processor, err := New(&Params{ + Log: test.NewLogger(t, true), + PoolSize: 2, + FrostFSClient: cl, + BalanceSC: bsc, + AlphabetState: as, + Converter: conv, + }) + require.NoError(t, err, "failed to create processor") + + processor.handleLock(balanceEvent.Lock{}) + + for processor.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + require.Equal(t, 1, cl.chequeCalls, "invalid Cheque calls") +} + +func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) { + t.Parallel() + as := &testAlphabetState{} + conv := &testPresicionConverter{} + cl := &testFrostFSContractClient{} + bsc := util.Uint160{100} + + processor, err := New(&Params{ + Log: test.NewLogger(t, true), + PoolSize: 2, + FrostFSClient: cl, + BalanceSC: bsc, + AlphabetState: as, + Converter: conv, + }) + require.NoError(t, err, "failed to create processor") + + processor.handleLock(balanceEvent.Lock{}) + + for processor.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + require.Equal(t, 0, cl.chequeCalls, "invalid Cheque calls") +} + +type testAlphabetState struct { + isAlphabet bool +} + +func (s *testAlphabetState) IsAlphabet() bool { + return s.isAlphabet +} + +type testPresicionConverter struct { +} + +func (c *testPresicionConverter) ToFixed8(v int64) int64 { + return v +} + +type testFrostFSContractClient struct { + chequeCalls int +} + +func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error { + c.chequeCalls++ + return nil +} diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go index 754dda34a..3f86a3cb7 100644 --- a/pkg/innerring/processors/balance/process_assets.go +++ b/pkg/innerring/processors/balance/process_assets.go @@ -1,6 +1,7 @@ package 
balance import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance" "go.uber.org/zap" @@ -10,7 +11,7 @@ import ( // back to the withdraw issuer. func (bp *Processor) processLock(lock *balanceEvent.Lock) { if !bp.alphabetState.IsAlphabet() { - bp.log.Info("non alphabet mode, ignore balance lock") + bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock) return } @@ -24,6 +25,6 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) { err := bp.frostfsClient.Cheque(prm) if err != nil { - bp.log.Error("can't send lock asset tx", zap.Error(err)) + bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err)) } } diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go index 2527b7ec3..356754cfb 100644 --- a/pkg/innerring/processors/balance/processor.go +++ b/pkg/innerring/processors/balance/processor.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance" @@ -24,11 +25,15 @@ type ( ToFixed8(int64) int64 } + FrostFSClient interface { + Cheque(p frostfscontract.ChequePrm) error + } + // Processor of events produced by balance contract in the morphchain. 
Processor struct { log *logger.Logger pool *ants.Pool - frostfsClient *frostfscontract.Client + frostfsClient FrostFSClient balanceSC util.Uint160 alphabetState AlphabetState converter PrecisionConverter @@ -38,7 +43,7 @@ type ( Params struct { Log *logger.Logger PoolSize int - FrostFSClient *frostfscontract.Client + FrostFSClient FrostFSClient BalanceSC util.Uint160 AlphabetState AlphabetState Converter PrecisionConverter @@ -60,7 +65,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/balance: balance precision converter is not set") } - p.Log.Debug("balance worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { @@ -114,8 +119,3 @@ func (bp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo { func (bp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { return nil } - -// TimersHandlers for the 'Timers' event producer. 
-func (bp *Processor) TimersHandlers() []event.NotificationHandlerInfo { - return nil -} diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go index f9f8b5841..2ab1147c8 100644 --- a/pkg/innerring/processors/container/handlers.go +++ b/pkg/innerring/processors/container/handlers.go @@ -3,6 +3,7 @@ package container import ( "crypto/sha256" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "github.com/mr-tron/base58" @@ -13,7 +14,7 @@ func (cp *Processor) handlePut(ev event.Event) { put := ev.(putEvent) id := sha256.Sum256(put.Container()) - cp.log.Info("notification", + cp.log.Info(logs.Notification, zap.String("type", "container put"), zap.String("id", base58.Encode(id[:]))) @@ -22,23 +23,23 @@ func (cp *Processor) handlePut(ev event.Event) { err := cp.pool.Submit(func() { cp.processContainerPut(put) }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn("container processor worker pool drained", + cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } func (cp *Processor) handleDelete(ev event.Event) { del := ev.(containerEvent.Delete) - cp.log.Info("notification", + cp.log.Info(logs.Notification, zap.String("type", "container delete"), zap.String("id", base58.Encode(del.ContainerID()))) // send an event to the worker pool - err := cp.pool.Submit(func() { cp.processContainerDelete(&del) }) + err := cp.pool.Submit(func() { cp.processContainerDelete(del) }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn("container processor worker pool drained", + cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } @@ -46,7 +47,7 @@ func (cp *Processor) handleDelete(ev event.Event) { func 
(cp *Processor) handleSetEACL(ev event.Event) { e := ev.(containerEvent.SetEACL) - cp.log.Info("notification", + cp.log.Info(logs.Notification, zap.String("type", "set EACL"), ) @@ -57,7 +58,7 @@ func (cp *Processor) handleSetEACL(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn("container processor worker pool drained", + cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go new file mode 100644 index 000000000..6075ff577 --- /dev/null +++ b/pkg/innerring/processors/container/handlers_test.go @@ -0,0 +1,337 @@ +package container + +import ( + "crypto/ecdsa" + "encoding/hex" + "testing" + "time" + + containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" + containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" + frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + 
"github.com/nspcc-dev/neo-go/pkg/network/payload" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/stretchr/testify/require" +) + +func TestPutEvent(t *testing.T) { + t.Parallel() + nst := &testNetworkState{ + homHashDisabled: true, + epoch: 100, + } + cc := &testContainerClient{ + get: make(map[string]*containercore.Container), + } + + proc, err := New(&Params{ + Log: test.NewLogger(t, true), + PoolSize: 2, + AlphabetState: &testAlphabetState{isAlphabet: true}, + FrostFSIDClient: &testIDClient{}, + NotaryDisabled: true, + NetworkState: nst, + ContainerClient: cc, + }) + require.NoError(t, err, "failed to create processor") + + p, err := keys.NewPrivateKey() + require.NoError(t, err) + var usr user.ID + user.IDFromKey(&usr, (ecdsa.PublicKey)(*p.PublicKey())) + + var pp netmap.PlacementPolicy + pp.AddReplicas(netmap.ReplicaDescriptor{}) + + var cnr containerSDK.Container + cnr.Init() + cnr.SetOwner(usr) + cnr.SetPlacementPolicy(pp) + cnr.SetBasicACL(acl.Private) + containerSDK.DisableHomomorphicHashing(&cnr) + + event := &testPutEvent{ + cnr: &cnr, + pk: p, + st: nil, + } + + proc.handlePut(event) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + var expectedPut cntClient.PutPrm + expectedPut.SetContainer(cnr.Marshal()) + expectedPut.SetKey(p.PublicKey().Bytes()) + expectedPut.SetSignature(p.Sign(cnr.Marshal())) + expectedPut.SetZone("container") + + require.EqualValues(t, []cntClient.PutPrm{expectedPut}, cc.put, "invalid put requests") +} + +func TestDeleteEvent(t *testing.T) { + t.Parallel() + nst := &testNetworkState{ + homHashDisabled: true, + epoch: 100, + } + cc := &testContainerClient{ + get: make(map[string]*containercore.Container), + } + + p, err := keys.NewPrivateKey() + require.NoError(t, err) + + idc := &testIDClient{ + publicKeys: []*keys.PublicKey{ + p.PublicKey(), + }, + } + + proc, err := New(&Params{ + Log: test.NewLogger(t, true), + PoolSize: 2, + AlphabetState: &testAlphabetState{isAlphabet: true}, + 
FrostFSIDClient: idc, + NotaryDisabled: true, + NetworkState: nst, + ContainerClient: cc, + }) + require.NoError(t, err, "failed to create processor") + + var usr user.ID + user.IDFromKey(&usr, (ecdsa.PublicKey)(*p.PublicKey())) + + var pp netmap.PlacementPolicy + pp.AddReplicas(netmap.ReplicaDescriptor{}) + + var cnr containerSDK.Container + cnr.Init() + cnr.SetOwner(usr) + cnr.SetPlacementPolicy(pp) + cnr.SetBasicACL(acl.Private) + containerSDK.DisableHomomorphicHashing(&cnr) + + var cid cid.ID + containerSDK.CalculateID(&cid, cnr) + cidBin := make([]byte, 32) + cid.Encode(cidBin) + + ev := containerEvent.Delete{ + ContainerIDValue: cidBin, + SignatureValue: p.Sign(cidBin), + } + + var signature frostfscrypto.Signature + signer := frostfsecdsa.Signer(p.PrivateKey) + require.NoError(t, signature.Calculate(signer, ev.ContainerID()), "failed to calculate signature") + cc.get[hex.EncodeToString(ev.ContainerID())] = &containercore.Container{ + Value: cnr, + Signature: signature, + } + + proc.handleDelete(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + var expectedDelete cntClient.DeletePrm + expectedDelete.SetCID(ev.ContainerID()) + expectedDelete.SetSignature(ev.Signature()) + + require.EqualValues(t, []cntClient.DeletePrm{expectedDelete}, cc.delete, "invalid delete requests") +} + +func TestSetEACLEvent(t *testing.T) { + t.Parallel() + nst := &testNetworkState{ + homHashDisabled: true, + epoch: 100, + } + cc := &testContainerClient{ + get: make(map[string]*containercore.Container), + } + + proc, err := New(&Params{ + Log: test.NewLogger(t, true), + PoolSize: 2, + AlphabetState: &testAlphabetState{isAlphabet: true}, + FrostFSIDClient: &testIDClient{}, + NotaryDisabled: true, + NetworkState: nst, + ContainerClient: cc, + }) + require.NoError(t, err, "failed to create processor") + + p, err := keys.NewPrivateKey() + require.NoError(t, err) + + var usr user.ID + user.IDFromKey(&usr, (ecdsa.PublicKey)(*p.PublicKey())) + + var pp 
netmap.PlacementPolicy + pp.AddReplicas(netmap.ReplicaDescriptor{}) + + var cnr containerSDK.Container + cnr.Init() + cnr.SetOwner(usr) + cnr.SetPlacementPolicy(pp) + cnr.SetBasicACL(acl.PrivateExtended) + containerSDK.DisableHomomorphicHashing(&cnr) + + var cid cid.ID + containerSDK.CalculateID(&cid, cnr) + cidBytes := make([]byte, 32) + cid.Encode(cidBytes) + + var signature frostfscrypto.Signature + signer := frostfsecdsa.Signer(p.PrivateKey) + require.NoError(t, signature.Calculate(signer, cidBytes), "failed to calculate signature") + + cc.get[hex.EncodeToString(cidBytes)] = &containercore.Container{ + Value: cnr, + Signature: signature, + } + + table := eacl.NewTable() + table.SetCID(cid) + table.SetVersion(version.Current()) + + r := &eacl.Record{} + r.AddObjectContainerIDFilter(eacl.MatchStringEqual, cid) + + table.AddRecord(r) + + event := containerEvent.SetEACL{ + TableValue: table.ToV2().StableMarshal(nil), + PublicKeyValue: p.PublicKey().Bytes(), + SignatureValue: p.Sign(table.ToV2().StableMarshal(nil)), + } + + proc.handleSetEACL(event) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + var expectedPutEACL cntClient.PutEACLPrm + expectedPutEACL.SetTable(table.ToV2().StableMarshal(nil)) + expectedPutEACL.SetKey(p.PublicKey().Bytes()) + expectedPutEACL.SetSignature(p.Sign(table.ToV2().StableMarshal(nil))) + + require.EqualValues(t, []cntClient.PutEACLPrm{expectedPutEACL}, cc.putEACL, "invalid set EACL requests") +} + +type testAlphabetState struct { + isAlphabet bool +} + +func (s *testAlphabetState) IsAlphabet() bool { + return s.isAlphabet +} + +type testNetworkState struct { + homHashDisabled bool + epoch uint64 +} + +func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) { + return s.homHashDisabled, nil +} + +func (s *testNetworkState) Epoch() (uint64, error) { + return s.epoch, nil +} + +type testContainerClient struct { + contractAddress util.Uint160 + put []cntClient.PutPrm + get 
map[string]*containercore.Container + delete []cntClient.DeletePrm + putEACL []cntClient.PutEACLPrm +} + +func (c *testContainerClient) ContractAddress() util.Uint160 { + return c.contractAddress +} + +func (c *testContainerClient) Morph() *client.Client { + return nil +} + +func (c *testContainerClient) Put(p cntClient.PutPrm) error { + c.put = append(c.put, p) + return nil +} + +func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) { + key := hex.EncodeToString(cid) + if cont, found := c.get[key]; found { + return cont, nil + } + return nil, apistatus.ContainerNotFound{} +} + +func (c *testContainerClient) Delete(p cntClient.DeletePrm) error { + c.delete = append(c.delete, p) + return nil +} + +func (c *testContainerClient) PutEACL(p cntClient.PutEACLPrm) error { + c.putEACL = append(c.putEACL, p) + return nil +} + +type testIDClient struct { + publicKeys keys.PublicKeys +} + +func (c *testIDClient) AccountKeys(p frostfsid.AccountKeysPrm) (keys.PublicKeys, error) { + return c.publicKeys, nil +} + +var _ putEvent = &testPutEvent{} + +type testPutEvent struct { + cnr *containerSDK.Container + pk *keys.PrivateKey + st []byte +} + +func (e *testPutEvent) MorphEvent() {} + +func (e *testPutEvent) Container() []byte { + return e.cnr.Marshal() +} + +func (e *testPutEvent) PublicKey() []byte { + return e.pk.PublicKey().Bytes() +} + +func (e *testPutEvent) Signature() []byte { + return e.pk.Sign(e.cnr.Marshal()) +} + +func (e *testPutEvent) SessionToken() []byte { + return e.st +} +func (e *testPutEvent) NotaryRequest() *payload.P2PNotaryRequest { + return nil +} diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 8b244aa5d..603ab86d1 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -3,14 +3,13 @@ package container import ( "fmt" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" "github.com/nspcc-dev/neo-go/pkg/network/payload" "go.uber.org/zap" ) @@ -35,7 +34,7 @@ type putContainerContext struct { // and sending approve tx back to the morph. func (cp *Processor) processContainerPut(put putEvent) { if !cp.alphabetState.IsAlphabet() { - cp.log.Info("non alphabet mode, ignore container put") + cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut) return } @@ -45,7 +44,7 @@ func (cp *Processor) processContainerPut(put putEvent) { err := cp.checkPutContainer(ctx) if err != nil { - cp.log.Error("put container check failed", + cp.log.Error(logs.ContainerPutContainerCheckFailed, zap.String("error", err.Error()), ) @@ -76,12 +75,6 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { return fmt.Errorf("auth container creation: %w", err) } - // check owner allowance in the subnetwork - err = checkSubnet(cp.subnetClient, cnr) - if err != nil { - return fmt.Errorf("incorrect subnetwork: %w", err) - } - // check homomorphic hashing setting err = checkHomomorphicHashing(cp.netState, cnr) if err != nil { @@ -119,7 +112,7 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) { err = cp.cnrClient.Put(prm) } if err != nil { - cp.log.Error("could not approve put container", + cp.log.Error(logs.ContainerCouldNotApprovePutContainer, zap.String("error", err.Error()), ) } @@ -127,15 +120,15 @@ func 
(cp *Processor) approvePutContainer(ctx *putContainerContext) { // Process delete container operation from the user by checking container sanity // and sending approve tx back to morph. -func (cp *Processor) processContainerDelete(e *containerEvent.Delete) { +func (cp *Processor) processContainerDelete(e containerEvent.Delete) { if !cp.alphabetState.IsAlphabet() { - cp.log.Info("non alphabet mode, ignore container delete") + cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete) return } err := cp.checkDeleteContainer(e) if err != nil { - cp.log.Error("delete container check failed", + cp.log.Error(logs.ContainerDeleteContainerCheckFailed, zap.String("error", err.Error()), ) @@ -145,7 +138,7 @@ func (cp *Processor) processContainerDelete(e *containerEvent.Delete) { cp.approveDeleteContainer(e) } -func (cp *Processor) checkDeleteContainer(e *containerEvent.Delete) error { +func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { binCnr := e.ContainerID() var idCnr cid.ID @@ -177,7 +170,7 @@ func (cp *Processor) checkDeleteContainer(e *containerEvent.Delete) error { return nil } -func (cp *Processor) approveDeleteContainer(e *containerEvent.Delete) { +func (cp *Processor) approveDeleteContainer(e containerEvent.Delete) { var err error prm := cntClient.DeletePrm{} @@ -194,7 +187,7 @@ func (cp *Processor) approveDeleteContainer(e *containerEvent.Delete) { err = cp.cnrClient.Delete(prm) } if err != nil { - cp.log.Error("could not approve delete container", + cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer, zap.String("error", err.Error()), ) } @@ -221,29 +214,6 @@ func checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error { return nil } -func checkSubnet(subCli *morphsubnet.Client, cnr containerSDK.Container) error { - prm := morphsubnet.UserAllowedPrm{} - - subID := cnr.PlacementPolicy().Subnet() - if subnetid.IsZero(subID) { - return nil - } - - prm.SetID(subID.Marshal()) - prm.SetClient(cnr.Owner().WalletBytes()) - 
- res, err := subCli.UserAllowed(prm) - if err != nil { - return fmt.Errorf("could not check user in contract: %w", err) - } - - if !res.Allowed() { - return fmt.Errorf("user is not allowed to create containers in %v subnetwork", subID) - } - - return nil -} - func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error { netSetting, err := ns.HomomorphicHashDisabled() if err != nil { diff --git a/pkg/innerring/processors/container/process_eacl.go b/pkg/innerring/processors/container/process_eacl.go index e8bbb5db6..a8d880c8f 100644 --- a/pkg/innerring/processors/container/process_eacl.go +++ b/pkg/innerring/processors/container/process_eacl.go @@ -4,22 +4,23 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" + containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "go.uber.org/zap" ) -func (cp *Processor) processSetEACL(e container.SetEACL) { +func (cp *Processor) processSetEACL(e containerEvent.SetEACL) { if !cp.alphabetState.IsAlphabet() { - cp.log.Info("non alphabet mode, ignore set EACL") + cp.log.Info(logs.ContainerNonAlphabetModeIgnoreSetEACL) return } err := cp.checkSetEACL(e) if err != nil { - cp.log.Error("set EACL check failed", + cp.log.Error(logs.ContainerSetEACLCheckFailed, zap.String("error", err.Error()), ) @@ -29,7 +30,7 @@ func (cp *Processor) processSetEACL(e container.SetEACL) { cp.approveSetEACL(e) } -func (cp *Processor) checkSetEACL(e container.SetEACL) error { +func (cp *Processor) checkSetEACL(e containerEvent.SetEACL) error { binTable := e.Table() // unmarshal table @@ -73,7 +74,7 @@ func (cp *Processor) checkSetEACL(e container.SetEACL) error { return nil } -func (cp *Processor) approveSetEACL(e 
container.SetEACL) { +func (cp *Processor) approveSetEACL(e containerEvent.SetEACL) { var err error prm := cntClient.PutEACLPrm{} @@ -91,7 +92,7 @@ func (cp *Processor) approveSetEACL(e container.SetEACL) { err = cp.cnrClient.PutEACL(prm) } if err != nil { - cp.log.Error("could not approve set EACL", + cp.log.Error(logs.ContainerCouldNotApproveSetEACL, zap.String("error", err.Error()), ) } diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index ae0d28729..2141b0764 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -4,13 +4,17 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" - morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/nspcc-dev/neo-go/pkg/core/mempoolevent" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" "go.uber.org/zap" ) @@ -21,14 +25,26 @@ type ( IsAlphabet() bool } + ContClient interface { + ContractAddress() util.Uint160 + Morph() *client.Client + Put(p cntClient.PutPrm) error + Get(cid []byte) (*containercore.Container, error) + Delete(p cntClient.DeletePrm) error + PutEACL(p cntClient.PutEACLPrm) error + } + + IDClient interface { + AccountKeys(p frostfsid.AccountKeysPrm) (keys.PublicKeys, error) + } + 
// Processor of events produced by container contract in the sidechain. Processor struct { log *logger.Logger pool *ants.Pool alphabetState AlphabetState - cnrClient *container.Client // notary must be enabled - idClient *frostfsid.Client - subnetClient *morphsubnet.Client + cnrClient ContClient // notary must be enabled + idClient IDClient netState NetworkState notaryDisabled bool } @@ -38,9 +54,8 @@ type ( Log *logger.Logger PoolSize int AlphabetState AlphabetState - ContainerClient *container.Client - FrostFSIDClient *frostfsid.Client - SubnetClient *morphsubnet.Client + ContainerClient ContClient + FrostFSIDClient IDClient NetworkState NetworkState NotaryDisabled bool } @@ -64,13 +79,6 @@ type NetworkState interface { HomomorphicHashDisabled() (bool, error) } -const ( - putNotification = "containerPut" - deleteNotification = "containerDelete" - - setEACLNotification = "setEACL" -) - // New creates a container contract processor instance. func New(p *Params) (*Processor, error) { switch { @@ -84,11 +92,9 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/container: FrostFS ID client is not set") case p.NetworkState == nil: return nil, errors.New("ir/container: network state is not set") - case p.SubnetClient == nil: - return nil, errors.New("ir/container: subnet client is not set") } - p.Log.Debug("container worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { @@ -103,72 +109,17 @@ func New(p *Params) (*Processor, error) { idClient: p.FrostFSIDClient, netState: p.NetworkState, notaryDisabled: p.NotaryDisabled, - subnetClient: p.SubnetClient, }, nil } // ListenerNotificationParsers for the 'event.Listener' event producer. 
func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - if !cp.notaryDisabled { - return nil - } - - var ( - parsers = make([]event.NotificationParserInfo, 0, 3) - - p event.NotificationParserInfo - ) - - p.SetScriptHash(cp.cnrClient.ContractAddress()) - - // container put - p.SetType(event.TypeFromString(putNotification)) - p.SetParser(containerEvent.ParsePut) - parsers = append(parsers, p) - - // container delete - p.SetType(event.TypeFromString(deleteNotification)) - p.SetParser(containerEvent.ParseDelete) - parsers = append(parsers, p) - - // set eACL - p.SetType(event.TypeFromString(setEACLNotification)) - p.SetParser(containerEvent.ParseSetEACL) - parsers = append(parsers, p) - - return parsers + return nil } // ListenerNotificationHandlers for the 'event.Listener' event producer. func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - if !cp.notaryDisabled { - return nil - } - - var ( - handlers = make([]event.NotificationHandlerInfo, 0, 3) - - h event.NotificationHandlerInfo - ) - - h.SetScriptHash(cp.cnrClient.ContractAddress()) - - // container put - h.SetType(event.TypeFromString(putNotification)) - h.SetHandler(cp.handlePut) - handlers = append(handlers, h) - - // container delete - h.SetType(event.TypeFromString(deleteNotification)) - h.SetHandler(cp.handleDelete) - handlers = append(handlers, h) - - // set eACL - h.SetType(event.TypeFromString(setEACLNotification)) - h.SetHandler(cp.handleSetEACL) - handlers = append(handlers, h) - - return handlers + return nil } // ListenerNotaryParsers for the 'event.Listener' notary event producer. @@ -237,8 +188,3 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { return hh } - -// TimersHandlers for the 'Timers' event producer. 
-func (cp *Processor) TimersHandlers() []event.NotificationHandlerInfo { - return nil -} diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go index bc0dbec7f..574cf057a 100644 --- a/pkg/innerring/processors/frostfs/handlers.go +++ b/pkg/innerring/processors/frostfs/handlers.go @@ -3,6 +3,7 @@ package frostfs import ( "encoding/hex" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" "github.com/nspcc-dev/neo-go/pkg/util/slice" @@ -11,97 +12,97 @@ import ( func (np *Processor) handleDeposit(ev event.Event) { deposit := ev.(frostfsEvent.Deposit) - np.log.Info("notification", + np.log.Info(logs.Notification, zap.String("type", "deposit"), zap.String("id", hex.EncodeToString(slice.CopyReverse(deposit.ID())))) // send event to the worker pool - err := np.pool.Submit(func() { np.processDeposit(&deposit) }) + err := np.pool.Submit(func() { np.processDeposit(deposit) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleWithdraw(ev event.Event) { withdraw := ev.(frostfsEvent.Withdraw) - np.log.Info("notification", + np.log.Info(logs.Notification, zap.String("type", "withdraw"), zap.String("id", hex.EncodeToString(slice.CopyReverse(withdraw.ID())))) // send event to the worker pool - err := np.pool.Submit(func() { np.processWithdraw(&withdraw) }) + err := np.pool.Submit(func() { np.processWithdraw(withdraw) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) 
handleCheque(ev event.Event) { cheque := ev.(frostfsEvent.Cheque) - np.log.Info("notification", + np.log.Info(logs.Notification, zap.String("type", "cheque"), zap.String("id", hex.EncodeToString(cheque.ID()))) // send event to the worker pool - err := np.pool.Submit(func() { np.processCheque(&cheque) }) + err := np.pool.Submit(func() { np.processCheque(cheque) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleConfig(ev event.Event) { cfg := ev.(frostfsEvent.Config) - np.log.Info("notification", + np.log.Info(logs.Notification, zap.String("type", "set config"), zap.String("key", hex.EncodeToString(cfg.Key())), zap.String("value", hex.EncodeToString(cfg.Value()))) // send event to the worker pool - err := np.pool.Submit(func() { np.processConfig(&cfg) }) + err := np.pool.Submit(func() { np.processConfig(cfg) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleBind(ev event.Event) { e := ev.(frostfsEvent.Bind) - np.log.Info("notification", + np.log.Info(logs.Notification, zap.String("type", "bind"), ) // send event to the worker pool - err := np.pool.Submit(func() { np.processBind(e) }) + err := np.pool.Submit(func() { np.processBind(e, true) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleUnbind(ev event.Event) { e := ev.(frostfsEvent.Unbind) - np.log.Info("notification", + np.log.Info(logs.Notification, zap.String("type", "unbind"), 
) // send event to the worker pool - err := np.pool.Submit(func() { np.processBind(e) }) + err := np.pool.Submit(func() { np.processBind(e, false) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("frostfs processor worker pool drained", + np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go new file mode 100644 index 000000000..db7835811 --- /dev/null +++ b/pkg/innerring/processors/frostfs/handlers_test.go @@ -0,0 +1,373 @@ +package frostfs + +import ( + "testing" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" + nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/stretchr/testify/require" +) + +func TestHandleDeposit(t *testing.T) { + t.Parallel() + es := &testEpochState{ + epochCounter: 100, + } + b := &testBalaceClient{} + m := &testMorphClient{ + balance: 150, + } + proc, err := newTestProc(t, func(p *Params) { + p.EpochState = es + p.BalanceClient = b + p.MorphClient = m + }) + require.NoError(t, err, "failed to create processor") + + ev := frostfsEvent.Deposit{ + IDValue: []byte{1, 2, 3, 4, 5}, + FromValue: util.Uint160{100}, + ToValue: util.Uint160{200}, + AmountValue: 1000, + } + + proc.handleDeposit(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + var expMint balance.MintPrm + expMint.SetAmount(ev.AmountValue) + 
expMint.SetID(ev.IDValue) + expMint.SetTo(ev.ToValue) + + require.EqualValues(t, []balance.MintPrm{expMint}, b.mint, "invalid mint value") + require.EqualValues(t, []transferGas{ + { + receiver: ev.ToValue, + amount: fixedn.Fixed8(50), + }, + }, m.transferGas, "invalid transfer gas") + + es.epochCounter = 109 + + proc.handleDeposit(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + expMint.SetAmount(ev.AmountValue) + expMint.SetID(ev.IDValue) + expMint.SetTo(ev.ToValue) + + require.EqualValues(t, []balance.MintPrm{expMint, expMint}, b.mint, "invalid mint value") + require.EqualValues(t, []transferGas{ + { + receiver: ev.ToValue, + amount: fixedn.Fixed8(50), + }, + }, m.transferGas, "invalid transfer gas") +} + +func TestHandleWithdraw(t *testing.T) { + t.Parallel() + es := &testEpochState{ + epochCounter: 100, + } + b := &testBalaceClient{} + m := &testMorphClient{ + balance: 150, + } + proc, err := newTestProc(t, func(p *Params) { + p.EpochState = es + p.BalanceClient = b + p.MorphClient = m + }) + require.NoError(t, err, "failed to create processor") + + ev := frostfsEvent.Withdraw{ + IDValue: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + UserValue: util.Uint160{100}, + AmountValue: 1000, + } + + proc.handleWithdraw(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + lock, err := util.Uint160DecodeBytesBE(ev.ID()[:util.Uint160Size]) + require.NoError(t, err, "failed to decode ID") + var expLock balance.LockPrm + expLock.SetAmount(ev.AmountValue) + expLock.SetID(ev.IDValue) + expLock.SetDueEpoch(int64(es.epochCounter) + int64(lockAccountLifetime)) + expLock.SetLock(lock) + expLock.SetUser(ev.UserValue) + + require.EqualValues(t, []balance.LockPrm{expLock}, b.lock, "invalid lock value") +} + +func TestHandleCheque(t *testing.T) { + t.Parallel() + es := &testEpochState{ + epochCounter: 100, + } + b := &testBalaceClient{} + m := &testMorphClient{ + balance: 150, + } + 
proc, err := newTestProc(t, func(p *Params) { + p.BalanceClient = b + p.MorphClient = m + p.EpochState = es + }) + require.NoError(t, err, "failed to create processor") + + ev := frostfsEvent.Cheque{ + IDValue: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + UserValue: util.Uint160{100}, + AmountValue: 1000, + LockValue: util.Uint160{200}, + } + + proc.handleCheque(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + var expBurn balance.BurnPrm + expBurn.SetAmount(ev.AmountValue) + expBurn.SetID(ev.IDValue) + expBurn.SetTo(util.Uint160{200}) + + require.EqualValues(t, []balance.BurnPrm{expBurn}, b.burn, "invalid burn value") +} + +func TestHandleConfig(t *testing.T) { + t.Parallel() + es := &testEpochState{ + epochCounter: 100, + } + nm := &testNetmapClient{} + m := &testMorphClient{ + balance: 150, + } + proc, err := newTestProc(t, func(p *Params) { + p.NetmapClient = nm + p.MorphClient = m + p.EpochState = es + }) + require.NoError(t, err, "failed to create processor") + + ev := frostfsEvent.Config{ + IDValue: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, + KeyValue: []byte{1, 2, 3, 4, 5}, + ValueValue: []byte{6, 7, 8, 9, 0}, + TxHashValue: util.Uint256{100}, + } + + proc.handleConfig(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + var expConfig nmClient.SetConfigPrm + expConfig.SetHash(ev.TxHashValue) + expConfig.SetID(ev.IDValue) + expConfig.SetKey(ev.KeyValue) + expConfig.SetValue(ev.ValueValue) + + require.EqualValues(t, []nmClient.SetConfigPrm{expConfig}, nm.config, "invalid config value") +} + +func TestHandleUnbind(t *testing.T) { + t.Parallel() + es := &testEpochState{ + epochCounter: 100, + } + m := &testMorphClient{ + balance: 150, + } + id := &testIDClient{} + proc, err := newTestProc(t, func(p *Params) { + p.EpochState = es + p.MorphClient = m + p.FrostFSIDClient = id + }) + require.NoError(t, err, "failed to create 
processor") + + p, err := keys.NewPrivateKey() + require.NoError(t, err) + + evUnbind := frostfsEvent.Unbind{ + BindCommon: frostfsEvent.BindCommon{ + UserValue: util.Uint160{49}.BytesBE(), + KeysValue: [][]byte{ + p.PublicKey().Bytes(), + }, + TxHashValue: util.Uint256{100}, + }, + } + + proc.handleUnbind(evUnbind) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + var userID user.ID + userID.SetScriptHash(util.Uint160{49}) + + var expBind frostfsid.CommonBindPrm + expBind.SetOwnerID(userID.WalletBytes()) + expBind.SetKeys(evUnbind.BindCommon.KeysValue) + expBind.SetHash(evUnbind.BindCommon.TxHashValue) + + var expNilSlice []frostfsid.CommonBindPrm + + require.EqualValues(t, []frostfsid.CommonBindPrm{expBind}, id.remove, "invalid remove keys value") + require.EqualValues(t, expNilSlice, id.add, "invalid add keys value") + + evBind := frostfsEvent.Bind{ + BindCommon: frostfsEvent.BindCommon{ + UserValue: util.Uint160{49}.BytesBE(), + KeysValue: [][]byte{ + p.PublicKey().Bytes(), + }, + TxHashValue: util.Uint256{100}, + }, + } + + proc.handleBind(evBind) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + require.EqualValues(t, []frostfsid.CommonBindPrm{expBind}, id.remove, "invalid remove keys value") + require.EqualValues(t, []frostfsid.CommonBindPrm{expBind}, id.add, "invalid add keys value") +} + +func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) { + p := &Params{ + Log: test.NewLogger(t, true), + PoolSize: 1, + FrostFSContract: util.Uint160{0}, + FrostFSIDClient: &testIDClient{}, + BalanceClient: &testBalaceClient{}, + NetmapClient: &testNetmapClient{}, + MorphClient: &testMorphClient{}, + EpochState: &testEpochState{}, + AlphabetState: &testAlphabetState{isAlphabet: true}, + Converter: &testPrecisionConverter{}, + MintEmitCacheSize: 100, + MintEmitThreshold: 10, + MintEmitValue: fixedn.Fixed8(50), + GasBalanceThreshold: 50, + } + + nonDefault(p) + + return New(p) +} + +type 
testEpochState struct { + epochCounter uint64 +} + +func (s *testEpochState) EpochCounter() uint64 { + return s.epochCounter +} + +type testAlphabetState struct { + isAlphabet bool +} + +func (s *testAlphabetState) IsAlphabet() bool { + return s.isAlphabet +} + +type testPrecisionConverter struct { +} + +func (c *testPrecisionConverter) ToBalancePrecision(v int64) int64 { + return v +} + +type testBalaceClient struct { + mint []balance.MintPrm + lock []balance.LockPrm + burn []balance.BurnPrm +} + +func (c *testBalaceClient) Mint(p balance.MintPrm) error { + c.mint = append(c.mint, p) + return nil +} +func (c *testBalaceClient) Lock(p balance.LockPrm) error { + c.lock = append(c.lock, p) + return nil +} +func (c *testBalaceClient) Burn(p balance.BurnPrm) error { + c.burn = append(c.burn, p) + return nil +} + +type testNetmapClient struct { + config []nmClient.SetConfigPrm +} + +func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error { + c.config = append(c.config, p) + return nil +} + +type transferGas struct { + receiver util.Uint160 + amount fixedn.Fixed8 +} + +type testMorphClient struct { + balance int64 + transferGas []transferGas +} + +func (c *testMorphClient) GasBalance() (res int64, err error) { + return c.balance, nil +} +func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error { + c.transferGas = append(c.transferGas, transferGas{ + receiver: receiver, + amount: amount, + }) + return nil +} + +type testIDClient struct { + add []frostfsid.CommonBindPrm + remove []frostfsid.CommonBindPrm +} + +func (c *testIDClient) AddKeys(p frostfsid.CommonBindPrm) error { + c.add = append(c.add, p) + return nil +} + +func (c *testIDClient) RemoveKeys(args frostfsid.CommonBindPrm) error { + c.remove = append(c.remove, args) + return nil +} diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go index b28efaa33..cfbf21b08 100644 --- 
a/pkg/innerring/processors/frostfs/process_assets.go +++ b/pkg/innerring/processors/frostfs/process_assets.go @@ -1,6 +1,7 @@ package frostfs import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" "github.com/nspcc-dev/neo-go/pkg/util" @@ -14,9 +15,9 @@ const ( // Process deposit event by invoking a balance contract and sending native // gas in the sidechain. -func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { +func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore deposit") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit) return } @@ -29,7 +30,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { // send transferX to a balance contract err := np.balanceClient.Mint(prm) if err != nil { - np.log.Error("can't transfer assets to balance contract", zap.Error(err)) + np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) } curEpoch := np.epochState.EpochCounter() @@ -43,7 +44,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { val, ok := np.mintEmitCache.Get(receiver.String()) if ok && val+np.mintEmitThreshold >= curEpoch { - np.log.Warn("double mint emission declined", + np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined, zap.String("receiver", receiver.String()), zap.Uint64("last_emission", val), zap.Uint64("current_epoch", curEpoch)) @@ -55,12 +56,12 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { // before gas transfer check if the balance is greater than the threshold balance, err := np.morphClient.GasBalance() if err != nil { - np.log.Error("can't get gas balance of the node", zap.Error(err)) + np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) return } if balance < 
np.gasBalanceThreshold { - np.log.Warn("gas balance threshold has been reached", + np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached, zap.Int64("balance", balance), zap.Int64("threshold", np.gasBalanceThreshold)) @@ -69,7 +70,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { err = np.morphClient.TransferGas(receiver, np.mintEmitValue) if err != nil { - np.log.Error("can't transfer native gas to receiver", + np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver, zap.String("error", err.Error())) return @@ -79,16 +80,16 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) { } // Process withdraw event by locking assets in the balance account. -func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) { +func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore withdraw") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw) return } // create lock account lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size]) if err != nil { - np.log.Error("can't create lock account", zap.Error(err)) + np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err)) return } @@ -104,15 +105,15 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) { err = np.balanceClient.Lock(prm) if err != nil { - np.log.Error("can't lock assets for withdraw", zap.Error(err)) + np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) } } // Process cheque event by transferring assets from the lock account back to // the reserve account. 
-func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) { +func (np *Processor) processCheque(cheque frostfsEvent.Cheque) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore cheque") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque) return } @@ -124,6 +125,6 @@ func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) { err := np.balanceClient.Burn(prm) if err != nil { - np.log.Error("can't transfer assets to fed contract", zap.Error(err)) + np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) } } diff --git a/pkg/innerring/processors/frostfs/process_bind.go b/pkg/innerring/processors/frostfs/process_bind.go index 0abce5827..a9b523a77 100644 --- a/pkg/innerring/processors/frostfs/process_bind.go +++ b/pkg/innerring/processors/frostfs/process_bind.go @@ -4,8 +4,8 @@ import ( "crypto/elliptic" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/util" @@ -18,21 +18,20 @@ type bindCommon interface { TxHash() util.Uint256 } -func (np *Processor) processBind(e bindCommon) { +func (np *Processor) processBind(e bindCommon, bind bool) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore bind") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreBind) return } c := &bindCommonContext{ bindCommon: e, + bind: bind, } - _, c.bind = e.(frostfs.Bind) - err := np.checkBindCommon(c) if err != nil { - np.log.Error("invalid manage key event", + np.log.Error(logs.FrostFSInvalidManageKeyEvent, zap.Bool("bind", c.bind), zap.String("error", err.Error()), ) @@ -77,7 +76,7 @@ func (np *Processor) approveBindCommon(e *bindCommonContext) { u160, err := util.Uint160DecodeBytesBE(scriptHash) if err != nil { - 
np.log.Error("could not decode script hash from bytes", + np.log.Error(logs.FrostFSCouldNotDecodeScriptHashFromBytes, zap.String("error", err.Error()), ) diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go index ecc90332f..ce2dabfdd 100644 --- a/pkg/innerring/processors/frostfs/process_config.go +++ b/pkg/innerring/processors/frostfs/process_config.go @@ -1,6 +1,7 @@ package frostfs import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" "go.uber.org/zap" @@ -8,9 +9,9 @@ import ( // Process config event by setting configuration value from the mainchain in // the sidechain. -func (np *Processor) processConfig(config *frostfsEvent.Config) { +func (np *Processor) processConfig(config frostfsEvent.Config) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore config") + np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig) return } @@ -23,6 +24,6 @@ func (np *Processor) processConfig(config *frostfsEvent.Config) { err := np.netmapClient.SetConfig(prm) if err != nil { - np.log.Error("can't relay set config event", zap.Error(err)) + np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) } } diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go index e9504cdb4..2af15a814 100644 --- a/pkg/innerring/processors/frostfs/processor.go +++ b/pkg/innerring/processors/frostfs/processor.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid" nmClient 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -35,14 +35,34 @@ type ( ToBalancePrecision(int64) int64 } + BalanceClient interface { + Mint(p balance.MintPrm) error + Lock(p balance.LockPrm) error + Burn(p balance.BurnPrm) error + } + + NetmapClient interface { + SetConfig(p nmClient.SetConfigPrm) error + } + + MorphClient interface { + GasBalance() (res int64, err error) + TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error + } + + IDClient interface { + AddKeys(p frostfsid.CommonBindPrm) error + RemoveKeys(args frostfsid.CommonBindPrm) error + } + // Processor of events produced by frostfs contract in main net. Processor struct { log *logger.Logger pool *ants.Pool frostfsContract util.Uint160 - balanceClient *balance.Client - netmapClient *nmClient.Client - morphClient *client.Client + balanceClient BalanceClient + netmapClient NetmapClient + morphClient MorphClient epochState EpochState alphabetState AlphabetState converter PrecisionConverter @@ -51,8 +71,7 @@ type ( mintEmitThreshold uint64 mintEmitValue fixedn.Fixed8 gasBalanceThreshold int64 - - frostfsIDClient *frostfsid.Client + frostfsIDClient IDClient } // Params of the processor constructor. 
@@ -60,10 +79,10 @@ type ( Log *logger.Logger PoolSize int FrostFSContract util.Uint160 - FrostFSIDClient *frostfsid.Client - BalanceClient *balance.Client - NetmapClient *nmClient.Client - MorphClient *client.Client + FrostFSIDClient IDClient + BalanceClient BalanceClient + NetmapClient NetmapClient + MorphClient MorphClient EpochState EpochState AlphabetState AlphabetState Converter PrecisionConverter @@ -98,7 +117,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/frostfs: balance precision converter is not set") } - p.Log.Debug("frostfs worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { @@ -225,8 +244,3 @@ func (np *Processor) ListenerNotaryParsers() []event.NotaryParserInfo { func (np *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { return nil } - -// TimersHandlers for the 'Timers' event producer. 
-func (np *Processor) TimersHandlers() []event.NotificationHandlerInfo { - return nil -} diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go index bfa88d3f0..727acc21a 100644 --- a/pkg/innerring/processors/governance/handlers.go +++ b/pkg/innerring/processors/governance/handlers.go @@ -1,6 +1,7 @@ package governance import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement" "github.com/nspcc-dev/neo-go/pkg/core/native" @@ -30,14 +31,14 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) { return } - gp.log.Info("new event", zap.String("type", typ)) + gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ)) // send event to the worker pool err := gp.pool.Submit(func() { gp.processAlphabetSync(hash) }) if err != nil { // there system can be moved into controlled degradation stage - gp.log.Warn("governance worker pool drained", + gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained, zap.Int("capacity", gp.pool.Cap())) } } diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go new file mode 100644 index 000000000..23f91869a --- /dev/null +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -0,0 +1,304 @@ +package governance + +import ( + "encoding/binary" + "sort" + "testing" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" + nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + 
"github.com/nspcc-dev/neo-go/pkg/util" + "github.com/stretchr/testify/require" +) + +func TestHandleAlphabetSyncEvent(t *testing.T) { + t.Parallel() + testKeys := generateTestKeys(t) + + es := &testEpochState{ + epoch: 100, + } + as := &testAlphabetState{ + isAlphabet: true, + } + v := &testVoter{} + irf := &testIRFetcher{ + publicKeys: testKeys.sidechainKeys, + } + m := &testMorphClient{ + commiteeKeys: testKeys.sidechainKeys, + } + mn := &testMainnetClient{ + alphabetKeys: testKeys.mainnetKeys, + } + f := &testFrostFSClient{} + nm := &testNetmapClient{} + + proc, err := New( + &Params{ + Log: test.NewLogger(t, true), + EpochState: es, + AlphabetState: as, + Voter: v, + IRFetcher: irf, + NotaryDisabled: true, + MorphClient: m, + MainnetClient: mn, + FrostFSClient: f, + NetmapClient: nm, + }, + ) + + require.NoError(t, err, "failed to create processor") + + ev := Sync{ + txHash: util.Uint256{100}, + } + + proc.HandleAlphabetSync(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + require.EqualValues(t, []VoteValidatorPrm{ + { + Validators: testKeys.newAlphabetExp, + Hash: &ev.txHash, + }, + }, v.votes, "invalid vote calls") + + var irUpdateExp nmClient.UpdateIRPrm + irUpdateExp.SetKeys(testKeys.newInnerRingExp) + irUpdateExp.SetHash(ev.txHash) + + require.EqualValues(t, []nmClient.UpdateIRPrm{irUpdateExp}, nm.updates, "invalid IR updates") + + var expAlphabetUpdates []client.UpdateAlphabetListPrm + require.EqualValues(t, expAlphabetUpdates, m.alphabetUpdates, "invalid alphabet updates") + + var expNotaryUpdates []client.UpdateNotaryListPrm + require.EqualValues(t, expNotaryUpdates, m.notaryUpdates, "invalid notary list updates") + + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, es.epoch) + + id := append([]byte(alphabetUpdateIDPrefix), buf...) 
+ var expFrostFSAlphabetUpd frostfscontract.AlphabetUpdatePrm + expFrostFSAlphabetUpd.SetID(id) + expFrostFSAlphabetUpd.SetPubs(testKeys.newAlphabetExp) + + require.EqualValues(t, []frostfscontract.AlphabetUpdatePrm{expFrostFSAlphabetUpd}, f.updates, "invalid FrostFS alphabet updates") +} + +func TestHandleAlphabetDesignateEvent(t *testing.T) { + t.Parallel() + testKeys := generateTestKeys(t) + + es := &testEpochState{ + epoch: 100, + } + as := &testAlphabetState{ + isAlphabet: true, + } + v := &testVoter{} + irf := &testIRFetcher{ + publicKeys: testKeys.sidechainKeys, + } + m := &testMorphClient{ + commiteeKeys: testKeys.sidechainKeys, + } + mn := &testMainnetClient{ + alphabetKeys: testKeys.mainnetKeys, + } + f := &testFrostFSClient{} + nm := &testNetmapClient{} + + proc, err := New( + &Params{ + Log: test.NewLogger(t, true), + EpochState: es, + AlphabetState: as, + Voter: v, + IRFetcher: irf, + NotaryDisabled: false, + MorphClient: m, + MainnetClient: mn, + FrostFSClient: f, + NetmapClient: nm, + }, + ) + + require.NoError(t, err, "failed to create processor") + + ev := rolemanagement.Designate{ + TxHash: util.Uint256{100}, + Role: noderoles.NeoFSAlphabet, + } + + proc.HandleAlphabetSync(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + require.EqualValues(t, []VoteValidatorPrm{ + { + Validators: testKeys.newAlphabetExp, + Hash: &ev.TxHash, + }, + }, v.votes, "invalid vote calls") + + var irUpdatesExp []nmClient.UpdateIRPrm + require.EqualValues(t, irUpdatesExp, nm.updates, "invalid IR updates") + + var alpabetUpdExp client.UpdateAlphabetListPrm + alpabetUpdExp.SetList(testKeys.newInnerRingExp) + alpabetUpdExp.SetHash(ev.TxHash) + require.EqualValues(t, []client.UpdateAlphabetListPrm{alpabetUpdExp}, m.alphabetUpdates, "invalid alphabet updates") + + var expNotaryUpdate client.UpdateNotaryListPrm + expNotaryUpdate.SetList(testKeys.newAlphabetExp) + expNotaryUpdate.SetHash(ev.TxHash) + require.EqualValues(t, 
[]client.UpdateNotaryListPrm{expNotaryUpdate}, m.notaryUpdates, "invalid notary list updates") + + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, es.epoch) + + id := append([]byte(alphabetUpdateIDPrefix), buf...) + var expFrostFSAlphabetUpd frostfscontract.AlphabetUpdatePrm + expFrostFSAlphabetUpd.SetID(id) + expFrostFSAlphabetUpd.SetPubs(testKeys.newAlphabetExp) + + require.EqualValues(t, []frostfscontract.AlphabetUpdatePrm{expFrostFSAlphabetUpd}, f.updates, "invalid FrostFS alphabet updates") +} + +type testKeys struct { + sidechainKeys keys.PublicKeys + mainnetKeys keys.PublicKeys + newAlphabetExp keys.PublicKeys + newInnerRingExp keys.PublicKeys +} + +func generateTestKeys(t *testing.T) testKeys { + for { + var result testKeys + + for i := 0; i < 4; i++ { + pk, err := keys.NewPrivateKey() + require.NoError(t, err, "failed to create private key") + result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey()) + } + + result.mainnetKeys = append(result.mainnetKeys, result.sidechainKeys...) + pk, err := keys.NewPrivateKey() + require.NoError(t, err, "failed to create private key") + result.mainnetKeys = append(result.mainnetKeys, pk.PublicKey()) + + result.newAlphabetExp, err = newAlphabetList(result.sidechainKeys, result.mainnetKeys) + require.NoError(t, err, "failed to create expected new alphabet") + + if len(result.newAlphabetExp) == 0 { + continue //can be happen because of random and sort + } + + var irKeys keys.PublicKeys + irKeys = append(irKeys, result.sidechainKeys...) 
+ result.newInnerRingExp, err = updateInnerRing(irKeys, result.sidechainKeys, result.newAlphabetExp) + require.NoError(t, err, "failed to create expected new IR") + sort.Sort(result.newInnerRingExp) + + return result + } +} + +type testEpochState struct { + epoch uint64 +} + +func (s *testEpochState) EpochCounter() uint64 { + return s.epoch +} + +type testAlphabetState struct { + isAlphabet bool +} + +func (s *testAlphabetState) IsAlphabet() bool { + return s.isAlphabet +} + +type testVoter struct { + votes []VoteValidatorPrm +} + +func (v *testVoter) VoteForSidechainValidator(prm VoteValidatorPrm) error { + v.votes = append(v.votes, prm) + return nil +} + +type testIRFetcher struct { + publicKeys keys.PublicKeys +} + +func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { + return f.publicKeys, nil +} + +type testMorphClient struct { + commiteeKeys keys.PublicKeys + + alphabetUpdates []client.UpdateAlphabetListPrm + notaryUpdates []client.UpdateNotaryListPrm +} + +func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) { + return c.commiteeKeys, nil +} + +func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error { + c.alphabetUpdates = append(c.alphabetUpdates, prm) + return nil +} + +func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error { + c.notaryUpdates = append(c.notaryUpdates, prm) + return nil +} + +type testMainnetClient struct { + alphabetKeys keys.PublicKeys + designateHash util.Uint160 +} + +func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) { + return c.alphabetKeys, nil +} + +func (c *testMainnetClient) GetDesignateHash() util.Uint160 { + return c.designateHash +} + +type testFrostFSClient struct { + updates []frostfscontract.AlphabetUpdatePrm +} + +func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error { + c.updates = append(c.updates, p) + return nil +} + +type testNetmapClient struct { + updates 
[]nmClient.UpdateIRPrm +} + +func (c *testNetmapClient) UpdateInnerRing(p nmClient.UpdateIRPrm) error { + c.updates = append(c.updates, p) + return nil +} diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index 3504e7a53..629d8741e 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -6,6 +6,7 @@ import ( "sort" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -20,37 +21,37 @@ const ( func (gp *Processor) processAlphabetSync(txHash util.Uint256) { if !gp.alphabetState.IsAlphabet() { - gp.log.Info("non alphabet mode, ignore alphabet sync") + gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) return } mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() if err != nil { - gp.log.Error("can't fetch alphabet list from main net", + gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet, zap.String("error", err.Error())) return } sidechainAlphabet, err := gp.morphClient.Committee() if err != nil { - gp.log.Error("can't fetch alphabet list from side chain", + gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain, zap.String("error", err.Error())) return } newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet) if err != nil { - gp.log.Error("can't merge alphabet lists from main net and side chain", + gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, zap.String("error", err.Error())) return } if newAlphabet == nil { - gp.log.Info("no governance update, alphabet list has not been changed") + gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged) return } - gp.log.Info("alphabet list has 
been changed, starting update", + gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate, zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)), zap.String("new_alphabet", prettyKeys(newAlphabet)), ) @@ -63,7 +64,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) { // 1. Vote to sidechain committee via alphabet contracts. err = gp.voter.VoteForSidechainValidator(votePrm) if err != nil { - gp.log.Error("can't vote for side chain committee", + gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee, zap.String("error", err.Error())) } @@ -76,7 +77,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) { // 4. Update FrostFS contract in the mainnet. gp.updateFrostFSContractInMainnet(newAlphabet) - gp.log.Info("finished alphabet list update") + gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate) } func prettyKeys(keys keys.PublicKeys) string { @@ -94,21 +95,21 @@ func prettyKeys(keys keys.PublicKeys) string { func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { innerRing, err := gp.irFetcher.InnerRingKeys() if err != nil { - gp.log.Error("can't fetch inner ring list from side chain", + gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain, zap.String("error", err.Error())) return } newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) if err != nil { - gp.log.Error("can't create new inner ring list with new alphabet keys", + gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) return } sort.Sort(newInnerRing) - gp.log.Info("update of the inner ring list", + gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList, zap.String("before", prettyKeys(innerRing)), zap.String("after", prettyKeys(newInnerRing)), ) @@ -130,7 +131,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl } if err != nil { - gp.log.Error("can't update 
inner ring list with new alphabet keys", + gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) } } @@ -147,7 +148,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, tx err := gp.morphClient.UpdateNotaryList(updPrm) if err != nil { - gp.log.Error("can't update list of notary nodes in side chain", + gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, zap.String("error", err.Error())) } } @@ -167,7 +168,7 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) err := gp.frostfsClient.AlphabetUpdate(prm) if err != nil { - gp.log.Error("can't update list of alphabet nodes in frostfs contract", + gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, zap.String("error", err.Error())) } } diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index 9397186ee..e08bd3809 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -53,20 +53,39 @@ type ( InnerRingKeys() (keys.PublicKeys, error) } + FrostFSClient interface { + AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error + } + + NetmapClient interface { + UpdateInnerRing(p nmClient.UpdateIRPrm) error + } + + MainnetClient interface { + NeoFSAlphabetList() (res keys.PublicKeys, err error) + GetDesignateHash() util.Uint160 + } + + MorphClient interface { + Committee() (res keys.PublicKeys, err error) + UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error + UpdateNotaryList(prm client.UpdateNotaryListPrm) error + } + // Processor of events related to governance in the network. 
Processor struct { log *logger.Logger pool *ants.Pool - frostfsClient *frostfscontract.Client - netmapClient *nmClient.Client + frostfsClient FrostFSClient + netmapClient NetmapClient alphabetState AlphabetState epochState EpochState voter Voter irFetcher IRFetcher - mainnetClient *client.Client - morphClient *client.Client + mainnetClient MainnetClient + morphClient MorphClient notaryDisabled bool @@ -82,10 +101,10 @@ type ( Voter Voter IRFetcher IRFetcher - MorphClient *client.Client - MainnetClient *client.Client - FrostFSClient *frostfscontract.Client - NetmapClient *nmClient.Client + MorphClient MorphClient + MainnetClient MainnetClient + FrostFSClient FrostFSClient + NetmapClient NetmapClient NotaryDisabled bool } @@ -161,8 +180,3 @@ func (gp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo { func (gp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { return nil } - -// TimersHandlers for the 'Timers' event producer. -func (gp *Processor) TimersHandlers() []event.NotificationHandlerInfo { - return nil -} diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go index 54e4ea3ab..6adeac562 100644 --- a/pkg/innerring/processors/netmap/handlers.go +++ b/pkg/innerring/processors/netmap/handlers.go @@ -3,30 +3,30 @@ package netmap import ( "encoding/hex" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - subnetevents "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet" "go.uber.org/zap" ) func (np *Processor) HandleNewEpochTick(ev event.Event) { _ = ev.(timerEvent.NewEpochTick) - np.log.Info("tick", zap.String("type", "epoch")) + np.log.Info(logs.NetmapTick, zap.String("type", "epoch")) // send an event to the worker pool err := np.pool.Submit(func() { 
np.processNewEpochTick() }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleNewEpoch(ev event.Event) { epochEvent := ev.(netmapEvent.NewEpoch) - np.log.Info("notification", + np.log.Info(logs.Notification, zap.String("type", "new epoch"), zap.Uint64("value", epochEvent.EpochNumber())) @@ -37,7 +37,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } @@ -45,7 +45,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) { func (np *Processor) handleAddPeer(ev event.Event) { newPeer := ev.(netmapEvent.AddPeer) - np.log.Info("notification", + np.log.Info(logs.Notification, zap.String("type", "add peer"), ) @@ -56,14 +56,14 @@ func (np *Processor) handleAddPeer(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleUpdateState(ev event.Event) { updPeer := ev.(netmapEvent.UpdatePeer) - np.log.Info("notification", + np.log.Info(logs.Notification, zap.String("type", "update peer state"), zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes()))) @@ -74,21 +74,21 @@ func (np *Processor) handleUpdateState(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleCleanupTick(ev event.Event) { if !np.netmapSnapshot.enabled { - np.log.Debug("netmap clean up 
routine is disabled") + np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518) return } cleanup := ev.(netmapCleanupTick) - np.log.Info("tick", zap.String("type", "netmap cleaner")) + np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner")) // send event to the worker pool err := np.pool.Submit(func() { @@ -96,26 +96,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", - zap.Int("capacity", np.pool.Cap())) - } -} - -func (np *Processor) handleRemoveNode(ev event.Event) { - removeNode := ev.(subnetevents.RemoveNode) - - np.log.Info("notification", - zap.String("type", "remove node from subnet"), - zap.String("subnetID", hex.EncodeToString(removeNode.SubnetworkID())), - zap.String("key", hex.EncodeToString(removeNode.Node())), - ) - - err := np.pool.Submit(func() { - np.processRemoveSubnetNode(removeNode) - }) - if err != nil { - // there system can be moved into controlled degradation stage - np.log.Warn("netmap worker pool drained", + np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go new file mode 100644 index 000000000..4905b45d2 --- /dev/null +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -0,0 +1,562 @@ +package netmap + +import ( + "fmt" + "testing" + "time" + + v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" + netmapContract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" + timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" + netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/core/transaction" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/nspcc-dev/neo-go/pkg/network/payload" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/stretchr/testify/require" +) + +func TestNewEpochTick(t *testing.T) { + t.Parallel() + es := &testEpochState{ + counter: 100, + } + nc := &testNetmapClient{} + + proc, err := newTestProc(t, func(p *Params) { + p.NotaryDisabled = true + p.CleanupEnabled = true + p.EpochState = es + p.NetmapClient = nc + }) + + require.NoError(t, err, "failed to create processor") + + ev := timerEvent.NewEpochTick{} + proc.HandleNewEpochTick(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + require.EqualValues(t, []uint64{101}, nc.newEpochs, "invalid epochs") +} + +func TestNewEpoch(t *testing.T) { + t.Parallel() + var node1 netmap.NodeInfo + key1, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") + require.NoError(t, err, "failed to parse key1") + node1.SetPublicKey(key1.Bytes()) + + var node2 netmap.NodeInfo + key2, err := keys.NewPublicKeyFromString("02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3") + require.NoError(t, err, "failed to parse key2") + node2.SetPublicKey(key2.Bytes()) + + network := &netmap.NetMap{} + network.SetNodes([]netmap.NodeInfo{node1, node2}) + + es := &testEpochState{ + counter: 100, + duration: 10, + } + r := &testEpochResetter{} + cc := &testContainerClient{} + nc := &testNetmapClient{ + epochDuration: 20, + txHeights: map[util.Uint256]uint32{ + {101}: 10_000, + }, + netmap: network, + } + eh := &testEventHandler{} + + proc, err := 
newTestProc(t, func(p *Params) { + p.NotaryDisabled = true + p.NotaryDepositHandler = eh.Handle + p.AlphabetSyncHandler = eh.Handle + p.NetmapClient = nc + p.ContainerWrapper = cc + p.EpochTimer = r + p.EpochState = es + }) + + require.NoError(t, err, "failed to create processor") + + ev := netmapEvent.NewEpoch{ + Num: 101, + Hash: util.Uint256{101}, + } + proc.handleNewEpoch(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + require.Equal(t, nc.epochDuration, es.duration, "invalid epoch duration") + require.Equal(t, ev.Num, es.counter, "invalid epoch counter") + require.EqualValues(t, []uint32{nc.txHeights[ev.Hash]}, r.timers, "invalid epoch timer resets") + + var expEstimation cntClient.StartEstimationPrm + expEstimation.SetEpoch(ev.Num - 1) + expEstimation.SetHash(ev.Hash) + require.EqualValues(t, []cntClient.StartEstimationPrm{expEstimation}, cc.estimations, "invalid estimations") + + require.EqualValues(t, []event.Event{ + governance.NewSyncEvent(ev.TxHash()), + ev, + }, eh.handledEvents, "invalid handled events") +} + +func TestAddPeer(t *testing.T) { + t.Parallel() + + t.Run("with notary", func(t *testing.T) { + t.Parallel() + nc := &testNetmapClient{ + contractAddress: util.Uint160{47}, + } + + proc, err := newTestProc(t, func(p *Params) { + p.NotaryDisabled = true + p.NetmapClient = nc + }) + + require.NoError(t, err, "failed to create processor") + + var node netmap.NodeInfo + key, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") + require.NoError(t, err, "failed to parse key1") + node.SetPublicKey(key.Bytes()) + + ev := netmapEvent.AddPeer{ + NodeBytes: node.Marshal(), + Request: &payload.P2PNotaryRequest{ + MainTransaction: &transaction.Transaction{ + Nonce: 100, + }, + }, + } + proc.handleAddPeer(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + require.EqualValues(t, []notaryInvoke{ + { + contract: nc.contractAddress, + fee: 0, + 
nonce: ev.Request.MainTransaction.Nonce, + vub: nil, + method: "addPeerIR", + args: []any{ev.Node()}, + }, + }, nc.notaryInvokes, "invalid notary invokes") + }) + + t.Run("without notary", func(t *testing.T) { + t.Parallel() + + nc := &testNetmapClient{ + contractAddress: util.Uint160{47}, + } + + proc, err := newTestProc(t, func(p *Params) { + p.NotaryDisabled = true + p.NetmapClient = nc + }) + + require.NoError(t, err, "failed to create processor") + + var node netmap.NodeInfo + key, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") + require.NoError(t, err, "failed to parse key") + node.SetPublicKey(key.Bytes()) + + ev := netmapEvent.AddPeer{ + NodeBytes: node.Marshal(), + } + proc.handleAddPeer(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + var addPeerExp netmapclient.AddPeerPrm + addPeerExp.SetNodeInfo(node) + require.EqualValues(t, []netmapclient.AddPeerPrm{addPeerExp}, nc.addPeers, "invalid peers") + }) +} + +func TestUpdateState(t *testing.T) { + t.Parallel() + + t.Run("with notary", func(t *testing.T) { + t.Parallel() + ns := &testNodeStateSettings{ + maintAllowed: true, + } + nc := &testNetmapClient{} + + proc, err := newTestProc(t, func(p *Params) { + p.NotaryDisabled = true + p.NodeStateSettings = ns + p.NetmapClient = nc + }) + + require.NoError(t, err, "failed to create processor") + + key, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") + require.NoError(t, err, "failed to parse key") + + ev := netmapEvent.UpdatePeer{ + State: netmapContract.NodeStateOnline, + PubKey: key, + Request: &payload.P2PNotaryRequest{ + MainTransaction: &transaction.Transaction{ + Nonce: 100, + }, + }, + } + proc.handleUpdateState(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + require.EqualValues(t, []*transaction.Transaction{ + ev.Request.MainTransaction, + }, nc.invokedTxs, "invalid invoked 
transactions") + }) + + t.Run("without notary", func(t *testing.T) { + t.Parallel() + ns := &testNodeStateSettings{ + maintAllowed: true, + } + nc := &testNetmapClient{} + + proc, err := newTestProc(t, func(p *Params) { + p.NetmapClient = nc + p.NodeStateSettings = ns + }) + + require.NoError(t, err, "failed to create processor") + + key, err := keys.NewPublicKeyFromString("038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35") + require.NoError(t, err, "failed to parse key") + + ev := netmapEvent.UpdatePeer{ + State: netmapContract.NodeStateOnline, + PubKey: key, + } + proc.handleUpdateState(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + var expUpdPeer netmapclient.UpdatePeerPrm + expUpdPeer.SetMaintenance() + expUpdPeer.SetOnline() + expUpdPeer.SetKey(ev.PubKey.Bytes()) + + require.EqualValues(t, []netmapclient.UpdatePeerPrm{expUpdPeer}, nc.peerStateUpdates, "invalid peer state updates") + }) +} + +func TestCleanupTick(t *testing.T) { + t.Parallel() + + t.Run("notary disabled", func(t *testing.T) { + t.Parallel() + + nc := &testNetmapClient{} + + proc, err := newTestProc(t, func(p *Params) { + p.NetmapClient = nc + p.NotaryDisabled = true + p.CleanupEnabled = true + }) + + require.NoError(t, err, "failed to create processor") + + key1Str := "038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35" + proc.netmapSnapshot.lastAccess[key1Str] = epochStampWithNodeInfo{ + epochStamp: epochStamp{ + epoch: 95, + removeFlag: false, + }, + } + key2Str := "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3" + proc.netmapSnapshot.lastAccess[key2Str] = epochStampWithNodeInfo{ + epochStamp: epochStamp{ + epoch: 98, + removeFlag: false, + }, + } + + ev := netmapCleanupTick{ + epoch: 100, + txHash: util.Uint256{123}, + } + + proc.handleCleanupTick(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + keyExp, err := keys.NewPublicKeyFromString(key1Str) + require.NoError(t, err, 
"failed to parse expired key") + + updExp := netmapclient.UpdatePeerPrm{} + updExp.SetKey(keyExp.Bytes()) + updExp.SetHash(ev.TxHash()) + + require.EqualValues(t, []netmapclient.UpdatePeerPrm{updExp}, nc.peerStateUpdates, "invalid peer updates") + require.True(t, proc.netmapSnapshot.lastAccess[key1Str].removeFlag, "invalid expired removed flag") + require.False(t, proc.netmapSnapshot.lastAccess[key2Str].removeFlag, "invalid non expired removed flag") + }) + + t.Run("notary enabled", func(t *testing.T) { + t.Parallel() + + nc := &testNetmapClient{ + contractAddress: util.Uint160{111}, + } + proc, err := newTestProc(t, + func(p *Params) { + p.NetmapClient = nc + p.CleanupEnabled = true + }, + ) + + require.NoError(t, err, "failed to create processor") + + key1Str := "038c862959e56b43e20f79187c4fe9e0bc7c8c66c1603e6cf0ec7f87ab6b08dc35" + proc.netmapSnapshot.lastAccess[key1Str] = epochStampWithNodeInfo{ + epochStamp: epochStamp{ + epoch: 95, + removeFlag: false, + }, + } + key2Str := "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3" + proc.netmapSnapshot.lastAccess[key2Str] = epochStampWithNodeInfo{ + epochStamp: epochStamp{ + epoch: 98, + removeFlag: false, + }, + } + + ev := netmapCleanupTick{ + epoch: 100, + txHash: util.Uint256{123}, + } + + proc.handleCleanupTick(ev) + + for proc.pool.Running() > 0 { + time.Sleep(10 * time.Millisecond) + } + + keyExp, err := keys.NewPublicKeyFromString(key1Str) + require.NoError(t, err, "failed to parse expired key") + + updExp := netmapclient.UpdatePeerPrm{} + updExp.SetKey(keyExp.Bytes()) + updExp.SetHash(ev.TxHash()) + + require.EqualValues(t, []notaryInvoke{ + { + contract: nc.contractAddress, + fee: 0, + nonce: uint32(ev.epoch), + vub: nil, + method: "updateStateIR", + args: []any{int64(v2netmap.Offline), keyExp.Bytes()}, + }, + }, nc.notaryInvokes, "invalid notary invokes") + require.True(t, proc.netmapSnapshot.lastAccess[key1Str].removeFlag, "invalid expired removed flag") + require.False(t, 
proc.netmapSnapshot.lastAccess[key2Str].removeFlag, "invalid non expired removed flag") + }) +} + +func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) { + ns := &testNodeStateSettings{} + es := &testEpochState{} + r := &testEpochResetter{} + as := &testAlphabetState{ + isAlphabet: true, + } + cc := &testContainerClient{} + nc := &testNetmapClient{} + eh := &testEventHandler{} + + p := &Params{ + Log: test.NewLogger(t, true), + PoolSize: 1, + CleanupEnabled: false, + CleanupThreshold: 3, + NotaryDisabled: false, + NodeStateSettings: ns, + NodeValidator: &testValidator{}, + EpochState: es, + EpochTimer: r, + AlphabetState: as, + ContainerWrapper: cc, + NetmapClient: nc, + NotaryDepositHandler: eh.Handle, + AlphabetSyncHandler: eh.Handle, + } + + nonDefault(p) + + return New(p) +} + +type testNodeStateSettings struct { + maintAllowed bool +} + +func (s *testNodeStateSettings) MaintenanceModeAllowed() error { + if s.maintAllowed { + return nil + } + return fmt.Errorf("maintenance mode not allowed") +} + +type testValidator struct{} + +func (v *testValidator) VerifyAndUpdate(*netmap.NodeInfo) error { + return nil +} + +type testEpochState struct { + counter uint64 + duration uint64 +} + +func (s *testEpochState) SetEpochCounter(c uint64) { + s.counter = c +} +func (s *testEpochState) EpochCounter() uint64 { + return s.counter +} +func (s *testEpochState) SetEpochDuration(d uint64) { + s.duration = d +} +func (s *testEpochState) EpochDuration() uint64 { + return s.duration +} + +type testEpochResetter struct { + timers []uint32 +} + +func (r *testEpochResetter) ResetEpochTimer(t uint32) error { + r.timers = append(r.timers, t) + return nil +} + +type testAlphabetState struct { + isAlphabet bool +} + +func (s *testAlphabetState) IsAlphabet() bool { + return s.isAlphabet +} + +type testContainerClient struct { + estimations []cntClient.StartEstimationPrm +} + +func (c *testContainerClient) StartEstimation(p cntClient.StartEstimationPrm) error { + 
c.estimations = append(c.estimations, p) + return nil +} + +type notaryInvoke struct { + contract util.Uint160 + fee fixedn.Fixed8 + nonce uint32 + vub *uint32 + method string + args []any +} + +type testNetmapClient struct { + contractAddress util.Uint160 + epochDuration uint64 + netmap *netmap.NetMap + txHeights map[util.Uint256]uint32 + + peerStateUpdates []netmapclient.UpdatePeerPrm + notaryInvokes []notaryInvoke + newEpochs []uint64 + addPeers []netmapclient.AddPeerPrm + invokedTxs []*transaction.Transaction +} + +func (c *testNetmapClient) UpdatePeerState(p netmapclient.UpdatePeerPrm) error { + c.peerStateUpdates = append(c.peerStateUpdates, p) + return nil +} +func (c *testNetmapClient) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { + c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{ + contract: contract, + fee: fee, + nonce: nonce, + vub: vub, + method: method, + args: args, + }) + return nil +} +func (c *testNetmapClient) ContractAddress() util.Uint160 { + return c.contractAddress +} +func (c *testNetmapClient) EpochDuration() (uint64, error) { + return c.epochDuration, nil +} +func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) { + if res, found := c.txHeights[h]; found { + return res, nil + } + return 0, fmt.Errorf("not found") +} +func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { + return c.netmap, nil +} +func (c *testNetmapClient) NewEpoch(epoch uint64, force bool) error { + c.newEpochs = append(c.newEpochs, epoch) + return nil +} +func (c *testNetmapClient) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) { + return true, nil +} +func (c *testNetmapClient) AddPeer(p netmapclient.AddPeerPrm) error { + c.addPeers = append(c.addPeers, p) + return nil +} +func (c *testNetmapClient) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error { + c.invokedTxs = append(c.invokedTxs, mainTx) + return 
nil +} + +type testEventHandler struct { + handledEvents []event.Event +} + +func (h *testEventHandler) Handle(e event.Event) { + h.handledEvents = append(h.handledEvents, e) +} diff --git a/pkg/innerring/processors/netmap/nodevalidation/subnet/calls.go b/pkg/innerring/processors/netmap/nodevalidation/subnet/calls.go deleted file mode 100644 index 4c859703d..000000000 --- a/pkg/innerring/processors/netmap/nodevalidation/subnet/calls.go +++ /dev/null @@ -1,42 +0,0 @@ -package subnet - -import ( - "fmt" - - morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" -) - -// VerifyAndUpdate calls subnet contract's `NodeAllowed` method. -// Removes subnets that have not been approved by the contract. -func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error { - prm := morphsubnet.NodeAllowedPrm{} - - err := n.IterateSubnets(func(id subnetid.ID) error { - // every node can be bootstrapped - // to the zero subnetwork - if subnetid.IsZero(id) { - return nil - } - - prm.SetID(id.Marshal()) - prm.SetNode(n.PublicKey()) - - res, err := v.subnetClient.NodeAllowed(prm) - if err != nil { - return fmt.Errorf("could not call `NodeAllowed` contract method: %w", err) - } - - if !res.Allowed() { - return netmap.ErrRemoveSubnet - } - - return nil - }) - if err != nil { - return fmt.Errorf("could not verify subnet entrance of the node: %w", err) - } - - return nil -} diff --git a/pkg/innerring/processors/netmap/nodevalidation/subnet/validator.go b/pkg/innerring/processors/netmap/nodevalidation/subnet/validator.go deleted file mode 100644 index f9ae4e614..000000000 --- a/pkg/innerring/processors/netmap/nodevalidation/subnet/validator.go +++ /dev/null @@ -1,41 +0,0 @@ -package subnet - -import ( - "errors" - - morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet" -) - -// Validator is an utility that verifies 
node subnet -// allowance. -// -// For correct operation, Validator must be created -// using the constructor (New). After successful creation, -// the Validator is immediately ready to work through API. -type Validator struct { - subnetClient *morphsubnet.Client -} - -// Prm groups the required parameters of the Validator's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - SubnetClient *morphsubnet.Client -} - -// New creates a new instance of the Validator. -// -// The created Validator does not require additional -// initialization and is completely ready for work. -func New(prm Prm) (*Validator, error) { - switch { - case prm.SubnetClient == nil: - return nil, errors.New("ir/nodeValidator: subnet client is not set") - } - - return &Validator{ - subnetClient: prm.SubnetClient, - }, nil -} diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index e4425ef17..45a08b377 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -2,6 +2,7 @@ package netmap import ( v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "go.uber.org/zap" @@ -9,7 +10,7 @@ import ( func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore new netmap cleanup tick") + np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) return } @@ -17,13 +18,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) { err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s 
string) error { key, err := keys.NewPublicKeyFromString(s) if err != nil { - np.log.Warn("can't decode public key of netmap node", + np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode, zap.String("key", s)) return nil } - np.log.Info("vote to remove node from netmap", zap.String("key", s)) + np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) // In notary environments we call UpdateStateIR method instead of UpdateState. // It differs from UpdateState only by name, so we can do this in the same form. @@ -38,7 +39,7 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) { err = np.netmapClient.UpdatePeerState(prm) } else { - err = np.netmapClient.Morph().NotaryInvoke( + err = np.netmapClient.MorphNotaryInvoke( np.netmapClient.ContractAddress(), 0, uint32(ev.epoch), @@ -48,13 +49,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) { ) } if err != nil { - np.log.Error("can't invoke netmap.UpdateState", zap.Error(err)) + np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) } return nil }) if err != nil { - np.log.Warn("can't iterate on netmap cleaner cache", + np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache, zap.String("error", err.Error())) } } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index ffcddc497..b655db9aa 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -1,9 +1,8 @@ package netmap import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" netmapEvent 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "go.uber.org/zap" @@ -16,7 +15,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { epochDuration, err := np.netmapClient.EpochDuration() if err != nil { - np.log.Warn("can't get epoch duration", + np.log.Warn(logs.NetmapCantGetEpochDuration, zap.String("error", err.Error())) } else { np.epochState.SetEpochDuration(epochDuration) @@ -24,22 +23,22 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { np.epochState.SetEpochCounter(epoch) - h, err := np.netmapClient.Morph().TxHeight(ev.TxHash()) + h, err := np.netmapClient.MorphTxHeight(ev.TxHash()) if err != nil { - np.log.Warn("can't get transaction height", + np.log.Warn(logs.NetmapCantGetTransactionHeight, zap.String("hash", ev.TxHash().StringLE()), zap.String("error", err.Error())) } if err := np.epochTimer.ResetEpochTimer(h); err != nil { - np.log.Warn("can't reset epoch timer", + np.log.Warn(logs.NetmapCantResetEpochTimer, zap.String("error", err.Error())) } // get new netmap snapshot networkMap, err := np.netmapClient.NetMap() if err != nil { - np.log.Warn("can't get netmap snapshot to perform cleanup", + np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup, zap.String("error", err.Error())) return @@ -54,7 +53,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { err = np.containerWrp.StartEstimation(prm) if err != nil { - np.log.Warn("can't start container size estimation", + np.log.Warn(logs.NetmapCantStartContainerSizeEstimation, zap.Uint64("epoch", epoch), zap.String("error", err.Error())) } @@ -62,8 +61,6 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { np.netmapSnapshot.update(*networkMap, epoch) np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()}) - np.handleNewAudit(audit.NewAuditStartEvent(epoch)) - np.handleAuditSettlements(settlement.NewAuditEvent(epoch)) np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash())) np.handleNotaryDeposit(ev) 
} @@ -71,15 +68,15 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { // Process new epoch tick by invoking new epoch method in network map contract. func (np *Processor) processNewEpochTick() { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore new epoch tick") + np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick) return } nextEpoch := np.epochState.EpochCounter() + 1 - np.log.Debug("next epoch", zap.Uint64("value", nextEpoch)) + np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) - err := np.netmapClient.NewEpoch(nextEpoch) + err := np.netmapClient.NewEpoch(nextEpoch, false) if err != nil { - np.log.Error("can't invoke netmap.NewEpoch", zap.Error(err)) + np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) } } diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index 3734bae01..9e6eeb53e 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -1,14 +1,12 @@ package netmap import ( - "bytes" "encoding/hex" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - subnetEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" "go.uber.org/zap" ) @@ -16,16 +14,16 @@ import ( // local epoch timer. 
func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore new peer notification") + np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) return } // check if notary transaction is valid, see #976 if originalRequest := ev.NotaryRequest(); originalRequest != nil { tx := originalRequest.MainTransaction - ok, err := np.netmapClient.Morph().IsValidScript(tx.Script, tx.Signers) + ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers) if err != nil || !ok { - np.log.Warn("non-halt notary transaction", + np.log.Warn(logs.NetmapNonhaltNotaryTransaction, zap.String("method", "netmap.AddPeer"), zap.String("hash", tx.Hash().StringLE()), zap.Error(err)) @@ -37,14 +35,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { var nodeInfo netmap.NodeInfo if err := nodeInfo.Unmarshal(ev.Node()); err != nil { // it will be nice to have tx id at event structure to log it - np.log.Warn("can't parse network map candidate") + np.log.Warn(logs.NetmapCantParseNetworkMapCandidate) return } // validate and update node info err := np.nodeValidator.VerifyAndUpdate(&nodeInfo) if err != nil { - np.log.Warn("could not verify and update information about network map candidate", + np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, zap.String("error", err.Error()), ) @@ -62,7 +60,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { updated := np.netmapSnapshot.touch(keyString, np.epochState.EpochCounter(), nodeInfoBinary) if updated { - np.log.Info("approving network map candidate", + np.log.Info(logs.NetmapApprovingNetworkMapCandidate, zap.String("key", keyString)) prm := netmapclient.AddPeerPrm{} @@ -75,7 +73,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { if nr := ev.NotaryRequest(); nr != nil { // create new notary request with the original nonce - err = np.netmapClient.Morph().NotaryInvoke( + err = 
np.netmapClient.MorphNotaryInvoke( np.netmapClient.ContractAddress(), 0, nr.MainTransaction.Nonce, @@ -89,7 +87,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { } if err != nil { - np.log.Error("can't invoke netmap.AddPeer", zap.Error(err)) + np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) } } } @@ -97,7 +95,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { // Process update peer notification by sending approval tx to the smart contract. func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) { if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore update peer notification") + np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) return } @@ -110,7 +108,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) { if ev.Maintenance() { err = np.nodeStateSettings.MaintenanceModeAllowed() if err != nil { - np.log.Info("prevent switching node to maintenance state", + np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState, zap.Error(err), ) @@ -119,7 +117,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) { } if nr := ev.NotaryRequest(); nr != nil { - err = np.netmapClient.Morph().NotarySignAndInvokeTX(nr.MainTransaction) + err = np.netmapClient.MorphNotarySignAndInvokeTX(nr.MainTransaction) } else { prm := netmapclient.UpdatePeerPrm{} @@ -135,77 +133,6 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) { err = np.netmapClient.UpdatePeerState(prm) } if err != nil { - np.log.Error("can't invoke netmap.UpdatePeer", zap.Error(err)) - } -} - -func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) { - if !np.alphabetState.IsAlphabet() { - np.log.Info("non alphabet mode, ignore remove node from subnet notification") - return - } - - candidates, err := np.netmapClient.GetCandidates() - if err != nil { - np.log.Warn("could not get network map candidates", - zap.Error(err), - ) - return - } - - rawSubnet := 
ev.SubnetworkID() - var subnetToRemoveFrom subnetid.ID - - err = subnetToRemoveFrom.Unmarshal(rawSubnet) - if err != nil { - np.log.Warn("could not unmarshal subnet id", - zap.Error(err), - ) - return - } - - if subnetid.IsZero(subnetToRemoveFrom) { - np.log.Warn("got zero subnet in remove node notification") - return - } - - for i := range candidates { - if !bytes.Equal(candidates[i].PublicKey(), ev.Node()) { - continue - } - - err = candidates[i].IterateSubnets(func(subNetID subnetid.ID) error { - if subNetID.Equals(subnetToRemoveFrom) { - return netmap.ErrRemoveSubnet - } - - return nil - }) - if err != nil { - np.log.Warn("could not iterate over subnetworks of the node", zap.Error(err)) - np.log.Info("vote to remove node from netmap", zap.String("key", hex.EncodeToString(ev.Node()))) - - prm := netmapclient.UpdatePeerPrm{} - prm.SetKey(ev.Node()) - prm.SetHash(ev.TxHash()) - - err = np.netmapClient.UpdatePeerState(prm) - if err != nil { - np.log.Error("could not invoke netmap.UpdateState", zap.Error(err)) - return - } - } else { - prm := netmapclient.AddPeerPrm{} - prm.SetNodeInfo(candidates[i]) - prm.SetHash(ev.TxHash()) - - err = np.netmapClient.AddPeer(prm) - if err != nil { - np.log.Error("could not invoke netmap.AddPeer", zap.Error(err)) - return - } - } - - break + np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) } } diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index de145d48c..c466cfb1b 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -4,15 +4,17 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + cntClient 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" - subnetEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/mempoolevent" + "github.com/nspcc-dev/neo-go/pkg/core/transaction" + "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" "go.uber.org/zap" @@ -52,6 +54,23 @@ type ( VerifyAndUpdate(*netmap.NodeInfo) error } + Client interface { + UpdatePeerState(p netmapclient.UpdatePeerPrm) error + MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error + ContractAddress() util.Uint160 + EpochDuration() (uint64, error) + MorphTxHeight(h util.Uint256) (res uint32, err error) + NetMap() (*netmap.NetMap, error) + NewEpoch(epoch uint64, force bool) error + MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) + AddPeer(p netmapclient.AddPeerPrm) error + MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error + } + + ContainerClient interface { + StartEstimation(p cntClient.StartEstimationPrm) error + } + // Processor of events produced by network map contract // and new epoch ticker, because it is related to contract. 
Processor struct { @@ -61,17 +80,13 @@ type ( epochState EpochState alphabetState AlphabetState - netmapClient *nmClient.Client - containerWrp *container.Client - - subnetContract util.Uint160 + netmapClient Client + containerWrp ContainerClient netmapSnapshot cleanupTable - handleNewAudit event.Handler - handleAuditSettlements event.Handler - handleAlphabetSync event.Handler - handleNotaryDeposit event.Handler + handleAlphabetSync event.Handler + handleNotaryDeposit event.Handler nodeValidator NodeValidator @@ -84,19 +99,16 @@ type ( Params struct { Log *logger.Logger PoolSize int - NetmapClient *nmClient.Client + NetmapClient Client EpochTimer EpochTimerReseter EpochState EpochState AlphabetState AlphabetState CleanupEnabled bool CleanupThreshold uint64 // in epochs - ContainerWrapper *container.Client - SubnetContract *util.Uint160 + ContainerWrapper ContainerClient - HandleAudit event.Handler - AuditSettlementsHandler event.Handler - AlphabetSyncHandler event.Handler - NotaryDepositHandler event.Handler + AlphabetSyncHandler event.Handler + NotaryDepositHandler event.Handler NodeValidator NodeValidator @@ -107,10 +119,7 @@ type ( ) const ( - newEpochNotification = "NewEpoch" - addPeerNotification = "AddPeer" - updatePeerStateNotification = "UpdateState" - removeNodeNotification = "RemoveNode" + newEpochNotification = "NewEpoch" ) // New creates network map contract processor instance. 
@@ -124,10 +133,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/netmap: global state is not set") case p.AlphabetState == nil: return nil, errors.New("ir/netmap: global state is not set") - case p.HandleAudit == nil: - return nil, errors.New("ir/netmap: audit handler is not set") - case p.AuditSettlementsHandler == nil: - return nil, errors.New("ir/netmap: audit settlement handler is not set") case p.AlphabetSyncHandler == nil: return nil, errors.New("ir/netmap: alphabet sync handler is not set") case p.NotaryDepositHandler == nil: @@ -136,13 +141,11 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/netmap: container contract wrapper is not set") case p.NodeValidator == nil: return nil, errors.New("ir/netmap: node validator is not set") - case p.SubnetContract == nil: - return nil, errors.New("ir/netmap: subnet contract script hash is not set") case p.NodeStateSettings == nil: return nil, errors.New("ir/netmap: node state settings is not set") } - p.Log.Debug("netmap worker pool", zap.Int("size", p.PoolSize)) + p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { @@ -158,10 +161,6 @@ func New(p *Params) (*Processor, error) { netmapClient: p.NetmapClient, containerWrp: p.ContainerWrapper, netmapSnapshot: newCleanupTable(p.CleanupEnabled, p.CleanupThreshold), - handleNewAudit: p.HandleAudit, - subnetContract: *p.SubnetContract, - - handleAuditSettlements: p.AuditSettlementsHandler, handleAlphabetSync: p.AlphabetSyncHandler, @@ -181,13 +180,6 @@ func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInf var p event.NotificationParserInfo - // remove node from subnetwork event - p.SetScriptHash(np.subnetContract) - p.SetType(removeNodeNotification) - p.SetParser(subnetEvent.ParseRemoveNode) - - parsers = append(parsers, p) - p.SetScriptHash(np.netmapClient.ContractAddress()) // new epoch event @@ 
-195,20 +187,6 @@ func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInf p.SetParser(netmapEvent.ParseNewEpoch) parsers = append(parsers, p) - if !np.notaryDisabled { - return parsers - } - - // new peer event - p.SetType(addPeerNotification) - p.SetParser(netmapEvent.ParseAddPeer) - parsers = append(parsers, p) - - // update peer event - p.SetType(updatePeerStateNotification) - p.SetParser(netmapEvent.ParseUpdatePeer) - parsers = append(parsers, p) - return parsers } @@ -218,13 +196,6 @@ func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerI var i event.NotificationHandlerInfo - // remove node from subnetwork event - i.SetScriptHash(np.subnetContract) - i.SetType(removeNodeNotification) - i.SetHandler(np.handleRemoveNode) - - handlers = append(handlers, i) - i.SetScriptHash(np.netmapClient.ContractAddress()) // new epoch handler @@ -232,20 +203,6 @@ func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerI i.SetHandler(np.handleNewEpoch) handlers = append(handlers, i) - if !np.notaryDisabled { - return handlers - } - - // new peer handler - i.SetType(addPeerNotification) - i.SetHandler(np.handleAddPeer) - handlers = append(handlers, i) - - // update peer handler - i.SetType(updatePeerStateNotification) - i.SetHandler(np.handleUpdateState) - handlers = append(handlers, i) - return handlers } @@ -296,8 +253,3 @@ func (np *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { return hh } - -// TimersHandlers for the 'Timers' event producer. 
-func (np *Processor) TimersHandlers() []event.NotificationHandlerInfo { - return nil -} diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go new file mode 100644 index 000000000..255d498d3 --- /dev/null +++ b/pkg/innerring/processors/netmap/wrappers.go @@ -0,0 +1,59 @@ +package netmap + +import ( + netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/core/transaction" + "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/nspcc-dev/neo-go/pkg/util" +) + +func NewNetmapClient(netmapClient *netmapclient.Client) Client { + return &netmapClientWrapper{ + netmapClient: netmapClient, + } +} + +type netmapClientWrapper struct { + netmapClient *netmapclient.Client +} + +func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error { + return w.netmapClient.UpdatePeerState(p) +} + +func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { + return w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...) 
+} + +func (w *netmapClientWrapper) ContractAddress() util.Uint160 { + return w.netmapClient.ContractAddress() +} + +func (w *netmapClientWrapper) EpochDuration() (uint64, error) { + return w.netmapClient.EpochDuration() +} + +func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) { + return w.netmapClient.Morph().TxHeight(h) +} + +func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) { + return w.netmapClient.NetMap() +} + +func (w *netmapClientWrapper) NewEpoch(epoch uint64, force bool) error { + return w.netmapClient.NewEpoch(epoch, force) +} + +func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) { + return w.netmapClient.Morph().IsValidScript(script, signers) +} + +func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error { + return w.netmapClient.AddPeer(p) +} + +func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error { + return w.netmapClient.Morph().NotarySignAndInvokeTX(mainTx) +} diff --git a/pkg/innerring/processors/reputation/handlers.go b/pkg/innerring/processors/reputation/handlers.go deleted file mode 100644 index 36c9579e5..000000000 --- a/pkg/innerring/processors/reputation/handlers.go +++ /dev/null @@ -1,28 +0,0 @@ -package reputation - -import ( - "encoding/hex" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation" - "go.uber.org/zap" -) - -func (rp *Processor) handlePutReputation(ev event.Event) { - put := ev.(reputationEvent.Put) - peerID := put.PeerID() - - // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library - rp.log.Info("notification", - zap.String("type", "reputation put"), - zap.String("peer_id", hex.EncodeToString(peerID.PublicKey()))) - - // send event to the worker pool - - err := rp.pool.Submit(func() { rp.processPut(&put) }) - if err != nil { - // there system 
can be moved into controlled degradation stage - rp.log.Warn("reputation worker pool drained", - zap.Int("capacity", rp.pool.Cap())) - } -} diff --git a/pkg/innerring/processors/reputation/process_put.go b/pkg/innerring/processors/reputation/process_put.go deleted file mode 100644 index 31e93763b..000000000 --- a/pkg/innerring/processors/reputation/process_put.go +++ /dev/null @@ -1,98 +0,0 @@ -package reputation - -import ( - "bytes" - "encoding/hex" - "errors" - "fmt" - - repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation" - reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "go.uber.org/zap" -) - -var errWrongManager = errors.New("got manager that is incorrect for peer") - -func (rp *Processor) processPut(e *reputationEvent.Put) { - if !rp.alphabetState.IsAlphabet() { - rp.log.Info("non alphabet mode, ignore reputation put notification") - return - } - - epoch := e.Epoch() - id := e.PeerID() - value := e.Value() - - // check if epoch is valid - currentEpoch := rp.epochState.EpochCounter() - if epoch >= currentEpoch { - rp.log.Info("ignore reputation value", - zap.String("reason", "invalid epoch number"), - zap.Uint64("trust_epoch", epoch), - zap.Uint64("local_epoch", currentEpoch)) - - return - } - - // check signature - if !value.VerifySignature() { - rp.log.Info("ignore reputation value", - zap.String("reason", "invalid signature"), - ) - - return - } - - // check if manager is correct - if err := rp.checkManagers(epoch, value.Manager(), id); err != nil { - rp.log.Info("ignore reputation value", - zap.String("reason", "wrong manager"), - zap.String("error", err.Error())) - - return - } - - rp.approvePutReputation(e) -} - -func (rp *Processor) checkManagers(e uint64, mng apireputation.PeerID, peer apireputation.PeerID) error { - mm, err := rp.mngBuilder.BuildManagers(e, peer) - if err != nil { - return fmt.Errorf("could 
not build managers: %w", err) - } - - for _, m := range mm { - // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library - if bytes.Equal(mng.PublicKey(), m.PublicKey()) { - return nil - } - } - - return errWrongManager -} - -func (rp *Processor) approvePutReputation(e *reputationEvent.Put) { - var ( - id = e.PeerID() - err error - ) - - if nr := e.NotaryRequest(); nr != nil { - // put event was received via Notary service - err = rp.reputationWrp.Morph().NotarySignAndInvokeTX(nr.MainTransaction) - } else { - args := repClient.PutPrm{} - args.SetEpoch(e.Epoch()) - args.SetPeerID(id) - args.SetValue(e.Value()) - - err = rp.reputationWrp.Put(args) - } - if err != nil { - // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library - rp.log.Warn("can't send approval tx for reputation value", - zap.String("peer_id", hex.EncodeToString(id.PublicKey())), - zap.String("error", err.Error())) - } -} diff --git a/pkg/innerring/processors/reputation/processor.go b/pkg/innerring/processors/reputation/processor.go deleted file mode 100644 index 990358257..000000000 --- a/pkg/innerring/processors/reputation/processor.go +++ /dev/null @@ -1,155 +0,0 @@ -package reputation - -import ( - "errors" - "fmt" - - repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/nspcc-dev/neo-go/pkg/core/mempoolevent" - "github.com/panjf2000/ants/v2" - "go.uber.org/zap" -) - -type ( - // EpochState is a callback interface for inner ring global state. - EpochState interface { - EpochCounter() uint64 - } - - // AlphabetState is a callback interface for inner ring global state. 
- AlphabetState interface { - IsAlphabet() bool - } - - // Processor of events produced by reputation contract. - Processor struct { - log *logger.Logger - pool *ants.Pool - - epochState EpochState - alphabetState AlphabetState - - reputationWrp *repClient.Client - - mngBuilder common.ManagerBuilder - - notaryDisabled bool - } - - // Params of the processor constructor. - Params struct { - Log *logger.Logger - PoolSize int - EpochState EpochState - AlphabetState AlphabetState - ReputationWrapper *repClient.Client - ManagerBuilder common.ManagerBuilder - NotaryDisabled bool - } -) - -const ( - putReputationNotification = "reputationPut" -) - -// New creates reputation contract processor instance. -func New(p *Params) (*Processor, error) { - switch { - case p.Log == nil: - return nil, errors.New("ir/reputation: logger is not set") - case p.EpochState == nil: - return nil, errors.New("ir/reputation: global state is not set") - case p.AlphabetState == nil: - return nil, errors.New("ir/reputation: global state is not set") - case p.ReputationWrapper == nil: - return nil, errors.New("ir/reputation: reputation contract wrapper is not set") - case p.ManagerBuilder == nil: - return nil, errors.New("ir/reputation: manager builder is not set") - } - - p.Log.Debug("reputation worker pool", zap.Int("size", p.PoolSize)) - - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) - if err != nil { - return nil, fmt.Errorf("ir/reputation: can't create worker pool: %w", err) - } - - return &Processor{ - log: p.Log, - pool: pool, - epochState: p.EpochState, - alphabetState: p.AlphabetState, - reputationWrp: p.ReputationWrapper, - mngBuilder: p.ManagerBuilder, - notaryDisabled: p.NotaryDisabled, - }, nil -} - -// ListenerNotificationParsers for the 'event.Listener' event producer. 
-func (rp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - if !rp.notaryDisabled { - return nil - } - - var parsers []event.NotificationParserInfo - - // put reputation event - put := event.NotificationParserInfo{} - put.SetType(putReputationNotification) - put.SetScriptHash(rp.reputationWrp.ContractAddress()) - put.SetParser(reputationEvent.ParsePut) - parsers = append(parsers, put) - - return parsers -} - -// ListenerNotificationHandlers for the 'event.Listener' event producer. -func (rp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - if !rp.notaryDisabled { - return nil - } - - var handlers []event.NotificationHandlerInfo - - // put reputation handler - put := event.NotificationHandlerInfo{} - put.SetType(putReputationNotification) - put.SetScriptHash(rp.reputationWrp.ContractAddress()) - put.SetHandler(rp.handlePutReputation) - handlers = append(handlers, put) - - return handlers -} - -// ListenerNotaryParsers for the 'event.Listener' notary event producer. -func (rp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo { - var p event.NotaryParserInfo - - p.SetMempoolType(mempoolevent.TransactionAdded) - p.SetRequestType(reputationEvent.PutNotaryEvent) - p.SetScriptHash(rp.reputationWrp.ContractAddress()) - p.SetParser(reputationEvent.ParsePutNotary) - - return []event.NotaryParserInfo{p} -} - -// ListenerNotaryHandlers for the 'event.Listener' notary event producer. -func (rp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { - var h event.NotaryHandlerInfo - - h.SetMempoolType(mempoolevent.TransactionAdded) - h.SetRequestType(reputationEvent.PutNotaryEvent) - h.SetScriptHash(rp.reputationWrp.ContractAddress()) - h.SetHandler(rp.handlePutReputation) - - return []event.NotaryHandlerInfo{h} -} - -// TimersHandlers for the 'Timers' event producer. 
-func (rp *Processor) TimersHandlers() []event.NotificationHandlerInfo { - return nil -} diff --git a/pkg/innerring/processors/settlement/audit/calculate.go b/pkg/innerring/processors/settlement/audit/calculate.go deleted file mode 100644 index d819865d8..000000000 --- a/pkg/innerring/processors/settlement/audit/calculate.go +++ /dev/null @@ -1,335 +0,0 @@ -package audit - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "encoding/hex" - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "go.uber.org/zap" -) - -// CalculatePrm groups the required parameters of -// Calculator.CalculateForEpoch call. -type CalculatePrm struct { - // Number of epoch to perform the calculation. - Epoch uint64 -} - -type singleResultCtx struct { - eAudit uint64 - - auditResult *audit.Result - - log *logger.Logger - - txTable *common.TransferTable - - cnrInfo common.ContainerInfo - - cnrNodes []common.NodeInfo - - passNodes map[string]common.NodeInfo - - sumSGSize *big.Int - - auditFee *big.Int -} - -var ( - bigGB = big.NewInt(1 << 30) - bigZero = big.NewInt(0) - bigOne = big.NewInt(1) -) - -// Calculate calculates payments for audit results in a specific epoch of the network. -// Wraps the results in a money transfer transaction and sends it to the network. 
-func (c *Calculator) Calculate(p *CalculatePrm) { - log := &logger.Logger{Logger: c.opts.log.With( - zap.Uint64("current epoch", p.Epoch), - )} - - if p.Epoch == 0 { - log.Info("settlements are ignored for zero epoch") - return - } - - log.Info("calculate audit settlements") - - log.Debug("getting results for the previous epoch") - prevEpoch := p.Epoch - 1 - - auditResults, err := c.prm.ResultStorage.AuditResultsForEpoch(prevEpoch) - if err != nil { - log.Error("could not collect audit results") - return - } else if len(auditResults) == 0 { - log.Debug("no audit results in previous epoch") - return - } - - auditFee, err := c.prm.AuditFeeFetcher.AuditFee() - if err != nil { - log.Warn("can't fetch audit fee from network config", - zap.String("error", err.Error())) - auditFee = 0 - } - - log.Debug("processing audit results", - zap.Int("number", len(auditResults)), - ) - - table := common.NewTransferTable() - - for i := range auditResults { - c.processResult(&singleResultCtx{ - log: log, - auditResult: auditResults[i], - txTable: table, - auditFee: big.NewInt(0).SetUint64(auditFee), - }) - } - - log.Debug("processing transfers") - - common.TransferAssets(c.prm.Exchanger, table, common.AuditSettlementDetails(prevEpoch)) -} - -func (c *Calculator) processResult(ctx *singleResultCtx) { - ctx.log = &logger.Logger{Logger: ctx.log.With( - zap.Stringer("cid", ctx.containerID()), - zap.Uint64("audit epoch", ctx.auditResult.Epoch()), - )} - - ctx.log.Debug("reading information about the container") - - ok := c.readContainerInfo(ctx) - if !ok { - return - } - - ctx.log.Debug("building placement") - - ok = c.buildPlacement(ctx) - if !ok { - return - } - - ctx.log.Debug("collecting passed nodes") - - ok = c.collectPassNodes(ctx) - if !ok { - return - } - - ctx.log.Debug("calculating sum of the sizes of all storage groups") - - ok = c.sumSGSizes(ctx) - if !ok { - return - } - - ctx.log.Debug("filling transfer table") - - c.fillTransferTable(ctx) -} - -func (c *Calculator) 
readContainerInfo(ctx *singleResultCtx) bool { - cnr, ok := ctx.auditResult.Container() - if !ok { - ctx.log.Error("missing container in audit result") - return false - } - - var err error - - ctx.cnrInfo, err = c.prm.ContainerStorage.ContainerInfo(cnr) - if err != nil { - ctx.log.Error("could not get container info", - zap.String("error", err.Error()), - ) - } - - return err == nil -} - -func (c *Calculator) buildPlacement(ctx *singleResultCtx) bool { - var err error - - ctx.cnrNodes, err = c.prm.PlacementCalculator.ContainerNodes(ctx.auditEpoch(), ctx.containerID()) - if err != nil { - ctx.log.Error("could not get container nodes", - zap.String("error", err.Error()), - ) - } - - empty := len(ctx.cnrNodes) == 0 - if empty { - ctx.log.Debug("empty list of container nodes") - } - - return err == nil && !empty -} - -func (c *Calculator) collectPassNodes(ctx *singleResultCtx) bool { - ctx.passNodes = make(map[string]common.NodeInfo) - - for _, cnrNode := range ctx.cnrNodes { - // TODO(@cthulhu-rider): neofs-sdk-go#241 use dedicated method - ctx.auditResult.IteratePassedStorageNodes(func(passNode []byte) bool { - if !bytes.Equal(cnrNode.PublicKey(), passNode) { - return true - } - - failed := false - - ctx.auditResult.IterateFailedStorageNodes(func(failNode []byte) bool { - failed = bytes.Equal(cnrNode.PublicKey(), failNode) - return !failed - }) - - if !failed { - ctx.passNodes[hex.EncodeToString(passNode)] = cnrNode - } - - return false - }) - } - - empty := len(ctx.passNodes) == 0 - if empty { - ctx.log.Debug("none of the container nodes passed the audit") - } - - return !empty -} - -func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool { - sumPassSGSize := uint64(0) - fail := false - - var addr oid.Address - addr.SetContainer(ctx.containerID()) - - ctx.auditResult.IteratePassedStorageGroups(func(id oid.ID) bool { - addr.SetObject(id) - - sgInfo, err := c.prm.SGStorage.SGInfo(addr) - if err != nil { - ctx.log.Error("could not get SG info", - zap.String("id", 
id.String()), - zap.String("error", err.Error()), - ) - - fail = true - - return false // we also can continue and calculate at least some part - } - - sumPassSGSize += sgInfo.Size() - - return true - }) - - if fail { - return false - } - - if sumPassSGSize == 0 { - ctx.log.Debug("zero sum SG size") - return false - } - - ctx.sumSGSize = big.NewInt(int64(sumPassSGSize)) - - return true -} - -func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool { - cnrOwner := ctx.cnrInfo.Owner() - - // add txs to pay for storage node - for k, info := range ctx.passNodes { - ownerID, err := c.prm.AccountStorage.ResolveKey(info) - if err != nil { - ctx.log.Error("could not resolve public key of the storage node", - zap.String("error", err.Error()), - zap.String("key", k), - ) - - return false // we also can continue and calculate at least some part - } - - price := info.Price() - - ctx.log.Debug("calculating storage node salary for audit (GASe-12)", - zap.Stringer("sum SG size", ctx.sumSGSize), - zap.Stringer("price", price), - ) - - fee := big.NewInt(0).Mul(price, ctx.sumSGSize) - fee.Div(fee, bigGB) - - if fee.Cmp(bigZero) == 0 { - fee.Add(fee, bigOne) - } - - ctx.txTable.Transfer(&common.TransferTx{ - From: cnrOwner, - To: *ownerID, - Amount: fee, - }) - } - - // add txs to pay inner ring node for audit result - auditIR, err := ownerFromKey(ctx.auditResult.AuditorKey()) - if err != nil { - ctx.log.Error("could not parse public key of the inner ring node", - zap.String("error", err.Error()), - zap.String("key", hex.EncodeToString(ctx.auditResult.AuditorKey())), - ) - - return false - } - - ctx.txTable.Transfer(&common.TransferTx{ - From: cnrOwner, - To: *auditIR, - Amount: ctx.auditFee, - }) - - return false -} - -func (c *singleResultCtx) containerID() cid.ID { - cnr, _ := c.auditResult.Container() - return cnr -} - -func (c *singleResultCtx) auditEpoch() uint64 { - if c.eAudit == 0 { - c.eAudit = c.auditResult.Epoch() - } - - return c.eAudit -} - -func 
ownerFromKey(key []byte) (*user.ID, error) { - pubKey, err := keys.NewPublicKeyFromBytes(key, elliptic.P256()) - if err != nil { - return nil, err - } - - var id user.ID - user.IDFromKey(&id, (ecdsa.PublicKey)(*pubKey)) - - return &id, nil -} diff --git a/pkg/innerring/processors/settlement/audit/calculator.go b/pkg/innerring/processors/settlement/audit/calculator.go deleted file mode 100644 index fb8d82071..000000000 --- a/pkg/innerring/processors/settlement/audit/calculator.go +++ /dev/null @@ -1,48 +0,0 @@ -package audit - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// Calculator represents a component for calculating payments -// based on data audit results and sending remittances to the chain. -type Calculator struct { - prm *CalculatorPrm - - opts *options -} - -// CalculatorOption is a Calculator constructor's option. -type CalculatorOption func(*options) - -type options struct { - log *logger.Logger -} - -func defaultOptions() *options { - return &options{ - log: &logger.Logger{Logger: zap.L()}, - } -} - -// NewCalculator creates, initializes and returns a new Calculator instance. -func NewCalculator(p *CalculatorPrm, opts ...CalculatorOption) *Calculator { - o := defaultOptions() - - for i := range opts { - opts[i](o) - } - - return &Calculator{ - prm: p, - opts: o, - } -} - -// WithLogger returns an option to specify the logging component. 
-func WithLogger(l *logger.Logger) CalculatorOption { - return func(o *options) { - o.log = l - } -} diff --git a/pkg/innerring/processors/settlement/audit/prm.go b/pkg/innerring/processors/settlement/audit/prm.go deleted file mode 100644 index d357f0d4f..000000000 --- a/pkg/innerring/processors/settlement/audit/prm.go +++ /dev/null @@ -1,49 +0,0 @@ -package audit - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// CalculatorPrm groups the parameters of Calculator's constructor. -type CalculatorPrm struct { - ResultStorage ResultStorage - - ContainerStorage common.ContainerStorage - - PlacementCalculator common.PlacementCalculator - - SGStorage SGStorage - - AccountStorage common.AccountStorage - - Exchanger common.Exchanger - - AuditFeeFetcher FeeFetcher -} - -// ResultStorage is an interface of storage of the audit results. -type ResultStorage interface { - // Must return all audit results by epoch number. - AuditResultsForEpoch(epoch uint64) ([]*audit.Result, error) -} - -// SGInfo groups the data about FrostFS storage group -// necessary for calculating audit fee. -type SGInfo interface { - // Must return sum size of the all group members. - Size() uint64 -} - -// SGStorage is an interface of storage of the storage groups. -type SGStorage interface { - // Must return information about the storage group by address. - SGInfo(oid.Address) (SGInfo, error) -} - -// FeeFetcher wraps AuditFee method that returns audit fee price from -// the network configuration. 
-type FeeFetcher interface { - AuditFee() (uint64, error) -} diff --git a/pkg/innerring/processors/settlement/basic/collect.go b/pkg/innerring/processors/settlement/basic/collect.go deleted file mode 100644 index ee7354c4f..000000000 --- a/pkg/innerring/processors/settlement/basic/collect.go +++ /dev/null @@ -1,113 +0,0 @@ -package basic - -import ( - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - "go.uber.org/zap" -) - -var ( - bigGB = big.NewInt(1 << 30) - bigZero = big.NewInt(0) - bigOne = big.NewInt(1) -) - -func (inc *IncomeSettlementContext) Collect() { - inc.mu.Lock() - defer inc.mu.Unlock() - - cachedRate, err := inc.rate.BasicRate() - if err != nil { - inc.log.Error("can't get basic income rate", - zap.String("error", err.Error())) - - return - } - - if cachedRate == 0 { - inc.noop = true - return - } - - cnrEstimations, err := inc.estimations.Estimations(inc.epoch) - if err != nil { - inc.log.Error("can't fetch container size estimations", - zap.Uint64("epoch", inc.epoch), - zap.String("error", err.Error())) - - return - } - - txTable := common.NewTransferTable() - - for i := range cnrEstimations { - owner, err := inc.container.ContainerInfo(cnrEstimations[i].ContainerID) - if err != nil { - inc.log.Warn("can't fetch container info", - zap.Uint64("epoch", inc.epoch), - zap.Stringer("container_id", cnrEstimations[i].ContainerID), - zap.String("error", err.Error())) - - continue - } - - cnrNodes, err := inc.placement.ContainerNodes(inc.epoch, cnrEstimations[i].ContainerID) - if err != nil { - inc.log.Debug("can't fetch container info", - zap.Uint64("epoch", inc.epoch), - zap.Stringer("container_id", cnrEstimations[i].ContainerID), - zap.String("error", err.Error())) - - continue - } - - avg := inc.avgEstimation(cnrEstimations[i]) // average container size per node - total := calculateBasicSum(avg, cachedRate, 
len(cnrNodes)) - - // fill distribute asset table - for i := range cnrNodes { - inc.distributeTable.Put(cnrNodes[i].PublicKey(), avg) - } - - txTable.Transfer(&common.TransferTx{ - From: owner.Owner(), - To: inc.bankOwner, - Amount: total, - }) - } - - common.TransferAssets(inc.exchange, txTable, common.BasicIncomeCollectionDetails(inc.epoch)) -} - -// avgEstimation returns estimation value for a single container. Right now it -// simply calculates an average of all announcements, however it can be smarter and -// base the result on reputation of the announcers and clever math. -func (inc *IncomeSettlementContext) avgEstimation(e *cntClient.Estimations) (avg uint64) { - if len(e.Values) == 0 { - return 0 - } - - for i := range e.Values { - avg += e.Values[i].Size - } - - return avg / uint64(len(e.Values)) -} - -func calculateBasicSum(size, rate uint64, ln int) *big.Int { - bigRate := big.NewInt(int64(rate)) - - total := size * uint64(ln) - - price := big.NewInt(0).SetUint64(total) - price.Mul(price, bigRate) - price.Div(price, bigGB) - - if price.Cmp(bigZero) == 0 { - price.Add(price, bigOne) - } - - return price -} diff --git a/pkg/innerring/processors/settlement/basic/context.go b/pkg/innerring/processors/settlement/basic/context.go deleted file mode 100644 index 59bedf2e4..000000000 --- a/pkg/innerring/processors/settlement/basic/context.go +++ /dev/null @@ -1,80 +0,0 @@ -package basic - -import ( - "math/big" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -type ( - EstimationFetcher interface { - Estimations(uint64) ([]*container.Estimations, error) - } - - RateFetcher interface { - BasicRate() (uint64, error) - } - - // BalanceFetcher uses NEP-17 compatible balance contract. 
- BalanceFetcher interface { - Balance(id user.ID) (*big.Int, error) - } - - IncomeSettlementContext struct { - mu sync.Mutex // lock to prevent collection and distribution in the same time - - noop bool - - log *logger.Logger - epoch uint64 - - rate RateFetcher - estimations EstimationFetcher - balances BalanceFetcher - container common.ContainerStorage - placement common.PlacementCalculator - exchange common.Exchanger - accounts common.AccountStorage - - bankOwner user.ID - - // this table is not thread safe, make sure you use it with mu.Lock() - distributeTable *NodeSizeTable - } - - IncomeSettlementContextPrms struct { - Log *logger.Logger - Epoch uint64 - Rate RateFetcher - Estimations EstimationFetcher - Balances BalanceFetcher - Container common.ContainerStorage - Placement common.PlacementCalculator - Exchange common.Exchanger - Accounts common.AccountStorage - } -) - -func NewIncomeSettlementContext(p *IncomeSettlementContextPrms) *IncomeSettlementContext { - res := &IncomeSettlementContext{ - log: p.Log, - epoch: p.Epoch, - rate: p.Rate, - estimations: p.Estimations, - balances: p.Balances, - container: p.Container, - placement: p.Placement, - exchange: p.Exchange, - accounts: p.Accounts, - distributeTable: NewNodeSizeTable(), - } - - res.bankOwner.SetScriptHash(util.Uint160{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) - - return res -} diff --git a/pkg/innerring/processors/settlement/basic/distribute.go b/pkg/innerring/processors/settlement/basic/distribute.go deleted file mode 100644 index e085f1e22..000000000 --- a/pkg/innerring/processors/settlement/basic/distribute.go +++ /dev/null @@ -1,58 +0,0 @@ -package basic - -import ( - "encoding/hex" - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common" - "go.uber.org/zap" -) - -func (inc *IncomeSettlementContext) Distribute() { - inc.mu.Lock() - defer inc.mu.Unlock() - - if inc.noop { - return - } - - txTable := common.NewTransferTable() - - 
bankBalance, err := inc.balances.Balance(inc.bankOwner) - if err != nil { - inc.log.Error("can't fetch balance of banking account", - zap.String("error", err.Error())) - - return - } - - total := inc.distributeTable.Total() - - inc.distributeTable.Iterate(func(key []byte, n *big.Int) { - nodeOwner, err := inc.accounts.ResolveKey(nodeInfoWrapper(key)) - if err != nil { - inc.log.Warn("can't transform public key to owner id", - zap.String("public_key", hex.EncodeToString(key)), - zap.String("error", err.Error())) - - return - } - - txTable.Transfer(&common.TransferTx{ - From: inc.bankOwner, - To: *nodeOwner, - Amount: normalizedValue(n, total, bankBalance), - }) - }) - - common.TransferAssets(inc.exchange, txTable, common.BasicIncomeDistributionDetails(inc.epoch)) -} - -func normalizedValue(n, total, limit *big.Int) *big.Int { - if limit.Cmp(bigZero) == 0 { - return big.NewInt(0) - } - - n.Mul(n, limit) - return n.Div(n, total) -} diff --git a/pkg/innerring/processors/settlement/basic/distribute_test.go b/pkg/innerring/processors/settlement/basic/distribute_test.go deleted file mode 100644 index 24eb0db3d..000000000 --- a/pkg/innerring/processors/settlement/basic/distribute_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package basic - -import ( - "math/big" - "testing" - - "github.com/stretchr/testify/require" -) - -type normalizedValueCase struct { - name string - n, total, limit uint64 - expected uint64 -} - -func TestNormalizedValues(t *testing.T) { - testCases := []normalizedValueCase{ - { - name: "zero limit", - n: 50, - total: 100, - limit: 0, - expected: 0, - }, - { - name: "scale down", - n: 50, - total: 100, - limit: 10, - expected: 5, - }, - { - name: "scale up", - n: 50, - total: 100, - limit: 1000, - expected: 500, - }, - } - - for _, testCase := range testCases { - testNormalizedValues(t, testCase) - } -} - -func testNormalizedValues(t *testing.T, c normalizedValueCase) { - n := big.NewInt(0).SetUint64(c.n) - total := big.NewInt(0).SetUint64(c.total) - limit 
:= big.NewInt(0).SetUint64(c.limit) - exp := big.NewInt(0).SetUint64(c.expected) - - got := normalizedValue(n, total, limit) - require.Zero(t, exp.Cmp(got), c.name) -} diff --git a/pkg/innerring/processors/settlement/basic/util.go b/pkg/innerring/processors/settlement/basic/util.go deleted file mode 100644 index 258bae46f..000000000 --- a/pkg/innerring/processors/settlement/basic/util.go +++ /dev/null @@ -1,44 +0,0 @@ -package basic - -import ( - "math/big" -) - -// NodeSizeTable is not thread safe, make sure it is accessed with external -// locks or in single routine. -type NodeSizeTable struct { - prices map[string]uint64 - total uint64 -} - -func (t *NodeSizeTable) Put(id []byte, avg uint64) { - t.prices[string(id)] += avg - t.total += avg -} - -func (t *NodeSizeTable) Total() *big.Int { - return big.NewInt(0).SetUint64(t.total) -} - -func (t *NodeSizeTable) Iterate(f func([]byte, *big.Int)) { - for k, v := range t.prices { - n := big.NewInt(0).SetUint64(v) - f([]byte(k), n) - } -} - -func NewNodeSizeTable() *NodeSizeTable { - return &NodeSizeTable{ - prices: make(map[string]uint64), - } -} - -type nodeInfoWrapper []byte - -func (nodeInfoWrapper) Price() *big.Int { - panic("should not be used") -} - -func (n nodeInfoWrapper) PublicKey() []byte { - return n -} diff --git a/pkg/innerring/processors/settlement/calls.go b/pkg/innerring/processors/settlement/calls.go deleted file mode 100644 index 33191662b..000000000 --- a/pkg/innerring/processors/settlement/calls.go +++ /dev/null @@ -1,132 +0,0 @@ -package settlement - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// HandleAuditEvent catches a new AuditEvent and -// adds AuditProcessor call to the execution queue. 
-func (p *Processor) HandleAuditEvent(e event.Event) { - ev := e.(AuditEvent) - - epoch := ev.Epoch() - - if !p.state.IsAlphabet() { - p.log.Info("non alphabet mode, ignore audit payments") - - return - } - - log := &logger.Logger{Logger: p.log.With( - zap.Uint64("epoch", epoch), - )} - - log.Info("new audit settlement event") - - if epoch == 0 { - log.Debug("ignore genesis epoch") - return - } - - handler := &auditEventHandler{ - log: log, - epoch: epoch, - proc: p.auditProc, - } - - err := p.pool.Submit(handler.handle) - if err != nil { - log.Warn("could not add handler of AuditEvent to queue", - zap.String("error", err.Error()), - ) - - return - } - - log.Debug("AuditEvent handling successfully scheduled") -} - -func (p *Processor) HandleIncomeCollectionEvent(e event.Event) { - ev := e.(BasicIncomeCollectEvent) - epoch := ev.Epoch() - - if !p.state.IsAlphabet() { - p.log.Info("non alphabet mode, ignore income collection event") - - return - } - - p.log.Info("start basic income collection", - zap.Uint64("epoch", epoch)) - - p.contextMu.Lock() - defer p.contextMu.Unlock() - - if _, ok := p.incomeContexts[epoch]; ok { - p.log.Error("income context already exists", - zap.Uint64("epoch", epoch)) - - return - } - - incomeCtx, err := p.basicIncome.CreateContext(epoch) - if err != nil { - p.log.Error("can't create income context", - zap.String("error", err.Error())) - - return - } - - p.incomeContexts[epoch] = incomeCtx - - err = p.pool.Submit(func() { - incomeCtx.Collect() - }) - if err != nil { - p.log.Warn("could not add handler of basic income collection to queue", - zap.String("error", err.Error()), - ) - - return - } -} - -func (p *Processor) HandleIncomeDistributionEvent(e event.Event) { - ev := e.(BasicIncomeDistributeEvent) - epoch := ev.Epoch() - - if !p.state.IsAlphabet() { - p.log.Info("non alphabet mode, ignore income distribution event") - - return - } - - p.log.Info("start basic income distribution", - zap.Uint64("epoch", epoch)) - - p.contextMu.Lock() - 
defer p.contextMu.Unlock() - - incomeCtx, ok := p.incomeContexts[epoch] - delete(p.incomeContexts, epoch) - - if !ok { - p.log.Warn("income context distribution does not exists", - zap.Uint64("epoch", epoch)) - - return - } - - err := p.pool.Submit(func() { - incomeCtx.Distribute() - }) - if err != nil { - p.log.Warn("could not add handler of basic income distribution to queue", - zap.String("error", err.Error()), - ) - - return - } -} diff --git a/pkg/innerring/processors/settlement/common/details.go b/pkg/innerring/processors/settlement/common/details.go deleted file mode 100644 index 1cf719f63..000000000 --- a/pkg/innerring/processors/settlement/common/details.go +++ /dev/null @@ -1,33 +0,0 @@ -package common - -import ( - "encoding/binary" -) - -var ( - auditPrefix = []byte{0x40} - basicIncomeCollectionPrefix = []byte{0x41} - basicIncomeDistributionPrefix = []byte{0x42} -) - -func AuditSettlementDetails(epoch uint64) []byte { - return details(auditPrefix, epoch) -} - -func BasicIncomeCollectionDetails(epoch uint64) []byte { - return details(basicIncomeCollectionPrefix, epoch) -} - -func BasicIncomeDistributionDetails(epoch uint64) []byte { - return details(basicIncomeDistributionPrefix, epoch) -} - -func details(prefix []byte, epoch uint64) []byte { - prefixLen := len(prefix) - buf := make([]byte, prefixLen+8) - - copy(buf, prefix) - binary.LittleEndian.PutUint64(buf[prefixLen:], epoch) - - return buf -} diff --git a/pkg/innerring/processors/settlement/common/details_test.go b/pkg/innerring/processors/settlement/common/details_test.go deleted file mode 100644 index 9755e6aef..000000000 --- a/pkg/innerring/processors/settlement/common/details_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestAuditSettlementDetails(t *testing.T) { - var n uint64 = 1994 // 0x7CA - exp := []byte{0x40, 0xCA, 0x07, 0, 0, 0, 0, 0, 0} - got := AuditSettlementDetails(n) - require.Equal(t, exp, got) -} 
- -func TestBasicIncomeCollectionDetails(t *testing.T) { - var n uint64 = 1994 // 0x7CA - exp := []byte{0x41, 0xCA, 0x07, 0, 0, 0, 0, 0, 0} - got := BasicIncomeCollectionDetails(n) - require.Equal(t, exp, got) -} - -func TestBasicIncomeDistributionDetails(t *testing.T) { - var n uint64 = 1994 // 0x7CA - exp := []byte{0x42, 0xCA, 0x07, 0, 0, 0, 0, 0, 0} - got := BasicIncomeDistributionDetails(n) - require.Equal(t, exp, got) -} diff --git a/pkg/innerring/processors/settlement/common/types.go b/pkg/innerring/processors/settlement/common/types.go deleted file mode 100644 index 9dca0fd0d..000000000 --- a/pkg/innerring/processors/settlement/common/types.go +++ /dev/null @@ -1,54 +0,0 @@ -package common - -import ( - "math/big" - - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -// NodeInfo groups the data about the storage node -// necessary for calculating audit fees. -type NodeInfo interface { - // Must return storage price of the node for one epoch in GASe-12. - Price() *big.Int - - // Must return public key of the node. - PublicKey() []byte -} - -// ContainerInfo groups the data about FrostFS container -// necessary for calculating audit fee. -type ContainerInfo interface { - // Must return identifier of the container owner. - Owner() user.ID -} - -// ContainerStorage is an interface of -// storage of the FrostFS containers. -type ContainerStorage interface { - // Must return information about the container by ID. - ContainerInfo(cid.ID) (ContainerInfo, error) -} - -// PlacementCalculator is a component interface -// that builds placement vectors. -type PlacementCalculator interface { - // Must return information about the nodes from container by its ID of the given epoch. - ContainerNodes(uint64, cid.ID) ([]NodeInfo, error) -} - -// AccountStorage is an network member accounts interface. 
-type AccountStorage interface { - // Must resolve information about the storage node - // to its ID in system. - ResolveKey(NodeInfo) (*user.ID, error) -} - -// Exchanger is an interface of monetary component. -type Exchanger interface { - // Must transfer amount of GASe-12 from sender to recipient. - // - // Amount must be positive. - Transfer(sender, recipient user.ID, amount *big.Int, details []byte) -} diff --git a/pkg/innerring/processors/settlement/common/util.go b/pkg/innerring/processors/settlement/common/util.go deleted file mode 100644 index 6f40fb577..000000000 --- a/pkg/innerring/processors/settlement/common/util.go +++ /dev/null @@ -1,74 +0,0 @@ -package common - -import ( - "math/big" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -type TransferTable struct { - txs map[string]map[string]*TransferTx -} - -type TransferTx struct { - From, To user.ID - - Amount *big.Int -} - -func NewTransferTable() *TransferTable { - return &TransferTable{ - txs: make(map[string]map[string]*TransferTx), - } -} - -func (t *TransferTable) Transfer(tx *TransferTx) { - if tx.From.Equals(tx.To) { - return - } - - from, to := tx.From.EncodeToString(), tx.To.EncodeToString() - - m, ok := t.txs[from] - if !ok { - if m, ok = t.txs[to]; ok { - to = from // ignore `From = To` swap because `From` doesn't require - tx.Amount.Neg(tx.Amount) - } else { - m = make(map[string]*TransferTx, 1) - t.txs[from] = m - } - } - - tgt, ok := m[to] - if !ok { - m[to] = tx - return - } - - tgt.Amount.Add(tgt.Amount, tx.Amount) -} - -func (t *TransferTable) Iterate(f func(*TransferTx)) { - for _, m := range t.txs { - for _, tx := range m { - f(tx) - } - } -} - -func TransferAssets(e Exchanger, t *TransferTable, details []byte) { - t.Iterate(func(tx *TransferTx) { - sign := tx.Amount.Sign() - if sign == 0 { - return - } - - if sign < 0 { - tx.From, tx.To = tx.To, tx.From - tx.Amount.Neg(tx.Amount) - } - - e.Transfer(tx.From, tx.To, tx.Amount, details) - }) -} diff --git 
a/pkg/innerring/processors/settlement/deps.go b/pkg/innerring/processors/settlement/deps.go deleted file mode 100644 index 37d7955ad..000000000 --- a/pkg/innerring/processors/settlement/deps.go +++ /dev/null @@ -1,17 +0,0 @@ -package settlement - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic" -) - -// AuditProcessor is an interface of data audit fee processor. -type AuditProcessor interface { - // Must process data audit conducted in epoch. - ProcessAuditSettlements(epoch uint64) -} - -// BasicIncomeInitializer is an interface of basic income context creator. -type BasicIncomeInitializer interface { - // Creates context that processes basic income for provided epoch. - CreateContext(epoch uint64) (*basic.IncomeSettlementContext, error) -} diff --git a/pkg/innerring/processors/settlement/events.go b/pkg/innerring/processors/settlement/events.go deleted file mode 100644 index a47a3e89b..000000000 --- a/pkg/innerring/processors/settlement/events.go +++ /dev/null @@ -1,46 +0,0 @@ -package settlement - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" -) - -// AuditEvent is an event of the start of -// cash settlements for data audit. -type AuditEvent struct { - epoch uint64 -} - -type ( - BasicIncomeCollectEvent = AuditEvent - BasicIncomeDistributeEvent = AuditEvent -) - -// MorphEvent implements Neo:Morph event. -func (e AuditEvent) MorphEvent() {} - -// NewAuditEvent creates new AuditEvent for epoch. -func NewAuditEvent(epoch uint64) event.Event { - return AuditEvent{ - epoch: epoch, - } -} - -// Epoch returns the number of the epoch -// in which the event was generated. -func (e AuditEvent) Epoch() uint64 { - return e.epoch -} - -// NewBasicIncomeCollectEvent for epoch. -func NewBasicIncomeCollectEvent(epoch uint64) event.Event { - return BasicIncomeCollectEvent{ - epoch: epoch, - } -} - -// NewBasicIncomeDistributeEvent for epoch. 
-func NewBasicIncomeDistributeEvent(epoch uint64) event.Event { - return BasicIncomeDistributeEvent{ - epoch: epoch, - } -} diff --git a/pkg/innerring/processors/settlement/handlers.go b/pkg/innerring/processors/settlement/handlers.go deleted file mode 100644 index f73b61983..000000000 --- a/pkg/innerring/processors/settlement/handlers.go +++ /dev/null @@ -1,19 +0,0 @@ -package settlement - -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - -type auditEventHandler struct { - log *logger.Logger - - epoch uint64 - - proc AuditProcessor -} - -func (p *auditEventHandler) handle() { - p.log.Info("process audit settlements") - - p.proc.ProcessAuditSettlements(p.epoch) - - p.log.Info("audit processing finished") -} diff --git a/pkg/innerring/processors/settlement/opts.go b/pkg/innerring/processors/settlement/opts.go deleted file mode 100644 index b344f98d6..000000000 --- a/pkg/innerring/processors/settlement/opts.go +++ /dev/null @@ -1,31 +0,0 @@ -package settlement - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// Option is a Processor constructor's option. -type Option func(*options) - -type options struct { - poolSize int - - log *logger.Logger -} - -func defaultOptions() *options { - const poolSize = 10 - - return &options{ - poolSize: poolSize, - log: &logger.Logger{Logger: zap.L()}, - } -} - -// WithLogger returns option to override the component for logging. 
-func WithLogger(l *logger.Logger) Option { - return func(o *options) { - o.log = l - } -} diff --git a/pkg/innerring/processors/settlement/processor.go b/pkg/innerring/processors/settlement/processor.go deleted file mode 100644 index e86666d5c..000000000 --- a/pkg/innerring/processors/settlement/processor.go +++ /dev/null @@ -1,78 +0,0 @@ -package settlement - -import ( - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic" - nodeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/panjf2000/ants/v2" - "go.uber.org/zap" -) - -type ( - // AlphabetState is a callback interface for inner ring global state. - AlphabetState interface { - IsAlphabet() bool - } - - // Processor is an event handler for payments in the system. - Processor struct { - log *logger.Logger - - state AlphabetState - - pool nodeutil.WorkerPool - - auditProc AuditProcessor - - basicIncome BasicIncomeInitializer - - contextMu sync.Mutex - incomeContexts map[uint64]*basic.IncomeSettlementContext - } - - // Prm groups the required parameters of Processor's constructor. - Prm struct { - AuditProcessor AuditProcessor - BasicIncome BasicIncomeInitializer - State AlphabetState - } -) - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf("invalid parameter %s (%T):%v", n, v, v)) -} - -// New creates and returns a new Processor instance. 
-func New(prm Prm, opts ...Option) *Processor { - switch { - case prm.AuditProcessor == nil: - panicOnPrmValue("AuditProcessor", prm.AuditProcessor) - } - - o := defaultOptions() - - for i := range opts { - opts[i](o) - } - - pool, err := ants.NewPool(o.poolSize, ants.WithNonblocking(true)) - if err != nil { - panic(fmt.Errorf("could not create worker pool: %w", err)) - } - - o.log.Debug("worker pool for settlement processor successfully initialized", - zap.Int("capacity", o.poolSize), - ) - - return &Processor{ - log: o.log, - state: prm.State, - pool: pool, - auditProc: prm.AuditProcessor, - basicIncome: prm.BasicIncome, - incomeContexts: make(map[uint64]*basic.IncomeSettlementContext), - } -} diff --git a/pkg/innerring/processors/subnet/common.go b/pkg/innerring/processors/subnet/common.go deleted file mode 100644 index 2026c8641..000000000 --- a/pkg/innerring/processors/subnet/common.go +++ /dev/null @@ -1,22 +0,0 @@ -package subnetevents - -import ( - "fmt" - - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" -) - -// common interface of subnet notifications with subnet ID. -type eventWithID interface { - // ReadID reads identifier of the subnet. - ReadID(*subnetid.ID) error -} - -// an error which is returned on zero subnet operation attempt. 
-type zeroSubnetOp struct { - op string -} - -func (x zeroSubnetOp) Error() string { - return fmt.Sprintf("zero subnet %s", x.op) -} diff --git a/pkg/innerring/processors/subnet/common_test.go b/pkg/innerring/processors/subnet/common_test.go deleted file mode 100644 index 23e61a44a..000000000 --- a/pkg/innerring/processors/subnet/common_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package subnetevents - -import subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" - -type idEvent struct { - id subnetid.ID - - idErr error -} - -func (x idEvent) ReadID(id *subnetid.ID) error { - if x.idErr != nil { - return x.idErr - } - - *id = x.id - - return nil -} diff --git a/pkg/innerring/processors/subnet/put.go b/pkg/innerring/processors/subnet/put.go deleted file mode 100644 index ba1588756..000000000 --- a/pkg/innerring/processors/subnet/put.go +++ /dev/null @@ -1,82 +0,0 @@ -package subnetevents - -import ( - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet" - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -// Put represents a notification about FrostFS subnet creation. -// Generated by a contract when intending to create a subnet. -type Put interface { - // Contains the ID of the subnet to be created. - eventWithID - - // ReadCreator reads the user ID of the subnet creator. - // Returns an error if the ID is missing. - ReadCreator(id *user.ID) error - - // ReadInfo reads information about a subnet to be created. - ReadInfo(info *subnet.Info) error -} - -// PutValidator asserts intent to create a subnet. -type PutValidator struct{} - -// errDiffOwner is returned when the subnet owners differ. -var errDiffOwner = errors.New("diff subnet owners") - -// errDiffID is returned when the subnet IDs differ. -var errDiffID = errors.New("diff subnet IDs") - -// Assert processes the attempt to create a subnet. It approves the creation through nil return. 
-// -// All read errors of Put are forwarded. -// -// It returns an error on: -// - zero subnet creation; -// - empty ID or different from the one wired into info; -// - empty owner ID or different from the one wired into info. -func (x PutValidator) Assert(event Put) error { - var err error - - // read ID - var id subnetid.ID - if err = event.ReadID(&id); err != nil { - return fmt.Errorf("read ID: %w", err) - } - - // prevent zero subnet creation - if subnetid.IsZero(id) { - return zeroSubnetOp{ - op: "creation", - } - } - - // read creator's user ID in FrostFS system - var creator user.ID - if err = event.ReadCreator(&creator); err != nil { - return fmt.Errorf("read creator: %w", err) - } - - // read information about the subnet - var info subnet.Info - if err = event.ReadInfo(&info); err != nil { - return fmt.Errorf("read info: %w", err) - } - - // check if the explicit ID equals to the one from info - if !subnet.AssertReference(info, id) { - return errDiffID - } - - // check if the explicit creator equals to the one from info - if !subnet.AssertOwnership(info, creator) { - return errDiffOwner - } - - return nil -} diff --git a/pkg/innerring/processors/subnet/put_test.go b/pkg/innerring/processors/subnet/put_test.go deleted file mode 100644 index dda6ee90a..000000000 --- a/pkg/innerring/processors/subnet/put_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package subnetevents - -import ( - "errors" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" - "github.com/stretchr/testify/require" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet" - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" -) - -type put struct { - idEvent - - creator user.ID - - creatorErr error - - info subnet.Info - - infoErr error -} - -func (x put) ReadCreator(id *user.ID) error { - if x.creatorErr != nil { - return x.creatorErr - } - - *id = x.creator - - return nil -} - -func (x put) 
ReadInfo(info *subnet.Info) error { - if x.infoErr != nil { - return x.infoErr - } - - *info = x.info - - return nil -} - -func TestPutValidator_Assert(t *testing.T) { - var ( - v PutValidator - - e put - - err error - ) - - // read ID error - e.idErr = errors.New("id err") - - err = v.Assert(e) - require.ErrorIs(t, err, e.idErr) - - e.idErr = nil - - // zero subnet ID - subnetid.MakeZero(&e.id) - - err = v.Assert(e) - require.ErrorAs(t, err, new(zeroSubnetOp)) - - const idNum = 13 - e.id.SetNumeric(idNum) - - // read creator error - e.creatorErr = errors.New("creator err") - - err = v.Assert(e) - require.ErrorIs(t, err, e.creatorErr) - - e.creatorErr = nil - - // read info error - e.infoErr = errors.New("info err") - - err = v.Assert(e) - require.ErrorIs(t, err, e.infoErr) - - e.infoErr = nil - - // diff explicit ID and the one in info - var id2 subnetid.ID - - id2.SetNumeric(idNum + 1) - - e.info.SetID(id2) - - err = v.Assert(e) - require.ErrorIs(t, err, errDiffID) - - e.info.SetID(e.id) - - // diff explicit creator and the one in info - creator2 := *usertest.ID() - - e.info.SetOwner(creator2) - - err = v.Assert(e) - require.ErrorIs(t, err, errDiffOwner) - - e.info.SetOwner(e.creator) - - err = v.Assert(e) - require.NoError(t, err) -} diff --git a/pkg/innerring/rpc.go b/pkg/innerring/rpc.go deleted file mode 100644 index 8e96deb7b..000000000 --- a/pkg/innerring/rpc.go +++ /dev/null @@ -1,235 +0,0 @@ -package innerring - -import ( - "context" - "crypto/ecdsa" - "fmt" - "time" - - clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - storagegroup2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup" - frostfsapiclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/internal/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/auditor" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup" - "go.uber.org/zap" -) - -type ( - ClientCache struct { - log *logger.Logger - cache interface { - Get(clientcore.NodeInfo) (clientcore.Client, error) - CloseAll() - } - key *ecdsa.PrivateKey - - sgTimeout, headTimeout, rangeTimeout time.Duration - } - - clientCacheParams struct { - Log *logger.Logger - Key *ecdsa.PrivateKey - - AllowExternal bool - - SGTimeout, HeadTimeout, RangeTimeout time.Duration - } -) - -func newClientCache(p *clientCacheParams) *ClientCache { - return &ClientCache{ - log: p.Log, - cache: cache.NewSDKClientCache(cache.ClientCacheOpts{AllowExternal: p.AllowExternal, Key: p.Key}), - key: p.Key, - sgTimeout: p.SGTimeout, - headTimeout: p.HeadTimeout, - rangeTimeout: p.RangeTimeout, - } -} - -func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.Client, error) { - // Because cache is used by `ClientCache` exclusively, - // client will always have valid key. - return c.cache.Get(info) -} - -// GetSG polls the container to get the object by id. -// Returns storage groups structure from received object. -// -// Returns an error of type apistatus.ObjectNotFound if storage group is missing. 
-func (c *ClientCache) GetSG(ctx context.Context, prm storagegroup2.GetSGPrm) (*storagegroup.StorageGroup, error) { - var sgAddress oid.Address - sgAddress.SetContainer(prm.CID) - sgAddress.SetObject(prm.OID) - - return c.getSG(ctx, sgAddress, &prm.NetMap, prm.Container) -} - -func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.NetMap, cn [][]netmap.NodeInfo) (*storagegroup.StorageGroup, error) { - obj := addr.Object() - - nodes, err := placement.BuildObjectPlacement(nm, cn, &obj) - if err != nil { - return nil, fmt.Errorf("can't build object placement: %w", err) - } - - var info clientcore.NodeInfo - - var getObjPrm frostfsapiclient.GetObjectPrm - getObjPrm.SetAddress(addr) - - for _, node := range placement.FlattenNodes(nodes) { - err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(node)) - if err != nil { - return nil, fmt.Errorf("parse client node info: %w", err) - } - - cli, err := c.getWrappedClient(info) - if err != nil { - c.log.Warn("can't setup remote connection", - zap.String("error", err.Error())) - - continue - } - - ctx, cancel := context.WithTimeout(ctx, c.sgTimeout) - - // NOTE: we use the function which does not verify object integrity (checksums, signature), - // but it would be useful to do as part of a data audit. - res, err := cli.GetObject(ctx, getObjPrm) - - cancel() - - if err != nil { - c.log.Warn("can't get storage group object", - zap.String("error", err.Error())) - - continue - } - - var sg storagegroup.StorageGroup - - err = storagegroup.ReadFromObject(&sg, *res.Object()) - if err != nil { - return nil, fmt.Errorf("can't parse storage group from a object: %w", err) - } - - return &sg, nil - } - - var errNotFound apistatus.ObjectNotFound - - return nil, errNotFound -} - -// GetHeader requests node from the container under audit to return object header by id. 
-func (c *ClientCache) GetHeader(ctx context.Context, prm auditor.GetHeaderPrm) (*object.Object, error) { - var objAddress oid.Address - objAddress.SetContainer(prm.CID) - objAddress.SetObject(prm.OID) - - var info clientcore.NodeInfo - - err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(prm.Node)) - if err != nil { - return nil, fmt.Errorf("parse client node info: %w", err) - } - - cli, err := c.getWrappedClient(info) - if err != nil { - return nil, fmt.Errorf("can't setup remote connection with %s: %w", info.AddressGroup(), err) - } - - cctx, cancel := context.WithTimeout(ctx, c.headTimeout) - - var obj *object.Object - - if prm.NodeIsRelay { - obj, err = frostfsapiclient.GetObjectHeaderFromContainer(cctx, cli, objAddress) - } else { - obj, err = frostfsapiclient.GetRawObjectHeaderLocally(cctx, cli, objAddress) - } - - cancel() - - if err != nil { - return nil, fmt.Errorf("object head error: %w", err) - } - - return obj, nil -} - -// GetRangeHash requests node from the container under audit to return Tillich-Zemor hash of the -// payload range of the object with specified identifier. 
-func (c *ClientCache) GetRangeHash(ctx context.Context, prm auditor.GetRangeHashPrm) ([]byte, error) { - var objAddress oid.Address - objAddress.SetContainer(prm.CID) - objAddress.SetObject(prm.OID) - - var info clientcore.NodeInfo - - err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(prm.Node)) - if err != nil { - return nil, fmt.Errorf("parse client node info: %w", err) - } - - cli, err := c.getWrappedClient(info) - if err != nil { - return nil, fmt.Errorf("can't setup remote connection with %s: %w", info.AddressGroup(), err) - } - - cctx, cancel := context.WithTimeout(ctx, c.rangeTimeout) - - h, err := frostfsapiclient.HashObjectRange(cctx, cli, objAddress, prm.Range) - - cancel() - - if err != nil { - return nil, fmt.Errorf("object rangehash error: %w", err) - } - - return h, nil -} - -func (c *ClientCache) getWrappedClient(info clientcore.NodeInfo) (frostfsapiclient.Client, error) { - // can be also cached - var cInternal frostfsapiclient.Client - - cli, err := c.Get(info) - if err != nil { - return cInternal, fmt.Errorf("could not get API client from cache") - } - - cInternal.WrapBasicClient(cli) - cInternal.SetPrivateKey(c.key) - - return cInternal, nil -} - -func (c ClientCache) ListSG(ctx context.Context, dst *storagegroup2.SearchSGDst, prm storagegroup2.SearchSGPrm) error { - cli, err := c.getWrappedClient(prm.NodeInfo) - if err != nil { - return fmt.Errorf("could not get API client from cache") - } - - var cliPrm frostfsapiclient.SearchSGPrm - - cliPrm.SetContainerID(prm.Container) - - res, err := cli.SearchSG(ctx, cliPrm) - if err != nil { - return err - } - - dst.Objects = res.IDList() - - return nil -} diff --git a/pkg/innerring/settlement.go b/pkg/innerring/settlement.go deleted file mode 100644 index 08e7a9f4d..000000000 --- a/pkg/innerring/settlement.go +++ /dev/null @@ -1,300 +0,0 @@ -package innerring - -import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/sha256" - "encoding/hex" - "fmt" - "math/big" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common" - auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit" - balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" - containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - auditAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit" - containerAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - netmapAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "go.uber.org/zap" -) - -const ( - auditSettlementContext = "audit" - basicIncomeSettlementContext = "basic income" -) - -type settlementDeps struct { - log *logger.Logger - - cnrSrc container.Source - - auditClient *auditClient.Client - - nmClient *netmapClient.Client - - clientCache *ClientCache - - balanceClient *balanceClient.Client - - settlementCtx string -} - -type auditSettlementDeps struct { - settlementDeps -} - -type basicIncomeSettlementDeps struct { - settlementDeps - cnrClient *containerClient.Client -} - -type basicSettlementConstructor struct { - dep *basicIncomeSettlementDeps -} - -type auditSettlementCalculator audit.Calculator - -type containerWrapper containerAPI.Container - 
-type nodeInfoWrapper struct { - ni netmapAPI.NodeInfo -} - -type sgWrapper storagegroup.StorageGroup - -func (s *sgWrapper) Size() uint64 { - return (*storagegroup.StorageGroup)(s).ValidationDataSize() -} - -func (n nodeInfoWrapper) PublicKey() []byte { - return n.ni.PublicKey() -} - -func (n nodeInfoWrapper) Price() *big.Int { - return big.NewInt(int64(n.ni.Price())) -} - -func (c containerWrapper) Owner() user.ID { - return (containerAPI.Container)(c).Owner() -} - -func (s settlementDeps) AuditResultsForEpoch(epoch uint64) ([]*auditAPI.Result, error) { - idList, err := s.auditClient.ListAuditResultIDByEpoch(epoch) - if err != nil { - return nil, fmt.Errorf("could not list audit results in sidechain: %w", err) - } - - res := make([]*auditAPI.Result, 0, len(idList)) - - for i := range idList { - r, err := s.auditClient.GetAuditResult(idList[i]) - if err != nil { - return nil, fmt.Errorf("could not get audit result: %w", err) - } - - res = append(res, r) - } - - return res, nil -} - -func (s settlementDeps) ContainerInfo(cid cid.ID) (common.ContainerInfo, error) { - cnr, err := s.cnrSrc.Get(cid) - if err != nil { - return nil, fmt.Errorf("could not get container from storage: %w", err) - } - - return (containerWrapper)(cnr.Value), nil -} - -func (s settlementDeps) buildContainer(e uint64, cid cid.ID) ([][]netmapAPI.NodeInfo, *netmapAPI.NetMap, error) { - var ( - nm *netmapAPI.NetMap - err error - ) - - if e > 0 { - nm, err = s.nmClient.GetNetMapByEpoch(e) - } else { - nm, err = netmap.GetLatestNetworkMap(s.nmClient) - } - - if err != nil { - return nil, nil, fmt.Errorf("could not get network map from storage: %w", err) - } - - cnr, err := s.cnrSrc.Get(cid) - if err != nil { - return nil, nil, fmt.Errorf("could not get container from sidechain: %w", err) - } - - binCnr := make([]byte, sha256.Size) - cid.Encode(binCnr) - - cn, err := nm.ContainerNodes( - cnr.Value.PlacementPolicy(), - binCnr, // may be replace pivot calculation to frostfs-api-go - ) - if err != nil { 
- return nil, nil, fmt.Errorf("could not calculate container nodes: %w", err) - } - - return cn, nm, nil -} - -func (s settlementDeps) ContainerNodes(e uint64, cid cid.ID) ([]common.NodeInfo, error) { - cn, _, err := s.buildContainer(e, cid) - if err != nil { - return nil, err - } - - var sz int - - for i := range cn { - sz += len(cn[i]) - } - - res := make([]common.NodeInfo, 0, sz) - - for i := range cn { - for j := range cn[i] { - res = append(res, nodeInfoWrapper{ - ni: cn[i][j], - }) - } - } - - return res, nil -} - -// SGInfo returns audit.SGInfo by object address. -// -// Returns an error of type apistatus.ObjectNotFound if storage group is missing. -func (s settlementDeps) SGInfo(addr oid.Address) (audit.SGInfo, error) { - cnr := addr.Container() - - cn, nm, err := s.buildContainer(0, cnr) - if err != nil { - return nil, err - } - - sg, err := s.clientCache.getSG(context.Background(), addr, nm, cn) - if err != nil { - return nil, err - } - - return (*sgWrapper)(sg), nil -} - -func (s settlementDeps) ResolveKey(ni common.NodeInfo) (*user.ID, error) { - pub, err := keys.NewPublicKeyFromBytes(ni.PublicKey(), elliptic.P256()) - if err != nil { - return nil, err - } - - var id user.ID - user.IDFromKey(&id, (ecdsa.PublicKey)(*pub)) - - return &id, nil -} - -func (s settlementDeps) Transfer(sender, recipient user.ID, amount *big.Int, details []byte) { - if s.settlementCtx == "" { - panic("unknown settlement deps context") - } - - log := s.log.With( - zap.Stringer("sender", sender), - zap.Stringer("recipient", recipient), - zap.Stringer("amount (GASe-12)", amount), - zap.String("details", hex.EncodeToString(details)), - ) - - if !amount.IsInt64() { - s.log.Error("amount can not be represented as an int64") - - return - } - - params := balanceClient.TransferPrm{ - Amount: amount.Int64(), - From: sender, - To: recipient, - Details: details, - } - - err := s.balanceClient.TransferX(params) - if err != nil { - log.Error(fmt.Sprintf("%s: could not send transfer", 
s.settlementCtx), - zap.String("error", err.Error()), - ) - - return - } - - log.Debug(fmt.Sprintf("%s: transfer was successfully sent", s.settlementCtx)) -} - -func (b basicIncomeSettlementDeps) BasicRate() (uint64, error) { - return b.nmClient.BasicIncomeRate() -} - -func (b basicIncomeSettlementDeps) Estimations(epoch uint64) ([]*containerClient.Estimations, error) { - estimationIDs, err := b.cnrClient.ListLoadEstimationsByEpoch(epoch) - if err != nil { - return nil, err - } - - result := make([]*containerClient.Estimations, 0, len(estimationIDs)) - - for i := range estimationIDs { - estimation, err := b.cnrClient.GetUsedSpaceEstimations(estimationIDs[i]) - if err != nil { - b.log.Warn("can't get used space estimation", - zap.String("estimation_id", hex.EncodeToString(estimationIDs[i])), - zap.String("error", err.Error())) - - continue - } - - result = append(result, estimation) - } - - return result, nil -} - -func (b basicIncomeSettlementDeps) Balance(id user.ID) (*big.Int, error) { - return b.balanceClient.BalanceOf(id) -} - -func (s *auditSettlementCalculator) ProcessAuditSettlements(epoch uint64) { - (*audit.Calculator)(s).Calculate(&audit.CalculatePrm{ - Epoch: epoch, - }) -} - -func (b *basicSettlementConstructor) CreateContext(epoch uint64) (*basic.IncomeSettlementContext, error) { - return basic.NewIncomeSettlementContext(&basic.IncomeSettlementContextPrms{ - Log: b.dep.log, - Epoch: epoch, - Rate: b.dep, - Estimations: b.dep, - Balances: b.dep, - Container: b.dep, - Placement: b.dep, - Exchange: b.dep, - Accounts: b.dep, - }), nil -} diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 903d9c876..6a6ca0ade 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -4,9 +4,8 @@ import ( "fmt" "sort" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" - auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -61,7 +60,7 @@ func (s *Server) IsAlphabet() bool { func (s *Server) InnerRingIndex() int { index, err := s.statusIndex.InnerRingIndex() if err != nil { - s.log.Error("can't get inner ring index", zap.String("error", err.Error())) + s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error())) return -1 } @@ -73,7 +72,7 @@ func (s *Server) InnerRingIndex() int { func (s *Server) InnerRingSize() int { size, err := s.statusIndex.InnerRingSize() if err != nil { - s.log.Error("can't get inner ring size", zap.String("error", err.Error())) + s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error())) return 0 } @@ -85,7 +84,7 @@ func (s *Server) InnerRingSize() int { func (s *Server) AlphabetIndex() int { index, err := s.statusIndex.AlphabetIndex() if err != nil { - s.log.Error("can't get alphabet index", zap.String("error", err.Error())) + s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) return -1 } @@ -97,13 +96,13 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro index := s.InnerRingIndex() if s.contracts.alphabet.indexOutOfRange(index) { - s.log.Info("ignore validator vote: node not in alphabet range") + s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange) return nil } if len(validators) == 0 { - s.log.Info("ignore validator vote: empty validators list") + s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList) return nil } @@ -128,7 +127,7 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) { err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, 
voteMethod, epoch, validators) if err != nil { - s.log.Warn("can't invoke vote method in alphabet contract", + s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract, zap.Int8("alphabet_index", int8(letter)), zap.Uint64("epoch", epoch), zap.String("error", err.Error())) @@ -145,18 +144,6 @@ func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) erro return s.voteForSidechainValidator(prm) } -// WriteReport composes the audit result structure from the audit report -// and sends it to Audit contract. -func (s *Server) WriteReport(r *audit.Report) error { - res := r.Result() - res.SetAuditorKey(s.pubKey) - - prm := auditClient.PutPrm{} - prm.SetResult(res) - - return s.auditClient.PutAuditResult(prm) -} - // ResetEpochTimer resets the block timer that produces events to update epoch // counter in the netmap contract. It is used to synchronize this even production // based on the block with a notification of the last epoch. diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go new file mode 100644 index 000000000..fe09f8f2d --- /dev/null +++ b/pkg/innerring/state_test.go @@ -0,0 +1,53 @@ +package innerring + +import ( + "testing" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +func TestServerState(t *testing.T) { + keyStr := "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae" + commiteeKeys, err := keys.NewPublicKeysFromStrings([]string{keyStr}) + require.NoError(t, err, "convert string to commitee public keys failed") + cf := &testCommiteeFetcher{ + keys: commiteeKeys, + } + + irKeys, err := keys.NewPublicKeysFromStrings([]string{keyStr}) + require.NoError(t, err, "convert string to IR public keys failed") + irf := &testIRFetcher{ + keys: irKeys, + } + + key, err := keys.NewPublicKeyFromString(keyStr) + 
require.NoError(t, err, "convert string to public key failed") + + require.NoError(t, err, "failed to create morph client") + srv := &Server{ + statusIndex: newInnerRingIndexer(cf, irf, key, time.Second), + morphClient: &client.Client{}, + } + + var epoch uint64 = 100 + srv.SetEpochCounter(epoch) + require.Equal(t, epoch, srv.EpochCounter(), "invalid epoch counter") + + var epochDuration uint64 = 15 + srv.SetEpochDuration(epochDuration) + require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration") + + var healthStatus control.HealthStatus = control.HealthStatus_READY + srv.setHealthStatus(healthStatus) + require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status") + + require.True(t, srv.IsActive(), "invalid IsActive result") + require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result") + require.Equal(t, 0, srv.InnerRingIndex(), "invalid IR index") + require.Equal(t, 1, srv.InnerRingSize(), "invalid IR index") + require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index") +} diff --git a/pkg/innerring/subnet.go b/pkg/innerring/subnet.go deleted file mode 100644 index 5375029d4..000000000 --- a/pkg/innerring/subnet.go +++ /dev/null @@ -1,354 +0,0 @@ -package innerring - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "errors" - "fmt" - - irsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/subnet" - netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - subnetevents "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet" - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - 
"github.com/nspcc-dev/neo-go/pkg/core/mempoolevent" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - neogoutil "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/panjf2000/ants/v2" - "go.uber.org/zap" -) - -// IR server's component to handle Subnet contract notifications. -type subnetHandler struct { - workerPool util.WorkerPool - - morphClient morphsubnet.Client - - putValidator irsubnet.PutValidator -} - -// configuration of subnet component. -type subnetConfig struct { - queueSize uint32 -} - -// makes IR server to catch Subnet notifications from the sidechain listener, -// and to release the corresponding processing queue on stop. -func (s *Server) initSubnet(cfg subnetConfig) { - s.registerStarter(func() error { - var err error - - // initialize queue for processing of the events from Subnet contract - s.subnetHandler.workerPool, err = ants.NewPool(int(cfg.queueSize), ants.WithNonblocking(true)) - if err != nil { - return fmt.Errorf("subnet queue initialization: %w", err) - } - - // initialize morph client of Subnet contract - clientMode := morphsubnet.NotaryAlphabet - - if s.sideNotaryConfig.disabled { - clientMode = morphsubnet.NonNotary - } - - var initPrm morphsubnet.InitPrm - - initPrm.SetBaseClient(s.morphClient) - initPrm.SetContractAddress(s.contracts.subnet) - initPrm.SetMode(clientMode) - - err = s.subnetHandler.morphClient.Init(initPrm) - if err != nil { - return fmt.Errorf("init morph subnet client: %w", err) - } - - s.listenSubnet() - - return nil - }) - - s.registerCloser(func() error { - s.stopSubnet() - return nil - }) -} - -// releases the Subnet contract notification processing queue. -func (s *Server) stopSubnet() { - s.workerPool.Release() -} - -// names of listened notification events from Subnet contract. -const ( - subnetCreateEvName = "Put" - subnetRemoveEvName = "Delete" - notarySubnetCreateEvName = "put" -) - -// makes the IR server to listen to notifications of Subnet contract. 
-// All required resources must be initialized before (initSubnet). -// It works in one of two modes (configured): notary and non-notary. -// -// All handlers are executed only if the local node is an alphabet one. -// -// Events (notary): -// - put (parser: subnetevents.ParseNotaryPut, handler: catchSubnetCreation); -// - Delete (parser: subnetevents.ParseDelete, handler: catchSubnetCreation). -// -// Events (non-notary): -// - Put (parser: subnetevents.ParsePut, handler: catchSubnetCreation); -// - Delete (parser: subnetevents.ParseDelete, handler: catchSubnetCreation). -func (s *Server) listenSubnet() { - if s.sideNotaryConfig.disabled { - s.listenSubnetWithoutNotary() - return - } - - var ( - parserInfo event.NotaryParserInfo - handlerInfo event.NotaryHandlerInfo - ) - - parserInfo.SetScriptHash(s.contracts.subnet) - handlerInfo.SetScriptHash(s.contracts.subnet) - - listenNotaryEvent := func(notifyName string, parser event.NotaryParser, handler event.Handler) { - notifyTyp := event.NotaryTypeFromString(notifyName) - - parserInfo.SetMempoolType(mempoolevent.TransactionAdded) - handlerInfo.SetMempoolType(mempoolevent.TransactionAdded) - - parserInfo.SetParser(parser) - handlerInfo.SetHandler(handler) - - parserInfo.SetRequestType(notifyTyp) - handlerInfo.SetRequestType(notifyTyp) - - s.morphListener.SetNotaryParser(parserInfo) - s.morphListener.RegisterNotaryHandler(handlerInfo) - } - - // subnet creation - listenNotaryEvent(notarySubnetCreateEvName, subnetevents.ParseNotaryPut, s.onlyAlphabetEventHandler(s.catchSubnetCreation)) - // subnet removal - listenNotifySubnetEvent(s, subnetRemoveEvName, subnetevents.ParseDelete, s.onlyAlphabetEventHandler(s.catchSubnetRemoval)) -} - -func (s *Server) listenSubnetWithoutNotary() { - // subnet creation - listenNotifySubnetEvent(s, subnetCreateEvName, subnetevents.ParsePut, s.onlyAlphabetEventHandler(s.catchSubnetCreation)) - // subnet removal - listenNotifySubnetEvent(s, subnetRemoveEvName, subnetevents.ParseDelete, 
s.onlyAlphabetEventHandler(s.catchSubnetRemoval)) -} - -func listenNotifySubnetEvent(s *Server, notifyName string, parser event.NotificationParser, handler event.Handler) { - var ( - parserInfo event.NotificationParserInfo - handlerInfo event.NotificationHandlerInfo - ) - - parserInfo.SetScriptHash(s.contracts.subnet) - handlerInfo.SetScriptHash(s.contracts.subnet) - - notifyTyp := event.TypeFromString(notifyName) - - parserInfo.SetType(notifyTyp) - handlerInfo.SetType(notifyTyp) - - parserInfo.SetParser(parser) - handlerInfo.SetHandler(handler) - - s.morphListener.SetNotificationParser(parserInfo) - s.morphListener.RegisterNotificationHandler(handlerInfo) -} - -// catchSubnetCreation catches event of subnet creation from listener and queues the processing. -func (s *Server) catchSubnetCreation(e event.Event) { - err := s.subnetHandler.workerPool.Submit(func() { - s.handleSubnetCreation(e) - }) - if err != nil { - s.log.Error("subnet creation queue failure", - zap.String("error", err.Error()), - ) - } -} - -// implements irsubnet.Put event interface required by irsubnet.PutValidator. -type putSubnetEvent struct { - ev subnetevents.Put -} - -// ReadID unmarshals the subnet ID from a binary FrostFS API protocol's format. -func (x putSubnetEvent) ReadID(id *subnetid.ID) error { - return id.Unmarshal(x.ev.ID()) -} - -var errMissingSubnetOwner = errors.New("missing subnet owner") - -// ReadCreator unmarshals the subnet creator from a binary FrostFS API protocol's format. -// Returns an error if the byte array is empty. -func (x putSubnetEvent) ReadCreator(id *user.ID) error { - data := x.ev.Owner() - - if len(data) == 0 { - return errMissingSubnetOwner - } - - key, err := keys.NewPublicKeyFromBytes(data, elliptic.P256()) - if err != nil { - return err - } - - user.IDFromKey(id, (ecdsa.PublicKey)(*key)) - - return nil -} - -// ReadInfo unmarshal the subnet info from a binary FrostFS API protocol's format. 
-func (x putSubnetEvent) ReadInfo(info *subnet.Info) error { - return info.Unmarshal(x.ev.Info()) -} - -// handleSubnetCreation handles an event of subnet creation parsed via subnetevents.ParsePut. -// -// Validates the event using irsubnet.PutValidator. Logs message about (dis)agreement. -func (s *Server) handleSubnetCreation(e event.Event) { - putEv := e.(subnetevents.Put) // panic occurs only if we registered handler incorrectly - - err := s.subnetHandler.putValidator.Assert(putSubnetEvent{ - ev: putEv, - }) - if err != nil { - s.log.Info("discard subnet creation", - zap.String("reason", err.Error()), - ) - - return - } - - notaryMainTx := putEv.NotaryMainTx() - - isNotary := notaryMainTx != nil - if isNotary { - // re-sign notary request - err = s.morphClient.NotarySignAndInvokeTX(notaryMainTx) - } else { - // send new transaction - var prm morphsubnet.PutPrm - - prm.SetID(putEv.ID()) - prm.SetOwner(putEv.Owner()) - prm.SetInfo(putEv.Info()) - prm.SetTxHash(putEv.TxHash()) - - _, err = s.subnetHandler.morphClient.Put(prm) - } - - if err != nil { - s.log.Error("approve subnet creation", - zap.Bool("notary", isNotary), - zap.String("error", err.Error()), - ) - - return - } -} - -// catchSubnetRemoval catches an event of subnet removal from listener and queues the processing. -func (s *Server) catchSubnetRemoval(e event.Event) { - err := s.subnetHandler.workerPool.Submit(func() { - s.handleSubnetRemoval(e) - }) - if err != nil { - s.log.Error("subnet removal handling failure", - zap.String("error", err.Error()), - ) - } -} - -// handleSubnetRemoval handles event of subnet removal parsed via subnetevents.ParseDelete. 
-func (s *Server) handleSubnetRemoval(e event.Event) { - delEv := e.(subnetevents.Delete) // panic occurs only if we registered handler incorrectly - - // handle subnet changes in netmap - - candidates, err := s.netmapClient.GetCandidates() - if err != nil { - s.log.Error("getting netmap candidates", - zap.Error(err), - ) - - return - } - - var removedID subnetid.ID - err = removedID.Unmarshal(delEv.ID()) - if err != nil { - s.log.Error("unmarshalling removed subnet ID", - zap.String("error", err.Error()), - ) - - return - } - - for i := range candidates { - s.processCandidate(delEv.TxHash(), removedID, candidates[i]) - } -} - -func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.ID, c netmap.NodeInfo) { - removeSubnet := false - log := s.log.With( - zap.String("public_key", netmap.StringifyPublicKey(c)), - zap.String("removed_subnet", removedID.String()), - ) - - err := c.IterateSubnets(func(id subnetid.ID) error { - if removedID.Equals(id) { - removeSubnet = true - return netmap.ErrRemoveSubnet - } - - return nil - }) - if err != nil { - log.Error("iterating node's subnets", zap.Error(err)) - log.Debug("removing node from netmap candidates") - - var updateStatePrm netmapclient.UpdatePeerPrm - updateStatePrm.SetKey(c.PublicKey()) - updateStatePrm.SetHash(txHash) - - err = s.netmapClient.UpdatePeerState(updateStatePrm) - if err != nil { - log.Error("removing node from candidates", - zap.Error(err), - ) - } - - return - } - - // remove subnet from node's information - // if it contains removed subnet - if removeSubnet { - log.Debug("removing subnet from the node") - - var addPeerPrm netmapclient.AddPeerPrm - addPeerPrm.SetNodeInfo(c) - addPeerPrm.SetHash(txHash) - - err = s.netmapClient.AddPeer(addPeerPrm) - if err != nil { - log.Error("updating subnet info", - zap.Error(err), - ) - } - } -} diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go index 
853628fb4..5deaf5e4a 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go @@ -88,7 +88,7 @@ func TestBlobovnicza(t *testing.T) { var dPrm DeletePrm dPrm.SetAddress(addr) - _, err := blz.Delete(dPrm) + _, err := blz.Delete(context.Background(), dPrm) require.NoError(t, err) // should return 404 diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go index 3912deac0..84274528a 100644 --- a/pkg/local_object_storage/blobovnicza/control.go +++ b/pkg/local_object_storage/blobovnicza/control.go @@ -5,6 +5,7 @@ import ( "os" "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "go.etcd.io/bbolt" "go.uber.org/zap" @@ -14,7 +15,7 @@ import ( // // If the database file does not exist, it will be created automatically. func (b *Blobovnicza) Open() error { - b.log.Debug("creating directory for BoltDB", + b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB, zap.String("path", b.path), zap.Bool("ro", b.boltOptions.ReadOnly), ) @@ -28,7 +29,7 @@ func (b *Blobovnicza) Open() error { } } - b.log.Debug("opening BoltDB", + b.log.Debug(logs.BlobovniczaOpeningBoltDB, zap.String("path", b.path), zap.Stringer("permissions", b.perm), ) @@ -44,13 +45,13 @@ func (b *Blobovnicza) Open() error { // // Should not be called in read-only configuration. 
func (b *Blobovnicza) Init() error { - b.log.Debug("initializing...", + b.log.Debug(logs.BlobovniczaInitializing, zap.Uint64("object size limit", b.objSizeLimit), zap.Uint64("storage size limit", b.fullSizeLimit), ) if size := b.filled.Load(); size != 0 { - b.log.Debug("already initialized", zap.Uint64("size", size)) + b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size)) return nil } @@ -59,7 +60,7 @@ func (b *Blobovnicza) Init() error { // create size range bucket rangeStr := stringifyBounds(lower, upper) - b.log.Debug("creating bucket for size range", + b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange, zap.String("range", rangeStr)) _, err := tx.CreateBucketIfNotExists(key) @@ -86,7 +87,7 @@ func (b *Blobovnicza) Init() error { // Close releases all internal database resources. func (b *Blobovnicza) Close() error { - b.log.Debug("closing BoltDB", + b.log.Debug(logs.BlobovniczaClosingBoltDB, zap.String("path", b.path), ) diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go index 1f885bd8e..29a587cc9 100644 --- a/pkg/local_object_storage/blobovnicza/delete.go +++ b/pkg/local_object_storage/blobovnicza/delete.go @@ -1,9 +1,15 @@ package blobovnicza import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -29,7 +35,13 @@ func (p *DeletePrm) SetAddress(addr oid.Address) { // Returns an error of type apistatus.ObjectNotFound if the object to be deleted is not in blobovnicza. // // Should not be called in read-only configuration. 
-func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) { +func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Delete", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + )) + defer span.End() + addrKey := addressKey(prm.addr) removed := false @@ -51,7 +63,7 @@ func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) { err := buck.Delete(addrKey) if err == nil { - b.log.Debug("object was removed from bucket", + b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket, zap.String("binary size", stringifyByteSize(sz)), zap.String("range", stringifyBounds(lower, upper)), ) diff --git a/pkg/local_object_storage/blobovnicza/sizes.go b/pkg/local_object_storage/blobovnicza/sizes.go index 5b89760c6..82454fa28 100644 --- a/pkg/local_object_storage/blobovnicza/sizes.go +++ b/pkg/local_object_storage/blobovnicza/sizes.go @@ -3,6 +3,7 @@ package blobovnicza import ( "encoding/binary" "fmt" + "math/bits" "strconv" ) @@ -31,11 +32,11 @@ func bucketForSize(sz uint64) []byte { return bucketKeyFromBounds(upperPowerOfTwo(sz)) } -func upperPowerOfTwo(v uint64) (upperBound uint64) { - for upperBound = firstBucketBound; upperBound < v; upperBound *= 2 { +func upperPowerOfTwo(v uint64) uint64 { + if v <= firstBucketBound { + return firstBucketBound } - - return + return 1 << bits.Len64(v-1) } func (b *Blobovnicza) incSize(sz uint64) { diff --git a/pkg/local_object_storage/blobovnicza/sizes_test.go b/pkg/local_object_storage/blobovnicza/sizes_test.go index c218dc638..2bf451f12 100644 --- a/pkg/local_object_storage/blobovnicza/sizes_test.go +++ b/pkg/local_object_storage/blobovnicza/sizes_test.go @@ -1,6 +1,7 @@ package blobovnicza import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -36,3 +37,13 @@ func TestSizes(t *testing.T) { require.Equal(t, bucketKeyFromBounds(item.upperBound), bucketForSize(item.sz)) } } + +func 
BenchmarkUpperBound(b *testing.B) { + for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} { + b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = upperPowerOfTwo(size) + } + }) + } +} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index c628c96be..af976f977 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -7,6 +7,7 @@ import ( "strconv" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" @@ -104,12 +105,12 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) { // it from opened cache. return } else if err := value.Close(); err != nil { - blz.log.Error("could not close Blobovnicza", + blz.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", p), zap.String("error", err.Error()), ) } else { - blz.log.Debug("blobovnicza successfully closed on evict", + blz.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict, zap.String("id", p), ) } @@ -141,11 +142,11 @@ func (b *Blobovniczas) getActivated(lvlPath string) (blobovniczaWithIndex, error // // if current active blobovnicza's index is not old, it remains unchanged. 
func (b *Blobovniczas) updateActive(lvlPath string, old *uint64) error { - b.log.Debug("updating active blobovnicza...", zap.String("path", lvlPath)) + b.log.Debug(logs.BlobovniczatreeUpdatingActiveBlobovnicza, zap.String("path", lvlPath)) _, err := b.updateAndGet(lvlPath, old) - b.log.Debug("active blobovnicza successfully updated", zap.String("path", lvlPath)) + b.log.Debug(logs.BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated, zap.String("path", lvlPath)) return err } @@ -201,7 +202,7 @@ func (b *Blobovniczas) updateAndGet(lvlPath string, old *uint64) (blobovniczaWit } b.lruMtx.Unlock() - b.log.Debug("blobovnicza successfully activated", + b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyActivated, zap.String("path", activePath)) return active, nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index e7e890e50..0240c7a97 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -4,6 +4,7 @@ import ( "fmt" "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "go.uber.org/zap" ) @@ -18,10 +19,10 @@ func (b *Blobovniczas) Open(readOnly bool) error { // // Should be called exactly once. 
func (b *Blobovniczas) Init() error { - b.log.Debug("initializing Blobovnicza's") + b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas) if b.readOnly { - b.log.Debug("read-only mode, skip blobovniczas initialization...") + b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization) return nil } @@ -36,7 +37,7 @@ func (b *Blobovniczas) Init() error { return true, fmt.Errorf("could not initialize blobovnicza structure %s: %w", p, err) } - b.log.Debug("blobovnicza successfully initialized, closing...", zap.String("id", p)) + b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) return false, nil }) } @@ -49,7 +50,7 @@ func (b *Blobovniczas) Close() error { for p, v := range b.active { if err := v.blz.Close(); err != nil { - b.log.Debug("could not close active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza, zap.String("path", p), zap.String("error", err.Error()), ) @@ -59,7 +60,7 @@ func (b *Blobovniczas) Close() error { for _, k := range b.opened.Keys() { blz, _ := b.opened.Get(k) if err := blz.Close(); err != nil { - b.log.Debug("could not close active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza, zap.String("path", k), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index 7e14d6d8d..f84d8fbe8 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -1,12 +1,18 @@ package blobovniczatree import ( + "context" + "encoding/hex" "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -14,7 +20,14 @@ import ( // // If blobocvnicza ID is specified, only this blobovnicza is processed. // Otherwise, all Blobovniczas are processed descending weight. -func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err error) { +func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res common.DeleteRes, err error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Delete", + trace.WithAttributes( + attribute.String("address", prm.Address.EncodeToString()), + attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), + )) + defer span.End() + if b.readOnly { return common.DeleteRes{}, common.ErrReadOnly } @@ -29,7 +42,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e return res, err } - return b.deleteObject(blz, bPrm, prm) + return b.deleteObject(ctx, blz, bPrm) } activeCache := make(map[string]struct{}) @@ -41,10 +54,10 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e // don't process active blobovnicza of the level twice _, ok := activeCache[dirPath] - res, err = b.deleteObjectFromLevel(bPrm, p, !ok, prm) + res, err = b.deleteObjectFromLevel(ctx, bPrm, p, !ok) if err != nil { if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not remove object from level", + b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), ) @@ -72,7 +85,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e // tries to delete object from particular blobovnicza. // // returns no error if object was removed from some blobovnicza of the same level. 
-func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath string, tryActive bool, dp common.DeletePrm) (common.DeleteRes, error) { +func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string, tryActive bool) (common.DeleteRes, error) { lvlPath := filepath.Dir(blzPath) // try to remove from blobovnicza if it is opened @@ -80,10 +93,10 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath v, ok := b.opened.Get(blzPath) b.lruMtx.Unlock() if ok { - if res, err := b.deleteObject(v, prm, dp); err == nil { + if res, err := b.deleteObject(ctx, v, prm); err == nil { return res, err } else if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not remove object from opened blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) @@ -99,10 +112,10 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath b.activeMtx.RUnlock() if ok && tryActive { - if res, err := b.deleteObject(active.blz, prm, dp); err == nil { + if res, err := b.deleteObject(ctx, active.blz, prm); err == nil { return res, err } else if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not remove object from active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) @@ -124,11 +137,11 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath return common.DeleteRes{}, err } - return b.deleteObject(blz, prm, dp) + return b.deleteObject(ctx, blz, prm) } // removes object from blobovnicza and returns common.DeleteRes. 
-func (b *Blobovniczas) deleteObject(blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm, dp common.DeletePrm) (common.DeleteRes, error) { - _, err := blz.Delete(prm) +func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) { + _, err := blz.Delete(ctx, prm) return common.DeleteRes{}, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index 748843ee9..9d9fd4cba 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -6,6 +6,7 @@ import ( "path/filepath" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "go.opentelemetry.io/otel/attribute" @@ -47,7 +48,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common _, err := b.getObjectFromLevel(ctx, gPrm, p, !ok) if err != nil { if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not get object from level", + b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.String("error", err.Error())) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go index 08fd2223f..8d9fe526e 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go @@ -33,7 +33,7 @@ func TestExistsInvalidStorageID(t *testing.T) { d, err := obj.Marshal() require.NoError(t, err) - putRes, err := b.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true}) + putRes, err := b.Put(context.Background(), 
common.PutPrm{Address: addr, RawData: d, DontCompress: true}) require.NoError(t, err) t.Run("valid but wrong storage id", func(t *testing.T) { @@ -50,15 +50,15 @@ func TestExistsInvalidStorageID(t *testing.T) { require.False(t, res.Exists) }) - t.Run("invalid storage id", func(t *testing.T) { - storageID := slice.Copy(putRes.StorageID) - storageID[0] = '9' - badDir := filepath.Join(dir, "9") - require.NoError(t, os.MkdirAll(badDir, os.ModePerm)) - require.NoError(t, os.Chmod(badDir, 0)) - t.Cleanup(func() { _ = os.Chmod(filepath.Join(dir, "9"), os.ModePerm) }) + t.Run("valid id but corrupted file", func(t *testing.T) { + relBadFileDir := filepath.Join("9", "0") + badFileName := "0" - res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: storageID}) + // An invalid boltdb file is created so that it returns an error when opened + require.NoError(t, os.MkdirAll(filepath.Join(dir, relBadFileDir), os.ModePerm)) + require.NoError(t, os.WriteFile(filepath.Join(dir, relBadFileDir, badFileName), []byte("not a boltdb file content"), 0777)) + + res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: []byte(filepath.Join(relBadFileDir, badFileName))}) require.Error(t, err) require.False(t, res.Exists) }) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index bb84db086..0b8ccb64f 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -7,6 +7,7 @@ import ( "path/filepath" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -53,7 +54,7 @@ 
func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G res, err = b.getObjectFromLevel(ctx, bPrm, p, !ok) if err != nil { if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not get object from level", + b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), ) @@ -88,7 +89,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G if res, err := b.getObject(ctx, v, prm); err == nil { return res, err } else if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not read object from opened blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) @@ -108,7 +109,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G if res, err := b.getObject(ctx, active.blz, prm); err == nil { return res, err } else if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not get object from active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index b12cb32d4..d6dfe51bd 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -8,6 +8,7 @@ import ( "strconv" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -54,7 +55,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm 
common.GetRangePrm) (re if err != nil { outOfBounds := isErrOutOfRange(err) if !outOfBounds && !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not get object from level", + b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), ) @@ -98,7 +99,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang return res, err default: if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not read payload range from opened blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) @@ -123,7 +124,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang return res, err default: if !blobovnicza.IsErrNotFound(err) { - b.log.Debug("could not read payload range from active blobovnicza", + b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza, zap.String("path", blzPath), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index db7ca1082..ec302d143 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -1,19 +1,31 @@ package blobovniczatree import ( + "context" "errors" "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) // Put saves object in the maximum weight blobobnicza. // // returns error if could not save object in any blobovnicza. 
-func (b *Blobovniczas) Put(prm common.PutPrm) (common.PutRes, error) { +func (b *Blobovniczas) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Put", + trace.WithAttributes( + attribute.String("address", prm.Address.EncodeToString()), + attribute.Bool("dont_compress", prm.DontCompress), + )) + defer span.End() + if b.readOnly { return common.PutRes{}, common.ErrReadOnly } @@ -56,9 +68,9 @@ func (i *putIterator) iterate(path string) (bool, error) { active, err := i.B.getActivated(path) if err != nil { if !isLogical(err) { - i.B.reportError("could not get active blobovnicza", err) + i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { - i.B.log.Debug("could not get active blobovnicza", + i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.String("error", err.Error())) } @@ -71,15 +83,15 @@ func (i *putIterator) iterate(path string) (bool, error) { // and `updateActive` takes care of not updating the active blobovnicza twice. 
if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) { if isFull { - i.B.log.Debug("blobovnicza overflowed", + i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("path", filepath.Join(path, u64ToHexString(active.ind)))) } if err := i.B.updateActive(path, &active.ind); err != nil { if !isLogical(err) { - i.B.reportError("could not update active blobovnicza", err) + i.B.reportError(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza, err) } else { - i.B.log.Debug("could not update active blobovnicza", + i.B.log.Debug(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza, zap.String("level", path), zap.String("error", err.Error())) } @@ -92,9 +104,9 @@ func (i *putIterator) iterate(path string) (bool, error) { i.AllFull = false if !isLogical(err) { - i.B.reportError("could not put object to active blobovnicza", err) + i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { - i.B.log.Debug("could not put object to active blobovnicza", + i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", filepath.Join(path, u64ToHexString(active.ind))), zap.String("error", err.Error())) } diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go index 738cd7eee..ef85f0f1d 100644 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ b/pkg/local_object_storage/blobstor/blobstor_test.go @@ -3,6 +3,7 @@ package blobstor import ( "context" "path/filepath" + "sync" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -75,12 +76,12 @@ func TestCompression(t *testing.T) { testPut := func(t *testing.T, b *BlobStor, i int) { var prm common.PutPrm prm.Object = smallObj[i] - _, err := b.Put(prm) + _, err := b.Put(context.Background(), prm) require.NoError(t, err) prm = common.PutPrm{} prm.Object = bigObj[i] - _, err = b.Put(prm) + _, err = b.Put(context.Background(), prm) 
require.NoError(t, err) } @@ -174,3 +175,156 @@ func TestBlobstor_needsCompression(t *testing.T) { require.False(t, b.NeedsCompression(obj)) }) } + +func TestConcurrentPut(t *testing.T) { + dir := t.TempDir() + + const ( + smallSizeLimit = 512 + + // concurrentPutCount is fstree implementation specific + concurrentPutCount = 5 + ) + + blobStor := New( + WithStorages(defaultStorages(dir, smallSizeLimit))) + require.NoError(t, blobStor.Open(false)) + require.NoError(t, blobStor.Init()) + + testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { + res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)}) + require.NoError(t, err) + require.Equal(t, obj, res.Object) + } + + testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { + var prm common.PutPrm + prm.Object = obj + _, err := b.Put(context.Background(), prm) + require.NoError(t, err) + } + + testPutFileExistsError := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { + var prm common.PutPrm + prm.Object = obj + if _, err := b.Put(context.Background(), prm); err != nil { + require.ErrorContains(t, err, "file exists") + } + } + + t.Run("put the same big object", func(t *testing.T) { + bigObj := testObject(smallSizeLimit * 2) + + var wg sync.WaitGroup + for i := 0; i < concurrentPutCount; i++ { + wg.Add(1) + go func() { + testPut(t, blobStor, bigObj) + wg.Done() + }() + } + wg.Wait() + + testGet(t, blobStor, bigObj) + }) + + t.Run("put the same big object with error", func(t *testing.T) { + bigObj := testObject(smallSizeLimit * 2) + + var wg sync.WaitGroup + for i := 0; i < concurrentPutCount+1; i++ { + wg.Add(1) + go func() { + testPutFileExistsError(t, blobStor, bigObj) + wg.Done() + }() + } + wg.Wait() + + testGet(t, blobStor, bigObj) + }) + + t.Run("put the same small object", func(t *testing.T) { + smallObj := testObject(smallSizeLimit / 2) + + var wg sync.WaitGroup + for i := 0; i < concurrentPutCount; i++ { + wg.Add(1) + go func() { + testPut(t, 
blobStor, smallObj) + wg.Done() + }() + } + wg.Wait() + + testGet(t, blobStor, smallObj) + }) +} + +func TestConcurrentDelete(t *testing.T) { + dir := t.TempDir() + + const smallSizeLimit = 512 + + blobStor := New( + WithStorages(defaultStorages(dir, smallSizeLimit))) + require.NoError(t, blobStor.Open(false)) + require.NoError(t, blobStor.Init()) + + testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { + var prm common.PutPrm + prm.Object = obj + _, err := b.Put(context.Background(), prm) + require.NoError(t, err) + } + + testDelete := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { + var prm common.DeletePrm + prm.Address = object.AddressOf(obj) + if _, err := b.Delete(context.Background(), prm); err != nil { + require.ErrorContains(t, err, "object not found") + } + } + + testDeletedExists := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { + var prm common.ExistsPrm + prm.Address = object.AddressOf(obj) + res, err := b.Exists(context.Background(), prm) + require.NoError(t, err) + require.False(t, res.Exists) + } + + t.Run("delete the same big object", func(t *testing.T) { + bigObj := testObject(smallSizeLimit * 2) + testPut(t, blobStor, bigObj) + + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + testDelete(t, blobStor, bigObj) + wg.Done() + }() + } + wg.Wait() + + testDeletedExists(t, blobStor, bigObj) + }) + + t.Run("delete the same small object", func(t *testing.T) { + smallObj := testObject(smallSizeLimit / 2) + testPut(t, blobStor, smallObj) + + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + testDelete(t, blobStor, smallObj) + wg.Done() + }() + } + wg.Wait() + + testDeletedExists(t, blobStor, smallObj) + }) +} diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go index b5d186242..801d32c1e 100644 --- a/pkg/local_object_storage/blobstor/common/storage.go +++ 
b/pkg/local_object_storage/blobstor/common/storage.go @@ -23,7 +23,7 @@ type Storage interface { Get(context.Context, GetPrm) (GetRes, error) GetRange(context.Context, GetRangePrm) (GetRangeRes, error) Exists(context.Context, ExistsPrm) (ExistsRes, error) - Put(PutPrm) (PutRes, error) - Delete(DeletePrm) (DeleteRes, error) + Put(context.Context, PutPrm) (PutRes, error) + Delete(context.Context, DeletePrm) (DeleteRes, error) Iterate(IteratePrm) (IterateRes, error) } diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go index 79e37f8ae..4ebf057d3 100644 --- a/pkg/local_object_storage/blobstor/compression/compress.go +++ b/pkg/local_object_storage/blobstor/compression/compress.go @@ -87,7 +87,11 @@ func (c *Config) Compress(data []byte) []byte { return data } maxSize := c.encoder.MaxEncodedSize(len(data)) - return c.encoder.EncodeAll(data, make([]byte, 0, maxSize)) + compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize)) + if len(data) < len(compressed) { + return data + } + return compressed } // Close closes encoder and decoder, returns any error occurred. diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 6ceb9cefa..abe39575b 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -4,12 +4,13 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "go.uber.org/zap" ) // Open opens BlobStor. func (b *BlobStor) Open(readOnly bool) error { - b.log.Debug("opening...") + b.log.Debug(logs.BlobstorOpening) for i := range b.storage { err := b.storage[i].Storage.Open(readOnly) @@ -29,7 +30,7 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag // // Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure. 
func (b *BlobStor) Init() error { - b.log.Debug("initializing...") + b.log.Debug(logs.BlobstorInitializing) if err := b.compression.Init(); err != nil { return err @@ -46,13 +47,13 @@ func (b *BlobStor) Init() error { // Close releases all internal resources of BlobStor. func (b *BlobStor) Close() error { - b.log.Debug("closing...") + b.log.Debug(logs.BlobstorClosing) var firstErr error for i := range b.storage { err := b.storage[i].Storage.Close() if err != nil { - b.log.Info("couldn't close storage", zap.String("error", err.Error())) + b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) if firstErr == nil { firstErr = err } diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go index 8c5a7aba6..377214fb8 100644 --- a/pkg/local_object_storage/blobstor/delete.go +++ b/pkg/local_object_storage/blobstor/delete.go @@ -1,19 +1,31 @@ package blobstor import ( + "context" + "encoding/hex" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) -func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) { +func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Delete", + trace.WithAttributes( + attribute.String("address", prm.Address.EncodeToString()), + attribute.String("storage_id", hex.EncodeToString(prm.StorageID)), + )) + defer span.End() + b.modeMtx.RLock() defer b.modeMtx.RUnlock() if prm.StorageID == nil { for i := range b.storage { - res, err := b.storage[i].Storage.Delete(prm) + res, err := b.storage[i].Storage.Delete(ctx, prm) if err == nil || !errors.As(err, new(apistatus.ObjectNotFound)) { if err == nil { logOp(b.log, 
deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID) @@ -31,7 +43,7 @@ func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) { st = b.storage[0].Storage } - res, err := st.Delete(prm) + res, err := st.Delete(ctx, prm) if err == nil { logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID) } diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go index 5882c33e0..3c76764a9 100644 --- a/pkg/local_object_storage/blobstor/exists.go +++ b/pkg/local_object_storage/blobstor/exists.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -57,7 +58,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi } for _, err := range errors[:len(errors)-1] { - b.log.Warn("error occurred during object existence checking", + b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking, zap.Stringer("address", prm.Address), zap.String("error", err.Error())) } diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go index 805d78297..7a3dcd617 100644 --- a/pkg/local_object_storage/blobstor/exists_test.go +++ b/pkg/local_object_storage/blobstor/exists_test.go @@ -2,7 +2,6 @@ package blobstor import ( "context" - "os" "testing" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -15,13 +14,9 @@ import ( ) func TestExists(t *testing.T) { - dir, err := os.MkdirTemp("", "frostfs*") - require.NoError(t, err) - t.Cleanup(func() { _ = os.RemoveAll(dir) }) - const smallSizeLimit = 512 - storages, _, largeFileStorage := defaultTestStorages(dir, smallSizeLimit) + storages, _, largeFileStorage := defaultTestStorages(t.TempDir(), 
smallSizeLimit) b := New(WithStorages(storages)) @@ -36,7 +31,7 @@ func TestExists(t *testing.T) { for i := range objects { var prm common.PutPrm prm.Object = objects[i] - _, err := b.Put(prm) + _, err := b.Put(context.Background(), prm) require.NoError(t, err) } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index 462fbd63f..75f63193a 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -196,7 +196,13 @@ func (t *FSTree) treePath(addr oid.Address) string { } // Delete removes the object with the specified address from the storage. -func (t *FSTree) Delete(prm common.DeletePrm) (common.DeleteRes, error) { +func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "FSTree.Delete", + trace.WithAttributes( + attribute.String("address", prm.Address.EncodeToString()), + )) + defer span.End() + if t.readOnly { return common.DeleteRes{}, common.ErrReadOnly } @@ -230,7 +236,14 @@ func (t *FSTree) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exist } // Put puts an object in the storage. -func (t *FSTree) Put(prm common.PutPrm) (common.PutRes, error) { +func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "FSTree.Put", + trace.WithAttributes( + attribute.String("address", prm.Address.EncodeToString()), + attribute.Bool("dont_compress", prm.DontCompress), + )) + defer span.End() + if t.readOnly { return common.PutRes{}, common.ErrReadOnly } @@ -455,6 +468,6 @@ func (t *FSTree) SetCompressor(cc *compression.Config) { } // SetReportErrorFunc implements common.Storage. -func (t *FSTree) SetReportErrorFunc(f func(string, error)) { +func (t *FSTree) SetReportErrorFunc(_ func(string, error)) { // Do nothing, FSTree can encounter only one error which is returned. 
} diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go index b2663be21..e31f3280a 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go @@ -1,6 +1,7 @@ package blobstortest import ( + "context" "math/rand" "testing" @@ -67,7 +68,7 @@ func prepare(t *testing.T, count int, s common.Storage, min, max uint64) []objec prm.Object = objects[i].obj prm.RawData = objects[i].raw - putRes, err := s.Put(prm) + putRes, err := s.Put(context.Background(), prm) require.NoError(t, err) objects[i].storageID = putRes.StorageID diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go index 350bea96a..96d54dec3 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go @@ -36,7 +36,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) { prm.Object = NewObject(min + uint64(rand.Intn(int(max-min+1)))) prm.Address = objectCore.AddressOf(prm.Object) - _, err := s.Put(prm) + _, err := s.Put(context.Background(), prm) require.ErrorIs(t, err, common.ErrReadOnly) }) t.Run("delete fails", func(t *testing.T) { @@ -44,7 +44,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) { prm.Address = objects[0].addr prm.StorageID = objects[0].storageID - _, err := s.Delete(prm) + _, err := s.Delete(context.Background(), prm) require.ErrorIs(t, err, common.ErrReadOnly) }) } diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go index ad0045316..7532a5b5f 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go @@ -22,7 
+22,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) { var prm common.DeletePrm prm.Address = oidtest.Address() - _, err := s.Delete(prm) + _, err := s.Delete(context.Background(), prm) require.Error(t, err, new(apistatus.ObjectNotFound)) }) @@ -31,7 +31,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) { prm.Address = objects[0].addr prm.StorageID = objects[0].storageID - _, err := s.Delete(prm) + _, err := s.Delete(context.Background(), prm) require.NoError(t, err) t.Run("exists fail", func(t *testing.T) { @@ -55,7 +55,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) { var prm common.DeletePrm prm.Address = objects[1].addr - _, err := s.Delete(prm) + _, err := s.Delete(context.Background(), prm) require.NoError(t, err) }) @@ -64,10 +64,10 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) { prm.Address = objects[2].addr prm.StorageID = objects[2].storageID - _, err := s.Delete(prm) + _, err := s.Delete(context.Background(), prm) require.NoError(t, err) - _, err = s.Delete(prm) + _, err = s.Delete(context.Background(), prm) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) }) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go index f98cca638..83ada9607 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go @@ -1,6 +1,7 @@ package blobstortest import ( + "context" "errors" "testing" @@ -22,7 +23,7 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) { var delPrm common.DeletePrm delPrm.Address = objects[2].addr delPrm.StorageID = objects[2].storageID - _, err := s.Delete(delPrm) + _, err := s.Delete(context.Background(), delPrm) require.NoError(t, err) objects = append(objects[:delID], objects[delID+1:]...) 
diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go index 0461dd803..2c37ee776 100644 --- a/pkg/local_object_storage/blobstor/iterate.go +++ b/pkg/local_object_storage/blobstor/iterate.go @@ -3,6 +3,7 @@ package blobstor import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -38,7 +39,7 @@ func IterateBinaryObjects(blz *BlobStor, f func(addr oid.Address, data []byte, d } prm.IgnoreErrors = true prm.ErrorHandler = func(addr oid.Address, err error) error { - blz.log.Warn("error occurred during the iteration", + blz.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Stringer("address", addr), zap.String("err", err.Error())) return nil diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index b2a7ddfb9..6488ff5fc 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -1,6 +1,7 @@ package blobstor import ( + "context" "encoding/binary" "os" "testing" @@ -63,7 +64,7 @@ func TestIterateObjects(t *testing.T) { } for _, v := range mObjs { - _, err := blobStor.Put(common.PutPrm{Address: v.addr, RawData: v.data}) + _, err := blobStor.Put(context.Background(), common.PutPrm{Address: v.addr, RawData: v.data}) require.NoError(t, err) } diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go index 4068d742e..e435cfef4 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore.go @@ -91,7 +91,7 @@ func (s *memstoreImpl) Exists(_ context.Context, req common.ExistsPrm) (common.E return common.ExistsRes{Exists: exists}, nil } -func (s *memstoreImpl) Put(req common.PutPrm) 
(common.PutRes, error) { +func (s *memstoreImpl) Put(_ context.Context, req common.PutPrm) (common.PutRes, error) { if s.readOnly { return common.PutRes{}, common.ErrReadOnly } @@ -108,7 +108,7 @@ func (s *memstoreImpl) Put(req common.PutPrm) (common.PutRes, error) { return common.PutRes{StorageID: []byte(s.rootPath)}, nil } -func (s *memstoreImpl) Delete(req common.DeletePrm) (common.DeleteRes, error) { +func (s *memstoreImpl) Delete(_ context.Context, req common.DeletePrm) (common.DeleteRes, error) { if s.readOnly { return common.DeleteRes{}, common.ErrReadOnly } diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go index 6482b2cff..125276290 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go @@ -28,7 +28,7 @@ func TestSimpleLifecycle(t *testing.T) { require.NoError(t, err) { - _, err := s.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true}) + _, err := s.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true}) require.NoError(t, err) } @@ -57,7 +57,7 @@ func TestSimpleLifecycle(t *testing.T) { } { - _, err := s.Delete(common.DeletePrm{Address: addr}) + _, err := s.Delete(context.Background(), common.DeletePrm{Address: addr}) require.NoError(t, err) } diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go index d2359335f..f21982530 100644 --- a/pkg/local_object_storage/blobstor/perf_test.go +++ b/pkg/local_object_storage/blobstor/perf_test.go @@ -3,7 +3,6 @@ package blobstor import ( "context" "fmt" - "os" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" @@ -21,18 +20,13 @@ type storage struct { } func (s storage) open(b *testing.B) common.Storage { - dir, err := os.MkdirTemp(os.TempDir(), s.desc) - if err != nil { - b.Fatalf("creating %s root 
path: %v", s.desc, err) - } - st := s.create(dir) + st := s.create(b.TempDir()) require.NoError(b, st.Open(false)) require.NoError(b, st.Init()) b.Cleanup(func() { require.NoError(b, st.Close()) - require.NoError(b, os.RemoveAll(dir)) }) return st @@ -114,7 +108,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) { if err != nil { return fmt.Errorf("marshal: %v", err) } - _, err = st.Put(common.PutPrm{ + _, err = st.Put(context.Background(), common.PutPrm{ Address: addr, RawData: raw, }) @@ -165,7 +159,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) { addr := testutil.AddressFromObject(b, obj) raw, err := obj.Marshal() require.NoError(b, err) - if _, err := st.Put(common.PutPrm{ + if _, err := st.Put(context.Background(), common.PutPrm{ Address: addr, RawData: raw, }); err != nil { @@ -202,7 +196,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) { addr := testutil.AddressFromObject(b, obj) raw, err := obj.Marshal() require.NoError(b, err) - if _, err := st.Put(common.PutPrm{ + if _, err := st.Put(context.Background(), common.PutPrm{ Address: addr, RawData: raw, }); err != nil { diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go index a4009ae43..2ae7f0fe6 100644 --- a/pkg/local_object_storage/blobstor/put.go +++ b/pkg/local_object_storage/blobstor/put.go @@ -1,12 +1,16 @@ package blobstor import ( + "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // ErrNoPlaceFound is returned when object can't be saved to any sub-storage component @@ -21,7 +25,14 @@ var ErrNoPlaceFound = logicerr.New("couldn't find a place to store 
an object") // // Returns any error encountered that // did not allow to completely save the object. -func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) { +func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Put", + trace.WithAttributes( + attribute.String("address", prm.Address.EncodeToString()), + attribute.Bool("dont_compress", prm.DontCompress), + )) + defer span.End() + b.modeMtx.RLock() defer b.modeMtx.RUnlock() @@ -39,7 +50,7 @@ func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) { for i := range b.storage { if b.storage[i].Policy == nil || b.storage[i].Policy(prm.Object, prm.RawData) { - res, err := b.storage[i].Storage.Put(prm) + res, err := b.storage[i].Storage.Put(ctx, prm) if err == nil { logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID) } diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go index 03f64f0f1..24d742fda 100644 --- a/pkg/local_object_storage/blobstor/teststore/teststore.go +++ b/pkg/local_object_storage/blobstor/teststore/teststore.go @@ -176,27 +176,27 @@ func (s *TestStore) Exists(ctx context.Context, req common.ExistsPrm) (common.Ex } } -func (s *TestStore) Put(req common.PutPrm) (common.PutRes, error) { +func (s *TestStore) Put(ctx context.Context, req common.PutPrm) (common.PutRes, error) { s.mu.RLock() defer s.mu.RUnlock() switch { case s.overrides.Put != nil: return s.overrides.Put(req) case s.st != nil: - return s.st.Put(req) + return s.st.Put(ctx, req) default: panic(fmt.Sprintf("unexpected storage call: Put(%+v)", req)) } } -func (s *TestStore) Delete(req common.DeletePrm) (common.DeleteRes, error) { +func (s *TestStore) Delete(ctx context.Context, req common.DeletePrm) (common.DeleteRes, error) { s.mu.RLock() defer s.mu.RUnlock() switch { case s.overrides.Delete != nil: return s.overrides.Delete(req) case 
s.st != nil: - return s.st.Delete(req) + return s.st.Delete(ctx, req) default: panic(fmt.Sprintf("unexpected storage call: Delete(%+v)", req)) } diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 3e176dc91..9ad4fcf9c 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -8,6 +8,7 @@ import ( "strings" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "go.uber.org/zap" @@ -47,7 +48,7 @@ func (e *StorageEngine) open() error { for res := range errCh { if res.err != nil { - e.log.Error("could not open shard, closing and skipping", + e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping, zap.String("id", res.id), zap.Error(res.err)) @@ -56,7 +57,7 @@ func (e *StorageEngine) open() error { err := sh.Close() if err != nil { - e.log.Error("could not close partially initialized shard", + e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), zap.Error(res.err)) } @@ -94,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { for res := range errCh { if res.err != nil { if errors.Is(res.err, blobstor.ErrInitBlobovniczas) { - e.log.Error("could not initialize shard, closing and skipping", + e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping, zap.String("id", res.id), zap.Error(res.err)) @@ -103,7 +104,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { err := sh.Close() if err != nil { - e.log.Error("could not close partially initialized shard", + e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), zap.Error(res.err)) } @@ -149,7 +150,7 @@ func (e *StorageEngine) close(releasePools bool) error { for id, sh := range e.shards { if err := sh.Close(); err != nil { - e.log.Debug("could not 
close shard", + e.log.Debug(logs.EngineCouldNotCloseShard, zap.String("id", id), zap.String("error", err.Error()), ) @@ -307,9 +308,9 @@ loop: e.removeShards(shardsToRemove...) for _, p := range shardsToReload { - err := p.sh.Reload(p.opts...) + err := p.sh.Reload(ctx, p.opts...) if err != nil { - e.log.Error("could not reload a shard", + e.log.Error(logs.EngineCouldNotReloadAShard, zap.Stringer("shard id", p.sh.ID()), zap.Error(err)) } @@ -338,7 +339,7 @@ loop: return fmt.Errorf("could not add %s shard: %w", idStr, err) } - e.log.Info("added new shard", zap.String("id", idStr)) + e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr)) } return nil diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index 91bec63a6..d7eaae1d8 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -204,7 +204,7 @@ func TestExecBlocks(t *testing.T) { addr := object.AddressOf(obj) - require.NoError(t, Put(e, obj)) + require.NoError(t, Put(context.Background(), e, obj)) // block executions errBlock := errors.New("block exec err") @@ -233,8 +233,7 @@ func TestExecBlocks(t *testing.T) { } func TestPersistentShardID(t *testing.T) { - dir, err := os.MkdirTemp("", "*") - require.NoError(t, err) + dir := t.TempDir() te := newEngineWithErrorThreshold(t, dir, 1) diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 2105c452f..f9b9c9a87 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -4,10 +4,14 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -46,6 +50,13 @@ func (p *DeletePrm) WithForceRemoval() { // on operations with that object) if WithForceRemoval option has // been provided. func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + attribute.Bool("force_removal", prm.forceRemoval), + )) + defer span.End() + err = e.execIfNotBlocked(func() error { res, err = e.delete(ctx, prm) return err @@ -134,9 +145,9 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo } e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { - res, err := sh.Select(selectPrm) + res, err := sh.Select(ctx, selectPrm) if err != nil { - e.log.Warn("error during searching for object children", + e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren, zap.Stringer("addr", addr), zap.String("error", err.Error())) return false @@ -147,7 +158,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo _, err = sh.Inhume(ctx, inhumePrm) if err != nil { - e.log.Debug("could not inhume object in shard", + e.log.Debug(logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), zap.String("err", err.Error())) continue diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go index 259a40a7c..bbc27615a 100644 --- a/pkg/local_object_storage/engine/delete_test.go +++ b/pkg/local_object_storage/engine/delete_test.go @@ -18,6 +18,8 @@ import ( ) func TestDeleteBigObject(t *testing.T) { + t.Parallel() + defer os.RemoveAll(t.Name()) cnr := cidtest.ID() @@ -59,9 +61,9 @@ func TestDeleteBigObject(t *testing.T) { defer e.Close() for i := range children { - 
require.NoError(t, Put(e, children[i])) + require.NoError(t, Put(context.Background(), e, children[i])) } - require.NoError(t, Put(e, link)) + require.NoError(t, Put(context.Background(), e, link)) var splitErr *objectSDK.SplitInfoError diff --git a/pkg/local_object_storage/engine/dump.go b/pkg/local_object_storage/engine/dump.go deleted file mode 100644 index f5cf8c32e..000000000 --- a/pkg/local_object_storage/engine/dump.go +++ /dev/null @@ -1,19 +0,0 @@ -package engine - -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - -// DumpShard dumps objects from the shard with provided identifier. -// -// Returns an error if shard is not read-only. -func (e *StorageEngine) DumpShard(id *shard.ID, prm shard.DumpPrm) error { - e.mtx.RLock() - defer e.mtx.RUnlock() - - sh, ok := e.shards[id.String()] - if !ok { - return errShardNotFound - } - - _, err := sh.Dump(prm) - return err -} diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index e0161bfe3..20c8a946b 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -4,6 +4,7 @@ import ( "errors" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -87,24 +88,24 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32) { sid := sh.ID() err := sh.SetMode(mode.DegradedReadOnly) if err != nil { - e.log.Error("failed to move shard in degraded-read-only mode, moving to read-only", + e.log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), zap.Error(err)) err = sh.SetMode(mode.ReadOnly) if err != nil { - e.log.Error("failed to move shard in 
read-only mode", + e.log.Error(logs.EngineFailedToMoveShardInReadonlyMode, zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), zap.Error(err)) } else { - e.log.Info("shard is moved in read-only mode due to error threshold", + e.log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold, zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount)) } } else { - e.log.Info("shard is moved in degraded mode due to error threshold", + e.log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold, zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount)) } @@ -182,7 +183,7 @@ func (e *StorageEngine) reportShardErrorWithFlags( default: // For background workers we can have a lot of such errors, // thus logging is done with DEBUG level. - e.log.Debug("mode change is in progress, ignoring set-mode request", + e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, zap.Stringer("shard_id", sid), zap.Uint32("error_count", errCount)) } diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index ddaf88d18..4d2ddc100 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -60,7 +60,7 @@ func benchmarkExists(b *testing.B, shardNum int) { addr := oidtest.Address() for i := 0; i < 100; i++ { obj := testutil.GenerateObjectWithCID(cidtest.ID()) - err := Put(e, obj) + err := Put(context.Background(), e, obj) if err != nil { b.Fatal(err) } @@ -69,7 +69,7 @@ func benchmarkExists(b *testing.B, shardNum int) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - ok, err := e.exists(addr) + ok, err := e.exists(context.Background(), addr) if err != nil || ok { b.Fatalf("%t %v", ok, err) } diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go index 4ff019e4d..dc28d35fa 100644 --- a/pkg/local_object_storage/engine/error_test.go +++ 
b/pkg/local_object_storage/engine/error_test.go @@ -41,11 +41,7 @@ type testShard struct { func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) *testEngine { if dir == "" { - var err error - - dir, err = os.MkdirTemp("", "*") - require.NoError(t, err) - t.Cleanup(func() { _ = os.RemoveAll(dir) }) + dir = t.TempDir() } var testShards [2]*testShard @@ -98,7 +94,7 @@ func TestErrorReporting(t *testing.T) { var prm shard.PutPrm prm.SetObject(obj) te.ng.mtx.RLock() - _, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm) + _, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm) te.ng.mtx.RUnlock() require.NoError(t, err) @@ -132,7 +128,7 @@ func TestErrorReporting(t *testing.T) { var prm shard.PutPrm prm.SetObject(obj) te.ng.mtx.RLock() - _, err := te.ng.shards[te.shards[0].id.String()].Put(prm) + _, err := te.ng.shards[te.shards[0].id.String()].Put(context.Background(), prm) te.ng.mtx.RUnlock() require.NoError(t, err) @@ -171,9 +167,7 @@ func TestErrorReporting(t *testing.T) { } func TestBlobstorFailback(t *testing.T) { - dir, err := os.MkdirTemp("", "*") - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, os.RemoveAll(dir)) }) + dir := t.TempDir() te := newEngineWithErrorThreshold(t, dir, 1) @@ -185,7 +179,7 @@ func TestBlobstorFailback(t *testing.T) { var prm shard.PutPrm prm.SetObject(obj) te.ng.mtx.RLock() - _, err = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm) + _, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm) te.ng.mtx.RUnlock() require.NoError(t, err) objs = append(objs, obj) @@ -193,7 +187,7 @@ func TestBlobstorFailback(t *testing.T) { for i := range objs { addr := object.AddressOf(objs[i]) - _, err = te.ng.Get(context.Background(), GetPrm{addr: addr}) + _, err := te.ng.Get(context.Background(), GetPrm{addr: addr}) require.NoError(t, err) _, err = te.ng.GetRange(context.Background(), RngPrm{addr: addr}) require.NoError(t, err) diff --git 
a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index f16413ea2..761ed24b9 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -5,9 +5,11 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -15,10 +17,12 @@ import ( "go.uber.org/zap" ) +var ErrMustBeReadOnly = logicerr.New("shard must be in read-only mode") + // EvacuateShardPrm represents parameters for the EvacuateShard operation. type EvacuateShardPrm struct { shardID []*shard.ID - handler func(oid.Address, *objectSDK.Object) error + handler func(context.Context, oid.Address, *objectSDK.Object) error ignoreErrors bool } @@ -38,7 +42,7 @@ func (p *EvacuateShardPrm) WithIgnoreErrors(ignore bool) { } // WithFaultHandler sets handler to call for objects which cannot be saved on other shards. 
-func (p *EvacuateShardPrm) WithFaultHandler(f func(oid.Address, *objectSDK.Object) error) { +func (p *EvacuateShardPrm) WithFaultHandler(f func(context.Context, oid.Address, *objectSDK.Object) error) { p.handler = f } @@ -79,17 +83,18 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (Eva } } - e.log.Info("started shards evacuation", zap.Strings("shard_ids", shardIDs)) + e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs)) var res EvacuateShardRes for _, shardID := range shardIDs { if err = e.evacuateShard(ctx, shardID, prm, &res, shards, weights, shardsToEvacuate); err != nil { + e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs)) return res, err } } - e.log.Info("finished shards evacuation", zap.Strings("shard_ids", shardIDs)) + e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation, zap.Strings("shard_ids", shardIDs)) return res, nil } @@ -134,7 +139,7 @@ func (e *StorageEngine) getActualShards(shardIDs []string, handlerDefined bool) } if !sh.GetMode().ReadOnly() { - return nil, nil, shard.ErrMustBeReadOnly + return nil, nil, ErrMustBeReadOnly } } @@ -164,6 +169,11 @@ func (e *StorageEngine) getActualShards(shardIDs []string, handlerDefined bool) func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.AddressWithType, prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error { for i := range toEvacuate { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } addr := toEvacuate[i].Address var getPrm shard.GetPrm @@ -177,7 +187,12 @@ func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, to return err } - if e.tryEvacuateObject(ctx, addr, getRes.Object(), sh, res, shards, weights, shardsToEvacuate) { + evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, res, shards, 
weights, shardsToEvacuate) + if err != nil { + return err + } + + if evacuatedLocal { continue } @@ -187,7 +202,7 @@ func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, to return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i]) } - err = prm.handler(addr, getRes.Object()) + err = prm.handler(ctx, addr, getRes.Object()) if err != nil { return err } @@ -196,25 +211,31 @@ func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, to return nil } -func (e *StorageEngine) tryEvacuateObject(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard, res *EvacuateShardRes, - shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) bool { +func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard, res *EvacuateShardRes, + shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) (bool, error) { hrw.SortHasherSliceByWeightValue(shards, weights, hrw.Hash([]byte(addr.EncodeToString()))) for j := range shards { + select { + case <-ctx.Done(): + return false, ctx.Err() + default: + } + if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok { continue } putDone, exists := e.putToShard(ctx, shards[j].hashedShard, j, shards[j].pool, addr, object) if putDone || exists { if putDone { - e.log.Debug("object is moved to another shard", + e.log.Debug(logs.EngineObjectIsMovedToAnotherShard, zap.Stringer("from", sh.ID()), zap.Stringer("to", shards[j].ID()), zap.Stringer("addr", addr)) res.count++ } - return true + return true, nil } } - return false + return false, nil } diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index c116aeff9..bc5b05ef0 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "os" "path/filepath" 
"strconv" "testing" @@ -25,9 +24,7 @@ import ( ) func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) { - dir, err := os.MkdirTemp("", "*") - require.NoError(t, err) - t.Cleanup(func() { _ = os.RemoveAll(dir) }) + dir := t.TempDir() te := testNewEngine(t, WithShardPoolSize(1)). setShardsNumOpts(t, shardNum, func(id int) []shard.Option { @@ -57,7 +54,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng var putPrm shard.PutPrm putPrm.SetObject(obj) - _, err := e.shards[sh.String()].Put(putPrm) + _, err := e.shards[sh.String()].Put(context.Background(), putPrm) require.NoError(t, err) } @@ -67,7 +64,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng var putPrm PutPrm putPrm.WithObject(objects[len(objects)-1]) - _, err := e.Put(putPrm) + err := e.Put(context.Background(), putPrm) require.NoError(t, err) res, err := e.shards[ids[len(ids)-1].String()].List() @@ -80,6 +77,8 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng } func TestEvacuateShard(t *testing.T) { + t.Parallel() + const objPerShard = 3 e, ids, objects := newEngineEvacuate(t, 3, objPerShard) @@ -103,7 +102,7 @@ func TestEvacuateShard(t *testing.T) { t.Run("must be read-only", func(t *testing.T) { res, err := e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, shard.ErrMustBeReadOnly) + require.ErrorIs(t, err, ErrMustBeReadOnly) require.Equal(t, 0, res.Count()) }) @@ -135,11 +134,13 @@ func TestEvacuateShard(t *testing.T) { } func TestEvacuateNetwork(t *testing.T) { + t.Parallel() + var errReplication = errors.New("handler error") - acceptOneOf := func(objects []*objectSDK.Object, max int) func(oid.Address, *objectSDK.Object) error { + acceptOneOf := func(objects []*objectSDK.Object, max int) func(context.Context, oid.Address, *objectSDK.Object) error { var n int - return func(addr oid.Address, obj *objectSDK.Object) error { + 
return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) error { if n == max { return errReplication } @@ -157,6 +158,7 @@ func TestEvacuateNetwork(t *testing.T) { } t.Run("single shard", func(t *testing.T) { + t.Parallel() e, ids, objects := newEngineEvacuate(t, 1, 3) evacuateShardID := ids[0].String() @@ -176,6 +178,7 @@ func TestEvacuateNetwork(t *testing.T) { require.Equal(t, 2, res.Count()) }) t.Run("multiple shards, evacuate one", func(t *testing.T) { + t.Parallel() e, ids, objects := newEngineEvacuate(t, 2, 3) require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) @@ -198,6 +201,7 @@ func TestEvacuateNetwork(t *testing.T) { }) }) t.Run("multiple shards, evacuate many", func(t *testing.T) { + t.Parallel() e, ids, objects := newEngineEvacuate(t, 4, 5) evacuateIDs := ids[0:3] @@ -230,3 +234,29 @@ func TestEvacuateNetwork(t *testing.T) { }) }) } + +func TestEvacuateCancellation(t *testing.T) { + t.Parallel() + e, ids, _ := newEngineEvacuate(t, 2, 3) + + require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + + var prm EvacuateShardPrm + prm.shardID = ids[1:2] + prm.handler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return nil + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + res, err := e.Evacuate(ctx, prm) + require.ErrorContains(t, err, "context canceled") + require.Equal(t, 0, res.Count()) +} diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index 3a8e09a6d..6208461e9 100644 --- a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -10,14 +10,14 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) -func (e *StorageEngine) exists(addr oid.Address) (bool, error) { +func (e *StorageEngine) exists(ctx 
context.Context, addr oid.Address) (bool, error) { var shPrm shard.ExistsPrm shPrm.SetAddress(addr) alreadyRemoved := false exists := false e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { - res, err := sh.Exists(context.TODO(), shPrm) + res, err := sh.Exists(ctx, shPrm) if err != nil { if shard.IsErrRemoved(err) { alreadyRemoved = true diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 7d17b50fa..683b7bde8 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -48,6 +48,12 @@ func (r GetRes) Object() *objectSDK.Object { // // Returns an error if executions are blocked (see BlockExecution). func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Get", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + )) + defer span.End() + err = e.execIfNotBlocked(func() error { res, err = e.get(ctx, prm) return err @@ -57,12 +63,6 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er } func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.get", - trace.WithAttributes( - attribute.String("address", prm.addr.EncodeToString()), - )) - defer span.End() - if e.metrics != nil { defer elapsed(e.metrics.AddGetDuration)() } diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go index e5fd4b04f..bf00c4289 100644 --- a/pkg/local_object_storage/engine/head_test.go +++ b/pkg/local_object_storage/engine/head_test.go @@ -55,11 +55,11 @@ func TestHeadRaw(t *testing.T) { putPrmLink.SetObject(link) // put most left object in one shard - _, err := s1.Put(putPrmLeft) + _, err := s1.Put(context.Background(), putPrmLeft) require.NoError(t, err) // put link object in another shard - _, err = 
s2.Put(putPrmLink) + _, err = s2.Put(context.Background(), putPrmLink) require.NoError(t, err) // head with raw flag should return SplitInfoError diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index db9988338..b1204ed99 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -4,11 +4,15 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -61,6 +65,9 @@ var errInhumeFailure = errors.New("inhume operation failed") // // Returns an error if executions are blocked (see BlockExecution). 
func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume") + defer span.End() + err = e.execIfNotBlocked(func() error { res, err = e.inhume(ctx, prm) return err @@ -81,9 +88,9 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e for i := range prm.addrs { if !prm.forceRemoval { - locked, err := e.IsLocked(prm.addrs[i]) + locked, err := e.IsLocked(ctx, prm.addrs[i]) if err != nil { - e.log.Warn("removing an object without full locking check", + e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck, zap.Error(err), zap.Stringer("addr", prm.addrs[i])) } else if locked { @@ -180,13 +187,19 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh } // IsLocked checks whether an object is locked according to StorageEngine's state. -func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) { +func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.IsLocked", + trace.WithAttributes( + attribute.String("address", addr.EncodeToString()), + )) + defer span.End() + var locked bool var err error var outErr error e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - locked, err = h.Shard.IsLocked(addr) + locked, err = h.Shard.IsLocked(ctx, addr) if err != nil { e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("addr", addr)) outErr = err @@ -205,7 +218,7 @@ func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) { func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) { e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { - sh.HandleExpiredTombstones(addrs) + sh.HandleExpiredTombstones(ctx, addrs) select { case <-ctx.Done(): @@ -222,7 +235,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, 
epoch uint64, l select { case <-ctx.Done(): - e.log.Info("interrupt processing the expired locks", zap.Error(ctx.Err())) + e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err())) return true default: return false @@ -236,7 +249,7 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A select { case <-ctx.Done(): - e.log.Info("interrupt processing the deleted locks", zap.Error(ctx.Err())) + e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err())) return true default: return false diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 4f8c96b99..924cf518b 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -42,7 +42,7 @@ func TestStorageEngine_Inhume(t *testing.T) { e := testNewEngine(t).setShardsNum(t, 1).engine defer e.Close() - err := Put(e, parent) + err := Put(context.Background(), e, parent) require.NoError(t, err) var inhumePrm InhumePrm @@ -51,7 +51,7 @@ func TestStorageEngine_Inhume(t *testing.T) { _, err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) - addrs, err := Select(e, cnr, fs) + addrs, err := Select(context.Background(), e, cnr, fs) require.NoError(t, err) require.Empty(t, addrs) }) @@ -65,12 +65,12 @@ func TestStorageEngine_Inhume(t *testing.T) { var putChild shard.PutPrm putChild.SetObject(child) - _, err := s1.Put(putChild) + _, err := s1.Put(context.Background(), putChild) require.NoError(t, err) var putLink shard.PutPrm putLink.SetObject(link) - _, err = s2.Put(putLink) + _, err = s2.Put(context.Background(), putLink) require.NoError(t, err) var inhumePrm InhumePrm @@ -79,7 +79,7 @@ func TestStorageEngine_Inhume(t *testing.T) { _, err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) - addrs, err := Select(e, cnr, fs) + addrs, err := Select(context.Background(), e, cnr, fs) require.NoError(t, err) 
require.Empty(t, addrs) }) diff --git a/pkg/local_object_storage/engine/list.go b/pkg/local_object_storage/engine/list.go index 8644a7f7e..8781416f4 100644 --- a/pkg/local_object_storage/engine/list.go +++ b/pkg/local_object_storage/engine/list.go @@ -1,6 +1,7 @@ package engine import ( + "math/rand" "sort" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -12,10 +13,38 @@ import ( // cursor. Use nil cursor object to start listing again. var ErrEndOfListing = shard.ErrEndOfListing -// Cursor is a type for continuous object listing. +// Cursor is a type for continuous object listing. Cursor contains shard IDs to read +// and shard cursors that contain state from previous read. type Cursor struct { - shardID string - shardCursor *shard.Cursor + current string + shardIDs map[string]bool + shardIDToCursor map[string]*shard.Cursor +} + +func (c *Cursor) getCurrentShardCursor() *shard.Cursor { + return c.shardIDToCursor[c.current] +} + +func (c *Cursor) setCurrentShardCursor(sc *shard.Cursor) { + c.shardIDToCursor[c.current] = sc +} + +func (c *Cursor) nextShard() bool { + var shardsToRead []string + for shardID, read := range c.shardIDs { + if !read { + shardsToRead = append(shardsToRead, shardID) + } + } + if len(shardsToRead) == 0 { + return false + } + c.current = shardsToRead[rand.Intn(len(shardsToRead))] + return true +} + +func (c *Cursor) setShardRead(shardID string) { + c.shardIDs[shardID] = true } // ListWithCursorPrm contains parameters for ListWithCursor operation. @@ -57,65 +86,69 @@ func (l ListWithCursorRes) Cursor() *Cursor { // Does not include inhumed objects. Use cursor value from the response // for consecutive requests. // +// If count param is big enough, then the method reads objects from different shards +// by portions. In this case shards are chosen randomly, if they're not read out yet. +// +// Adding a shard between ListWithCursor does not invalidate the cursor but new shard +// won't be listed. 
+// Removing a shard between ListWithCursor leads to the undefined behavior +// (e.g. usage of the objects from the removed shard). +// // Returns ErrEndOfListing if there are no more objects to return or count // parameter set to zero. func (e *StorageEngine) ListWithCursor(prm ListWithCursorPrm) (ListWithCursorRes, error) { result := make([]objectcore.AddressWithType, 0, prm.count) - // 1. Get available shards and sort them. - e.mtx.RLock() - shardIDs := make([]string, 0, len(e.shards)) - for id := range e.shards { - shardIDs = append(shardIDs, id) - } - e.mtx.RUnlock() - - if len(shardIDs) == 0 { - return ListWithCursorRes{}, ErrEndOfListing - } - - sort.Slice(shardIDs, func(i, j int) bool { - return shardIDs[i] < shardIDs[j] - }) - - // 2. Prepare cursor object. + // Set initial cursors cursor := prm.cursor if cursor == nil { - cursor = &Cursor{shardID: shardIDs[0]} + shardIDs := getSortedShardIDs(e) + if len(shardIDs) == 0 { + return ListWithCursorRes{}, ErrEndOfListing + } + cursor = newCursor(shardIDs) } - // 3. Iterate over available shards. Skip unavailable shards. 
- for i := range shardIDs { + const ( + splitShardCountLimit = 100 + shardsNum = 4 + ) + + batchSize := prm.count + if batchSize >= splitShardCountLimit { + batchSize /= shardsNum + } + + for cursor.nextShard() { if len(result) >= int(prm.count) { break } - - if shardIDs[i] < cursor.shardID { - continue - } + curr := cursor.current e.mtx.RLock() - shardInstance, ok := e.shards[shardIDs[i]] + shardInstance, ok := e.shards[curr] e.mtx.RUnlock() if !ok { + cursor.setShardRead(curr) continue } - count := uint32(int(prm.count) - len(result)) + count := prm.count - uint32(len(result)) + if count > batchSize { + count = batchSize + } + var shardPrm shard.ListWithCursorPrm shardPrm.WithCount(count) - if shardIDs[i] == cursor.shardID { - shardPrm.WithCursor(cursor.shardCursor) - } + shardPrm.WithCursor(cursor.getCurrentShardCursor()) res, err := shardInstance.ListWithCursor(shardPrm) if err != nil { + cursor.setShardRead(curr) continue } - result = append(result, res.AddressList()...) - cursor.shardCursor = res.Cursor() - cursor.shardID = shardIDs[i] + cursor.setCurrentShardCursor(res.Cursor()) } if len(result) == 0 { @@ -127,3 +160,23 @@ func (e *StorageEngine) ListWithCursor(prm ListWithCursorPrm) (ListWithCursorRes cursor: cursor, }, nil } + +func getSortedShardIDs(e *StorageEngine) []string { + e.mtx.RLock() + shardIDs := make([]string, 0, len(e.shards)) + for id := range e.shards { + shardIDs = append(shardIDs, id) + } + e.mtx.RUnlock() + sort.Strings(shardIDs) + return shardIDs +} + +func newCursor(shardIDs []string) *Cursor { + shardIDsMap := make(map[string]bool) + shardIDToCursor := make(map[string]*shard.Cursor) + for _, shardID := range shardIDs { + shardIDsMap[shardID] = false + } + return &Cursor{shardIDs: shardIDsMap, shardIDToCursor: shardIDToCursor} +} diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go index 1261de9d4..5b927cf11 100644 --- a/pkg/local_object_storage/engine/list_test.go +++ 
b/pkg/local_object_storage/engine/list_test.go @@ -1,77 +1,124 @@ package engine import ( - "errors" + "context" + "fmt" "os" + "path/filepath" "sort" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/stretchr/testify/require" + "go.uber.org/zap" ) -func TestListWithCursor(t *testing.T) { - s1 := testNewShard(t, 1) - s2 := testNewShard(t, 2) - e := testNewEngine(t).setInitializedShards(t, s1, s2).engine - - t.Cleanup(func() { - e.Close() - os.RemoveAll(t.Name()) - }) - - const total = 20 - - expected := make([]object.AddressWithType, 0, total) - got := make([]object.AddressWithType, 0, total) - - for i := 0; i < total; i++ { - containerID := cidtest.ID() - obj := testutil.GenerateObjectWithCID(containerID) - - var prm PutPrm - prm.WithObject(obj) - - _, err := e.Put(prm) - require.NoError(t, err) - expected = append(expected, object.AddressWithType{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)}) - } - - expected = sortAddresses(expected) - - var prm ListWithCursorPrm - prm.WithCount(1) - - res, err := e.ListWithCursor(prm) - require.NoError(t, err) - require.NotEmpty(t, res.AddressList()) - got = append(got, res.AddressList()...) 
- - for i := 0; i < total-1; i++ { - prm.WithCursor(res.Cursor()) - - res, err = e.ListWithCursor(prm) - if errors.Is(err, ErrEndOfListing) { - break - } - got = append(got, res.AddressList()...) - } - - prm.WithCursor(res.Cursor()) - - _, err = e.ListWithCursor(prm) - require.ErrorIs(t, err, ErrEndOfListing) - - got = sortAddresses(got) - require.Equal(t, expected, got) -} - func sortAddresses(addrWithType []object.AddressWithType) []object.AddressWithType { sort.Slice(addrWithType, func(i, j int) bool { return addrWithType[i].Address.EncodeToString() < addrWithType[j].Address.EncodeToString() }) return addrWithType } + +func TestListWithCursor(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + shardNum int + objectNum int + batchSize uint32 + }{ + { + name: "one shard, few objects, small batch size", + shardNum: 1, + objectNum: 2, + batchSize: 1, + }, + { + name: "one shard, many objects, big batch size", + shardNum: 1, + objectNum: 53, + batchSize: 100, + }, + { + name: "many shards, many objects, small batch size", + shardNum: 6, + objectNum: 66, + batchSize: 1, + }, + { + name: "many shards, many objects, big batch size", + shardNum: 6, + objectNum: 99, + batchSize: 100, + }, + } + for i := range tests { + tt := tests[i] + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + e := testNewEngine(t).setShardsNumOpts(t, tt.shardNum, func(id int) []shard.Option { + return []shard.Option{ + shard.WithLogger(&logger.Logger{Logger: zap.L()}), + shard.WithBlobStorOptions( + blobstor.WithStorages( + newStorages(filepath.Join(t.Name(), tt.name, fmt.Sprintf("%d.blobstor", id)), + 1<<20))), + shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.Name(), tt.name, fmt.Sprintf("%d.pilorama", id)))), + shard.WithMetaBaseOptions( + meta.WithPath(filepath.Join(t.Name(), tt.name, fmt.Sprintf("%d.metabase", id))), + meta.WithPermissions(0700), + meta.WithEpochState(epochState{}), + )} + }).engine + require.NoError(t, e.Open()) + require.NoError(t, 
e.Init(context.Background())) + + t.Cleanup(func() { + e.Close() + os.RemoveAll(t.Name()) + }) + + expected := make([]object.AddressWithType, 0, tt.objectNum) + got := make([]object.AddressWithType, 0, tt.objectNum) + + for i := 0; i < tt.objectNum; i++ { + containerID := cidtest.ID() + obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'}) + + var prm PutPrm + prm.WithObject(obj) + + err := e.Put(context.Background(), prm) + require.NoError(t, err) + expected = append(expected, object.AddressWithType{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)}) + } + expected = sortAddresses(expected) + + var prm ListWithCursorPrm + prm.count = tt.batchSize + for { + res, err := e.ListWithCursor(prm) + if err == ErrEndOfListing { + require.Empty(t, res.AddressList()) + break + } + require.NotEmpty(t, res.AddressList()) + got = append(got, res.AddressList()...) + prm.cursor = res.Cursor() + } + + got = sortAddresses(got) + require.Equal(t, expected, got) + }) + } +} diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index 60a1d9c9f..4562c1a57 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -4,12 +4,15 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) var errLockFailed = errors.New("lock operation failed") @@ -20,19 +23,27 @@ var errLockFailed = errors.New("lock operation failed") // Allows locking regular objects only 
(otherwise returns apistatus.LockNonRegularObject). // // Locked list should be unique. Panics if it is empty. -func (e *StorageEngine) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error { +func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Lock", + trace.WithAttributes( + attribute.String("container_id", idCnr.EncodeToString()), + attribute.String("locker", locker.EncodeToString()), + attribute.Int("locked_count", len(locked)), + )) + defer span.End() + return e.execIfNotBlocked(func() error { - return e.lock(idCnr, locker, locked) + return e.lock(ctx, idCnr, locker, locked) }) } -func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error { +func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { for i := range locked { - switch e.lockSingle(idCnr, locker, locked[i], true) { + switch e.lockSingle(ctx, idCnr, locker, locked[i], true) { case 1: return logicerr.Wrap(apistatus.LockNonRegularObject{}) case 0: - switch e.lockSingle(idCnr, locker, locked[i], false) { + switch e.lockSingle(ctx, idCnr, locker, locked[i], false) { case 1: return logicerr.Wrap(apistatus.LockNonRegularObject{}) case 0: @@ -48,7 +59,7 @@ func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error // - 0: fail // - 1: locking irregular object // - 2: ok -func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) { +func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) { // code is pretty similar to inhumeAddr, maybe unify? 
root := false var errIrregular apistatus.LockNonRegularObject @@ -70,7 +81,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi var existsPrm shard.ExistsPrm existsPrm.SetAddress(addrLocked) - exRes, err := sh.Exists(context.TODO(), existsPrm) + exRes, err := sh.Exists(ctx, existsPrm) if err != nil { var siErr *objectSDK.SplitInfoError if !errors.As(err, &siErr) { @@ -90,7 +101,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi } } - err := sh.Lock(idCnr, locker, []oid.ID{locked}) + err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked}) if err != nil { e.reportShardError(sh, "could not lock object in shard", err) diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index fd3b04ef0..7796913ea 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ b/pkg/local_object_storage/engine/lock_test.go @@ -31,6 +31,8 @@ func (t tss) IsTombstoneAvailable(ctx context.Context, _ oid.Address, epoch uint } func TestLockUserScenario(t *testing.T) { + t.Parallel() + // Tested user actions: // 1. stores some object // 2. locks the object @@ -99,7 +101,7 @@ func TestLockUserScenario(t *testing.T) { id, _ := obj.ID() objAddr.SetObject(id) - err = Put(e, obj) + err = Put(context.Background(), e, obj) require.NoError(t, err) // 2. @@ -107,10 +109,10 @@ func TestLockUserScenario(t *testing.T) { locker.WriteMembers([]oid.ID{id}) object.WriteLock(lockerObj, locker) - err = Put(e, lockerObj) + err = Put(context.Background(), e, lockerObj) require.NoError(t, err) - err = e.Lock(cnr, lockerID, []oid.ID{id}) + err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id}) require.NoError(t, err) // 3. 
@@ -125,7 +127,7 @@ func TestLockUserScenario(t *testing.T) { tombObj.SetID(tombForLockID) tombObj.SetAttributes(a) - err = Put(e, tombObj) + err = Put(context.Background(), e, tombObj) require.NoError(t, err) inhumePrm.WithTarget(tombForLockAddr, lockerAddr) @@ -146,6 +148,8 @@ func TestLockUserScenario(t *testing.T) { } func TestLockExpiration(t *testing.T) { + t.Parallel() + // Tested scenario: // 1. some object is stored // 2. lock object for it is stored, and the object is locked @@ -180,7 +184,7 @@ func TestLockExpiration(t *testing.T) { // 1. obj := testutil.GenerateObjectWithCID(cnr) - err = Put(e, obj) + err = Put(context.Background(), e, obj) require.NoError(t, err) // 2. @@ -192,13 +196,13 @@ func TestLockExpiration(t *testing.T) { lock.SetType(object.TypeLock) lock.SetAttributes(a) - err = Put(e, lock) + err = Put(context.Background(), e, lock) require.NoError(t, err) id, _ := obj.ID() idLock, _ := lock.ID() - err = e.Lock(cnr, idLock, []oid.ID{id}) + err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id}) require.NoError(t, err) var inhumePrm InhumePrm @@ -222,6 +226,8 @@ func TestLockExpiration(t *testing.T) { } func TestLockForceRemoval(t *testing.T) { + t.Parallel() + // Tested scenario: // 1. some object is stored // 2. lock object for it is stored, and the object is locked @@ -255,20 +261,20 @@ func TestLockForceRemoval(t *testing.T) { // 1. obj := testutil.GenerateObjectWithCID(cnr) - err = Put(e, obj) + err = Put(context.Background(), e, obj) require.NoError(t, err) // 2. lock := testutil.GenerateObjectWithCID(cnr) lock.SetType(object.TypeLock) - err = Put(e, lock) + err = Put(context.Background(), e, lock) require.NoError(t, err) id, _ := obj.ID() idLock, _ := lock.ID() - err = e.Lock(cnr, idLock, []oid.ID{id}) + err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id}) require.NoError(t, err) // 3. 
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 5f9105efc..0543f9f15 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -4,6 +4,8 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -11,6 +13,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -19,9 +23,6 @@ type PutPrm struct { obj *objectSDK.Object } -// PutRes groups the resulting values of Put operation. -type PutRes struct{} - var errPutShard = errors.New("could not put object to any shard") // WithObject is a Put option to set object to save. @@ -39,16 +40,22 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) { // Returns an error if executions are blocked (see BlockExecution). // // Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed. 
-func (e *StorageEngine) Put(prm PutPrm) (res PutRes, err error) { +func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put", + trace.WithAttributes( + attribute.String("address", object.AddressOf(prm.obj).EncodeToString()), + )) + defer span.End() + err = e.execIfNotBlocked(func() error { - res, err = e.put(prm) + err = e.put(ctx, prm) return err }) return } -func (e *StorageEngine) put(prm PutPrm) (PutRes, error) { +func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { if e.metrics != nil { defer elapsed(e.metrics.AddPutDuration)() } @@ -57,9 +64,9 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) { // In #1146 this check was parallelized, however, it became // much slower on fast machines for 4 shards. - _, err := e.exists(addr) + _, err := e.exists(ctx, addr) if err != nil { - return PutRes{}, err + return err } finished := false @@ -73,7 +80,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) { return false } - putDone, exists := e.putToShard(context.TODO(), sh, ind, pool, addr, prm.obj) + putDone, exists := e.putToShard(ctx, sh, ind, pool, addr, prm.obj) finished = putDone || exists return finished }) @@ -82,7 +89,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) { err = errPutShard } - return PutRes{}, err + return err } // putToShard puts object to sh. 
@@ -116,9 +123,9 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int, var toMoveItPrm shard.ToMoveItPrm toMoveItPrm.SetAddress(addr) - _, err = sh.ToMoveIt(toMoveItPrm) + _, err = sh.ToMoveIt(ctx, toMoveItPrm) if err != nil { - e.log.Warn("could not mark object for shard relocation", + e.log.Warn(logs.EngineCouldNotMarkObjectForShardRelocation, zap.Stringer("shard", sh.ID()), zap.String("error", err.Error()), ) @@ -131,11 +138,11 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int, var putPrm shard.PutPrm putPrm.SetObject(obj) - _, err = sh.Put(putPrm) + _, err = sh.Put(ctx, putPrm) if err != nil { if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { - e.log.Warn("could not put object to shard", + e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Stringer("shard_id", sh.ID()), zap.String("error", err.Error())) return @@ -156,11 +163,9 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int, } // Put writes provided object to local storage. 
-func Put(storage *StorageEngine, obj *objectSDK.Object) error { +func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error { var putPrm PutPrm putPrm.WithObject(obj) - _, err := storage.Put(putPrm) - - return err + return storage.Put(ctx, putPrm) } diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go index c50c0844c..1ea569928 100644 --- a/pkg/local_object_storage/engine/remove_copies.go +++ b/pkg/local_object_storage/engine/remove_copies.go @@ -4,6 +4,7 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -42,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat prm.Concurrency = defaultRemoveDuplicatesConcurrency } - e.log.Info("starting removal of locally-redundant copies", + e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies, zap.Int("concurrency", prm.Concurrency)) // The mutext must be taken for the whole duration to avoid target shard being removed @@ -54,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat // This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0. // However we could change weights in future and easily forget this function. 
for _, sh := range e.shards { - e.log.Debug("started duplicates removal routine", zap.String("shard_id", sh.ID().String())) + e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.String("shard_id", sh.ID().String())) ch := make(chan oid.Address) errG, ctx := errgroup.WithContext(ctx) @@ -92,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat }) } if err := errG.Wait(); err != nil { - e.log.Error("finished removal of locally-redundant copies", zap.Error(err)) + e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err)) return err } } - e.log.Info("finished removal of locally-redundant copies") + e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies) return nil } @@ -128,7 +129,7 @@ func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address var deletePrm shard.DeletePrm deletePrm.SetAddresses(addr) - _, err = shards[i].Delete(deletePrm) + _, err = shards[i].Delete(ctx, deletePrm) if err != nil { return err } diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go index 4415d01c8..8131fcf0d 100644 --- a/pkg/local_object_storage/engine/remove_copies_test.go +++ b/pkg/local_object_storage/engine/remove_copies_test.go @@ -17,6 +17,8 @@ import ( ) func TestRebalance(t *testing.T) { + t.Parallel() + te := newEngineWithErrorThreshold(t, "", 0) const ( @@ -49,10 +51,10 @@ func TestRebalance(t *testing.T) { te.ng.mtx.RLock() // Every 3rd object (i%3 == 0) is put to both shards, others are distributed. 
if i%3 != 1 { - _, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm) + _, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm) } if i%3 != 2 { - _, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(prm) + _, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm) } te.ng.mtx.RUnlock() @@ -101,6 +103,8 @@ loop: } func TestRebalanceSingleThread(t *testing.T) { + t.Parallel() + te := newEngineWithErrorThreshold(t, "", 0) obj := testutil.GenerateObjectWithCID(cidtest.ID()) @@ -109,8 +113,8 @@ func TestRebalanceSingleThread(t *testing.T) { var prm shard.PutPrm prm.SetObject(obj) te.ng.mtx.RLock() - _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm) - _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm) + _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm) + _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm) te.ng.mtx.RUnlock() require.NoError(t, err1) require.NoError(t, err2) @@ -162,8 +166,8 @@ func TestRebalanceExitByContext(t *testing.T) { prm.SetObject(objects[i]) te.ng.mtx.RLock() - _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm) - _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm) + _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm) + _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm) te.ng.mtx.RUnlock() require.NoError(t, err1) diff --git a/pkg/local_object_storage/engine/restore.go b/pkg/local_object_storage/engine/restore.go deleted file mode 100644 index 84c750cd0..000000000 --- a/pkg/local_object_storage/engine/restore.go +++ /dev/null @@ -1,19 +0,0 @@ -package engine - -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - -// RestoreShard restores objects from dump to the shard with provided identifier. -// -// Returns an error if shard is not read-only. 
-func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error { - e.mtx.RLock() - defer e.mtx.RUnlock() - - sh, ok := e.shards[id.String()] - if !ok { - return errShardNotFound - } - - _, err := sh.Restore(prm) - return err -} diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 7b9b8be60..e1039ea23 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -1,10 +1,15 @@ package engine import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // SelectPrm groups the parameters of Select operation. @@ -38,16 +43,22 @@ func (r SelectRes) AddressList() []oid.Address { // Returns any error encountered that did not allow to completely select the objects. // // Returns an error if executions are blocked (see BlockExecution). 
-func (e *StorageEngine) Select(prm SelectPrm) (res SelectRes, err error) { +func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Select", + trace.WithAttributes( + attribute.String("container_id", prm.cnr.EncodeToString()), + )) + defer span.End() + err = e.execIfNotBlocked(func() error { - res, err = e._select(prm) + res, err = e._select(ctx, prm) return err }) return } -func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) { +func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) { if e.metrics != nil { defer elapsed(e.metrics.AddSearchDuration)() } @@ -62,7 +73,7 @@ func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) { shPrm.SetFilters(prm.filters) e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { - res, err := sh.Select(shPrm) + res, err := sh.Select(ctx, shPrm) if err != nil { e.reportShardError(sh, "could not select objects from shard", err) return false @@ -133,12 +144,12 @@ func (e *StorageEngine) list(limit uint64) (SelectRes, error) { } // Select selects objects from local storage using provided filters. 
-func Select(storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) { +func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) { var selectPrm SelectPrm selectPrm.WithContainerID(cnr) selectPrm.WithFilters(fs) - res, err := storage.Select(selectPrm) + res, err := storage.Select(ctx, selectPrm) if err != nil { return nil, err } diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 2b1146ff2..64546d9ef 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -3,6 +3,7 @@ package engine import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -168,7 +169,7 @@ func (e *StorageEngine) removeShards(ids ...string) { delete(e.shardPools, id) } - e.log.Info("shard has been removed", + e.log.Info(logs.EngineShardHasBeenRemoved, zap.String("id", id)) } e.mtx.Unlock() @@ -176,7 +177,7 @@ func (e *StorageEngine) removeShards(ids ...string) { for _, sh := range ss { err := sh.Close() if err != nil { - e.log.Error("could not close removed shard", + e.log.Error(logs.EngineCouldNotCloseRemovedShard, zap.Stringer("id", sh.ID()), zap.Error(err), ) diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index b69ab4890..e7d66094c 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -1,24 +1,39 @@ package engine import ( + "context" "errors" + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) var _ pilorama.Forest = (*StorageEngine)(nil) // TreeMove implements the pilorama.Forest interface. -func (e *StorageEngine) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) { - index, lst, err := e.getTreeShard(d.CID, treeID) +func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeMove", + trace.WithAttributes( + attribute.String("container_id", d.CID.EncodeToString()), + attribute.Int("position", d.Position), + attribute.Int("size", d.Size), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + + index, lst, err := e.getTreeShard(ctx, d.CID, treeID) if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { return nil, err } - lm, err := lst[index].TreeMove(d, treeID, m) + lm, err := lst[index].TreeMove(ctx, d, treeID, m) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(lst[index], "can't perform `TreeMove`", err, @@ -32,13 +47,26 @@ func (e *StorageEngine) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pil } // TreeAddByPath implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr string, path []string, m []pilorama.KeyValue) ([]pilorama.Move, error) { - index, lst, err := e.getTreeShard(d.CID, treeID) +func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, treeID string, attr string, path []string, m []pilorama.KeyValue) ([]pilorama.Move, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeAddByPath", + trace.WithAttributes( + attribute.String("container_id", d.CID.EncodeToString()), + attribute.Int("position", d.Position), + attribute.Int("size", d.Size), + attribute.String("tree_id", treeID), + attribute.String("attr", attr), + attribute.Int("path_count", len(path)), + attribute.Int("meta_count", len(m)), + ), + ) + defer span.End() + + index, lst, err := e.getTreeShard(ctx, d.CID, treeID) if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { return nil, err } - lm, err := lst[index].TreeAddByPath(d, treeID, attr, path, m) + lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err, @@ -51,13 +79,22 @@ func (e *StorageEngine) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, a } // TreeApply implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error { - index, lst, err := e.getTreeShard(cnr, treeID) +func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApply", + trace.WithAttributes( + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.Bool("background", backgroundSync), + ), + ) + defer span.End() + + index, lst, err := e.getTreeShard(ctx, cnr, treeID) if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { return err } - err = lst[index].TreeApply(cnr, treeID, m, backgroundSync) + err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(lst[index], "can't perform `TreeApply`", err, @@ -70,11 +107,22 @@ func (e *StorageEngine) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move } // TreeGetByPath implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) { +func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetByPath", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("attr", attr), + attribute.Int("path_count", len(path)), + attribute.Bool("latest", latest), + ), + ) + defer span.End() + var err error var nodes []pilorama.Node for _, sh := range e.sortShardsByWeight(cid) { - nodes, err = sh.TreeGetByPath(cid, treeID, attr, path, latest) + nodes, err = sh.TreeGetByPath(ctx, cid, treeID, attr, path, latest) if err != nil { if err == shard.ErrPiloramaDisabled { break @@ -92,12 +140,21 @@ func (e *StorageEngine) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, } // TreeGetMeta implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) { +func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetMeta", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("node_id", fmt.Sprintf("%d", nodeID)), + ), + ) + defer span.End() + var err error var m pilorama.Meta var p uint64 for _, sh := range e.sortShardsByWeight(cid) { - m, p, err = sh.TreeGetMeta(cid, treeID, nodeID) + m, p, err = sh.TreeGetMeta(ctx, cid, treeID, nodeID) if err != nil { if err == shard.ErrPiloramaDisabled { break @@ -115,11 +172,20 @@ func (e *StorageEngine) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID piloram } // TreeGetChildren implements the pilorama.Forest interface. -func (e *StorageEngine) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) { +func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetChildren", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("node_id", fmt.Sprintf("%d", nodeID)), + ), + ) + defer span.End() + var err error var nodes []uint64 for _, sh := range e.sortShardsByWeight(cid) { - nodes, err = sh.TreeGetChildren(cid, treeID, nodeID) + nodes, err = sh.TreeGetChildren(ctx, cid, treeID, nodeID) if err != nil { if err == shard.ErrPiloramaDisabled { break @@ -137,11 +203,20 @@ func (e *StorageEngine) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pil } // TreeGetOpLog implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) { +func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetOpLog", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("height", fmt.Sprintf("%d", height)), + ), + ) + defer span.End() + var err error var lm pilorama.Move for _, sh := range e.sortShardsByWeight(cid) { - lm, err = sh.TreeGetOpLog(cid, treeID, height) + lm, err = sh.TreeGetOpLog(ctx, cid, treeID, height) if err != nil { if err == shard.ErrPiloramaDisabled { break @@ -159,10 +234,18 @@ func (e *StorageEngine) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64 } // TreeDrop implements the pilorama.Forest interface. -func (e *StorageEngine) TreeDrop(cid cidSDK.ID, treeID string) error { +func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeDrop", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + var err error for _, sh := range e.sortShardsByWeight(cid) { - err = sh.TreeDrop(cid, treeID) + err = sh.TreeDrop(ctx, cid, treeID) if err != nil { if err == shard.ErrPiloramaDisabled { break @@ -180,11 +263,18 @@ func (e *StorageEngine) TreeDrop(cid cidSDK.ID, treeID string) error { } // TreeList implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeList(cid cidSDK.ID) ([]string, error) { +func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeList", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + ), + ) + defer span.End() + var resIDs []string for _, sh := range e.unsortedShards() { - ids, err := sh.TreeList(cid) + ids, err := sh.TreeList(ctx, cid) if err != nil { if errors.Is(err, shard.ErrPiloramaDisabled) || errors.Is(err, shard.ErrReadOnlyMode) { return nil, err @@ -205,8 +295,16 @@ func (e *StorageEngine) TreeList(cid cidSDK.ID) ([]string, error) { } // TreeExists implements the pilorama.Forest interface. -func (e *StorageEngine) TreeExists(cid cidSDK.ID, treeID string) (bool, error) { - _, _, err := e.getTreeShard(cid, treeID) +func (e *StorageEngine) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeExists", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + + _, _, err := e.getTreeShard(ctx, cid, treeID) if errors.Is(err, pilorama.ErrTreeNotFound) { return false, nil } @@ -214,13 +312,22 @@ func (e *StorageEngine) TreeExists(cid cidSDK.ID, treeID string) (bool, error) { } // TreeUpdateLastSyncHeight implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error { - index, lst, err := e.getTreeShard(cid, treeID) +func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeUpdateLastSyncHeight", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("height", fmt.Sprintf("%d", height)), + ), + ) + defer span.End() + + index, lst, err := e.getTreeShard(ctx, cid, treeID) if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { return err } - err = lst[index].TreeUpdateLastSyncHeight(cid, treeID, height) + err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height) if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(lst[index], "can't update tree synchronization height", err, zap.Stringer("cid", cid), @@ -230,11 +337,19 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, h } // TreeLastSyncHeight implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) { +func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeLastSyncHeight", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + var err error var height uint64 for _, sh := range e.sortShardsByWeight(cid) { - height, err = sh.TreeLastSyncHeight(cid, treeID) + height, err = sh.TreeLastSyncHeight(ctx, cid, treeID) if err != nil { if err == shard.ErrPiloramaDisabled { break @@ -251,10 +366,10 @@ func (e *StorageEngine) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64 return height, err } -func (e *StorageEngine) getTreeShard(cid cidSDK.ID, treeID string) (int, []hashedShard, error) { +func (e *StorageEngine) getTreeShard(ctx context.Context, cid cidSDK.ID, treeID string) (int, []hashedShard, error) { lst := e.sortShardsByWeight(cid) for i, sh := range lst { - exists, err := sh.TreeExists(cid, treeID) + exists, err := sh.TreeExists(ctx, cid, treeID) if err != nil { return 0, nil, err } diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go index 611c691f1..c2bae9772 100644 --- a/pkg/local_object_storage/engine/tree_test.go +++ b/pkg/local_object_storage/engine/tree_test.go @@ -1,6 +1,7 @@ package engine import ( + "context" "strconv" "testing" @@ -31,11 +32,11 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { for i := 0; i < objCount; i++ { obj := testutil.GenerateObjectWithCID(cid) testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i)) - err := Put(te.ng, obj) + err := Put(context.Background(), te.ng, obj) if err != nil { b.Fatal(err) } - _, err = te.ng.TreeAddByPath(d, treeID, pilorama.AttributeFilename, nil, + _, err = te.ng.TreeAddByPath(context.Background(), d, treeID, 
pilorama.AttributeFilename, nil, []pilorama.KeyValue{{pilorama.AttributeFilename, []byte(strconv.Itoa(i))}}) if err != nil { b.Fatal(err) @@ -51,7 +52,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { prm.WithFilters(fs) for i := 0; i < b.N; i++ { - res, err := te.ng.Select(prm) + res, err := te.ng.Select(context.Background(), prm) if err != nil { b.Fatal(err) } @@ -62,7 +63,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { }) b.Run("TreeGetByPath", func(b *testing.B) { for i := 0; i < b.N; i++ { - nodes, err := te.ng.TreeGetByPath(cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true) + nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true) if err != nil { b.Fatal(err) } diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go index 26600a3eb..4effb2b16 100644 --- a/pkg/local_object_storage/engine/writecache.go +++ b/pkg/local_object_storage/engine/writecache.go @@ -1,7 +1,12 @@ package engine import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // FlushWriteCachePrm groups the parameters of FlushWriteCache operation. @@ -26,7 +31,14 @@ func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) { type FlushWriteCacheRes struct{} // FlushWriteCache flushes write-cache on a single shard. 
-func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRes, error) { +func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) (FlushWriteCacheRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.FlushWriteCache", + trace.WithAttributes( + attribute.String("shard_id", p.shardID.String()), + attribute.Bool("ignore_errors", p.ignoreErrors), + )) + defer span.End() + e.mtx.RLock() sh, ok := e.shards[p.shardID.String()] e.mtx.RUnlock() @@ -38,5 +50,5 @@ func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRe var prm shard.FlushWriteCachePrm prm.SetIgnoreErrors(p.ignoreErrors) - return FlushWriteCacheRes{}, sh.FlushWriteCache(prm) + return FlushWriteCacheRes{}, sh.FlushWriteCache(ctx, prm) } diff --git a/pkg/local_object_storage/metabase/children.go b/pkg/local_object_storage/metabase/children.go new file mode 100644 index 000000000..f0591b43c --- /dev/null +++ b/pkg/local_object_storage/metabase/children.go @@ -0,0 +1,57 @@ +package meta + +import ( + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.etcd.io/bbolt" +) + +// GetChildren returns parent -> children map. +// If an object has no children, then map will contain addr -> empty slice value. 
+func (db *DB) GetChildren(addresses []oid.Address) (map[oid.Address][]oid.Address, error) { + db.modeMtx.RLock() + defer db.modeMtx.RUnlock() + + if db.mode.NoMetabase() { + return nil, ErrDegradedMode + } + + result := make(map[oid.Address][]oid.Address, len(addresses)) + + buffer := make([]byte, bucketKeySize) + err := db.boltDB.View(func(tx *bbolt.Tx) error { + for _, addr := range addresses { + if _, found := result[addr]; found { + continue + } + + result[addr] = []oid.Address{} + bkt := tx.Bucket(parentBucketName(addr.Container(), buffer)) + if bkt == nil { + continue + } + + binObjIDs, err := decodeList(bkt.Get(objectKey(addr.Object(), buffer))) + if err != nil { + return err + } + + for _, binObjID := range binObjIDs { + var id oid.ID + if err = id.Decode(binObjID); err != nil { + return err + } + var resultAddress oid.Address + resultAddress.SetContainer(addr.Container()) + resultAddress.SetObject(id) + result[addr] = append(result[addr], resultAddress) + } + } + return nil + }) + + if err != nil { + return nil, err + } + + return result, nil +} diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go index ef2bba638..c0565b35a 100644 --- a/pkg/local_object_storage/metabase/containers_test.go +++ b/pkg/local_object_storage/metabase/containers_test.go @@ -15,6 +15,8 @@ import ( ) func TestDB_Containers(t *testing.T) { + t.Parallel() + db := newDB(t) const N = 10 @@ -90,6 +92,8 @@ func TestDB_Containers(t *testing.T) { } func TestDB_ContainersCount(t *testing.T) { + t.Parallel() + db := newDB(t) const R, T, SG, L = 10, 11, 12, 13 // amount of object per type @@ -100,7 +104,6 @@ func TestDB_ContainersCount(t *testing.T) { }{ {R, objectSDK.TypeRegular}, {T, objectSDK.TypeTombstone}, - {SG, objectSDK.TypeStorageGroup}, {L, objectSDK.TypeLock}, } @@ -134,6 +137,8 @@ func TestDB_ContainersCount(t *testing.T) { } func TestDB_ContainerSize(t *testing.T) { + t.Parallel() + db := newDB(t) const ( diff 
--git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go index 1a19c3e2a..4ae802aaa 100644 --- a/pkg/local_object_storage/metabase/control.go +++ b/pkg/local_object_storage/metabase/control.go @@ -5,6 +5,7 @@ import ( "fmt" "path/filepath" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" @@ -25,7 +26,7 @@ func (db *DB) Open(readOnly bool) error { return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err) } - db.log.Debug("created directory for Metabase", zap.String("path", db.info.Path)) + db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) if db.boltOptions == nil { opts := *bbolt.DefaultOptions @@ -46,9 +47,9 @@ func (db *DB) openBolt() error { db.boltDB.MaxBatchDelay = db.boltBatchDelay db.boltDB.MaxBatchSize = db.boltBatchSize - db.log.Debug("opened boltDB instance for Metabase") + db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase) - db.log.Debug("checking metabase version") + db.log.Debug(logs.MetabaseCheckingMetabaseVersion) return db.boltDB.View(func(tx *bbolt.Tx) error { // The safest way to check if the metabase is fresh is to check if it has no buckets. // However, shard info can be present. 
So here we check that the number of buckets is diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go index 17f3b3893..b67e748b3 100644 --- a/pkg/local_object_storage/metabase/control_test.go +++ b/pkg/local_object_storage/metabase/control_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -55,6 +56,6 @@ func metaExists(db *meta.DB, addr oid.Address) (bool, error) { var existsPrm meta.ExistsPrm existsPrm.SetAddress(addr) - res, err := db.Exists(existsPrm) + res, err := db.Exists(context.Background(), existsPrm) return res.Exists(), err } diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go index d93bc436b..507bfcd89 100644 --- a/pkg/local_object_storage/metabase/counter_test.go +++ b/pkg/local_object_storage/metabase/counter_test.go @@ -1,6 +1,8 @@ package meta_test import ( + "context" + "os" "testing" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -15,19 +17,23 @@ import ( const objCount = 10 func TestCounters(t *testing.T) { - db := newDB(t) - - var c meta.ObjectCounters - var err error + t.Parallel() + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(t.Name())) + }) t.Run("defaults", func(t *testing.T) { - c, err = db.ObjectCounters() + t.Parallel() + db := newDB(t) + c, err := db.ObjectCounters() require.NoError(t, err) require.Zero(t, c.Phy()) require.Zero(t, c.Logic()) }) t.Run("put", func(t *testing.T) { + t.Parallel() + db := newDB(t) oo := make([]*object.Object, 0, objCount) for i := 0; i < objCount; i++ { oo = append(oo, testutil.GenerateObject()) @@ -38,10 +44,10 @@ func TestCounters(t *testing.T) { for i := 0; i < objCount; i++ { prm.SetObject(oo[i]) - _, err = db.Put(prm) + _, err := db.Put(context.Background(), prm) require.NoError(t, err) - c, err = db.ObjectCounters() + c, err := db.ObjectCounters() 
require.NoError(t, err) require.Equal(t, uint64(i+1), c.Phy()) @@ -49,20 +55,20 @@ func TestCounters(t *testing.T) { } }) - require.NoError(t, db.Reset()) - t.Run("delete", func(t *testing.T) { + t.Parallel() + db := newDB(t) oo := putObjs(t, db, objCount, false) var prm meta.DeletePrm for i := objCount - 1; i >= 0; i-- { prm.SetAddresses(objectcore.AddressOf(oo[i])) - res, err := db.Delete(prm) + res, err := db.Delete(context.Background(), prm) require.NoError(t, err) require.Equal(t, uint64(1), res.AvailableObjectsRemoved()) - c, err = db.ObjectCounters() + c, err := db.ObjectCounters() require.NoError(t, err) require.Equal(t, uint64(i), c.Phy()) @@ -70,9 +76,9 @@ func TestCounters(t *testing.T) { } }) - require.NoError(t, db.Reset()) - t.Run("inhume", func(t *testing.T) { + t.Parallel() + db := newDB(t) oo := putObjs(t, db, objCount, false) inhumedObjs := make([]oid.Address, objCount/2) @@ -89,20 +95,20 @@ func TestCounters(t *testing.T) { prm.SetTombstoneAddress(oidtest.Address()) prm.SetAddresses(inhumedObjs...) 
- res, err := db.Inhume(prm) + res, err := db.Inhume(context.Background(), prm) require.NoError(t, err) require.Equal(t, uint64(len(inhumedObjs)), res.AvailableInhumed()) - c, err = db.ObjectCounters() + c, err := db.ObjectCounters() require.NoError(t, err) require.Equal(t, uint64(objCount), c.Phy()) require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic()) }) - require.NoError(t, db.Reset()) - t.Run("put_split", func(t *testing.T) { + t.Parallel() + db := newDB(t) parObj := testutil.GenerateObject() // put objects and check that parent info @@ -115,16 +121,16 @@ func TestCounters(t *testing.T) { require.NoError(t, putBig(db, o)) - c, err = db.ObjectCounters() + c, err := db.ObjectCounters() require.NoError(t, err) require.Equal(t, uint64(i+1), c.Phy()) require.Equal(t, uint64(i+1), c.Logic()) } }) - require.NoError(t, db.Reset()) - t.Run("delete_split", func(t *testing.T) { + t.Parallel() + db := newDB(t) oo := putObjs(t, db, objCount, true) // delete objects that have parent info @@ -140,9 +146,9 @@ func TestCounters(t *testing.T) { } }) - require.NoError(t, db.Reset()) - t.Run("inhume_split", func(t *testing.T) { + t.Parallel() + db := newDB(t) oo := putObjs(t, db, objCount, true) inhumedObjs := make([]oid.Address, objCount/2) @@ -159,10 +165,10 @@ func TestCounters(t *testing.T) { prm.SetTombstoneAddress(oidtest.Address()) prm.SetAddresses(inhumedObjs...) 
- _, err = db.Inhume(prm) + _, err := db.Inhume(context.Background(), prm) require.NoError(t, err) - c, err = db.ObjectCounters() + c, err := db.ObjectCounters() require.NoError(t, err) require.Equal(t, uint64(objCount), c.Phy()) @@ -223,7 +229,7 @@ func TestCounters_Expired(t *testing.T) { inhumePrm.SetGCMark() inhumePrm.SetAddresses(oo[0]) - inhumeRes, err := db.Inhume(inhumePrm) + inhumeRes, err := db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) require.Equal(t, uint64(1), inhumeRes.AvailableInhumed()) @@ -240,7 +246,7 @@ func TestCounters_Expired(t *testing.T) { var deletePrm meta.DeletePrm deletePrm.SetAddresses(oo[0]) - deleteRes, err := db.Delete(deletePrm) + deleteRes, err := db.Delete(context.Background(), deletePrm) require.NoError(t, err) require.Zero(t, deleteRes.AvailableObjectsRemoved()) @@ -257,7 +263,7 @@ func TestCounters_Expired(t *testing.T) { deletePrm.SetAddresses(oo[0]) - deleteRes, err = db.Delete(deletePrm) + deleteRes, err = db.Delete(context.Background(), deletePrm) require.NoError(t, err) require.Equal(t, uint64(1), deleteRes.AvailableObjectsRemoved()) @@ -284,7 +290,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*object.Ob oo = append(oo, o) prm.SetObject(o) - _, err = db.Put(prm) + _, err = db.Put(context.Background(), prm) require.NoError(t, err) c, err := db.ObjectCounters() diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index 79f870372..5340f5d08 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -2,15 +2,19 @@ package meta import ( "bytes" + "context" "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // DeletePrm groups the parameters of Delete operation. @@ -65,7 +69,13 @@ type referenceNumber struct { type referenceCounter map[string]*referenceNumber // Delete removed object records from metabase indexes. -func (db *DB) Delete(prm DeletePrm) (DeleteRes, error) { +func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { + _, span := tracing.StartSpanFromContext(ctx, "metabase.Delete", + trace.WithAttributes( + attribute.Int("addr_count", len(prm.addrs)), + )) + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() @@ -335,8 +345,6 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error bucketName = primaryBucketName(cnr, bucketName) case objectSDK.TypeTombstone: bucketName = tombstoneBucketName(cnr, bucketName) - case objectSDK.TypeStorageGroup: - bucketName = storageGroupBucketName(cnr, bucketName) case objectSDK.TypeLock: bucketName = bucketNameLockers(cnr, bucketName) default: diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go index ee161a881..d2a4bfa7b 100644 --- a/pkg/local_object_storage/metabase/delete_test.go +++ b/pkg/local_object_storage/metabase/delete_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "errors" "testing" @@ -139,6 +140,6 @@ func metaDelete(db *meta.DB, addrs ...oid.Address) error { var deletePrm meta.DeletePrm deletePrm.SetAddresses(addrs...) 
- _, err := db.Delete(deletePrm) + _, err := db.Delete(context.Background(), deletePrm) return err } diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 686b65880..cfd37b0d2 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -1,15 +1,19 @@ package meta import ( + "context" "fmt" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // ExistsPrm groups the parameters of Exists operation. @@ -39,7 +43,13 @@ func (p ExistsRes) Exists() bool { // // Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard. // Returns the object.ErrObjectIsExpired if the object is presented but already expired. 
-func (db *DB) Exists(prm ExistsPrm) (res ExistsRes, err error) { +func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err error) { + _, span := tracing.StartSpanFromContext(ctx, "metabase.Exists", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + )) + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go index e344e9ee8..034bc0005 100644 --- a/pkg/local_object_storage/metabase/exists_test.go +++ b/pkg/local_object_storage/metabase/exists_test.go @@ -57,18 +57,6 @@ func TestDB_Exists(t *testing.T) { require.True(t, exists) }) - t.Run("storage group object", func(t *testing.T) { - sg := testutil.GenerateObject() - sg.SetType(objectSDK.TypeStorageGroup) - - err := putBig(db, sg) - require.NoError(t, err) - - exists, err := metaExists(db, object.AddressOf(sg)) - require.NoError(t, err) - require.True(t, exists) - }) - t.Run("lock object", func(t *testing.T) { lock := testutil.GenerateObject() lock.SetType(objectSDK.TypeLock) diff --git a/pkg/local_object_storage/metabase/generic_test.go b/pkg/local_object_storage/metabase/generic_test.go index 227aa9f8d..9d15b6f7a 100644 --- a/pkg/local_object_storage/metabase/generic_test.go +++ b/pkg/local_object_storage/metabase/generic_test.go @@ -10,6 +10,8 @@ import ( ) func TestGeneric(t *testing.T) { + t.Parallel() + defer func() { _ = os.RemoveAll(t.Name()) }() var n int diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index c0feda06c..e76b9d4a7 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -1,14 +1,18 @@ package meta import ( + "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" apistatus 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // GetPrm groups the parameters of Get operation. @@ -46,7 +50,14 @@ func (r GetRes) Header() *objectSDK.Object { // Returns an error of type apistatus.ObjectNotFound if object is missing in DB. // Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard. // Returns the object.ErrObjectIsExpired if the object is presented but already expired. -func (db *DB) Get(prm GetPrm) (res GetRes, err error) { +func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) { + _, span := tracing.StartSpanFromContext(ctx, "metabase.Get", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + attribute.Bool("raw", prm.raw), + )) + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() @@ -95,12 +106,6 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b return obj, obj.Unmarshal(data) } - // if not found then check in storage group index - data = getFromBucket(tx, storageGroupBucketName(cnr, bucketName), key) - if len(data) != 0 { - return obj, obj.Unmarshal(data) - } - // if not found then check in locker index data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key) if len(data) != 0 { diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index a242a099a..c19fea8d8 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -2,6 +2,7 @@ package meta_test import ( "bytes" + "context" "fmt" "os" "runtime" @@ -54,18 +55,6 @@ func TestDB_Get(t *testing.T) { require.Equal(t, raw.CutPayload(), newObj) }) - 
t.Run("put storage group object", func(t *testing.T) { - raw.SetType(objectSDK.TypeStorageGroup) - raw.SetID(oidtest.ID()) - - err := putBig(db, raw) - require.NoError(t, err) - - newObj, err := metaGet(db, object.AddressOf(raw), false) - require.NoError(t, err) - require.Equal(t, raw.CutPayload(), newObj) - }) - t.Run("put lock object", func(t *testing.T) { raw.SetType(objectSDK.TypeLock) raw.SetID(oidtest.ID()) @@ -132,7 +121,7 @@ func TestDB_Get(t *testing.T) { var prm meta.InhumePrm prm.SetAddresses(obj) - _, err = db.Inhume(prm) + _, err = db.Inhume(context.Background(), prm) require.NoError(t, err) _, err = metaGet(db, obj, false) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) @@ -216,7 +205,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { getPrm.SetAddress(addrs[counter%len(addrs)]) counter++ - _, err := db.Get(getPrm) + _, err := db.Get(context.Background(), getPrm) if err != nil { b.Fatal(err) } @@ -235,7 +224,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { var getPrm meta.GetPrm getPrm.SetAddress(addrs[i%len(addrs)]) - _, err := db.Get(getPrm) + _, err := db.Get(context.Background(), getPrm) if err != nil { b.Fatal(err) } @@ -248,6 +237,6 @@ func metaGet(db *meta.DB, addr oid.Address, raw bool) (*objectSDK.Object, error) prm.SetAddress(addr) prm.SetRaw(raw) - res, err := db.Get(prm) + res, err := db.Get(context.Background(), prm) return res.Header(), err } diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go index b8b665541..8cd09e3f7 100644 --- a/pkg/local_object_storage/metabase/graveyard_test.go +++ b/pkg/local_object_storage/metabase/graveyard_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -68,7 +69,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) { inhumePrm.SetAddresses(object.AddressOf(obj1)) inhumePrm.SetGCMark() - _, err = db.Inhume(inhumePrm) + _, err = 
db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) var counter int @@ -138,14 +139,14 @@ func TestDB_IterateDeletedObjects(t *testing.T) { inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) inhumePrm.SetTombstoneAddress(addrTombstone) - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) inhumePrm.SetAddresses(object.AddressOf(obj3), object.AddressOf(obj4)) inhumePrm.SetGCMark() // inhume with GC mark - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) var ( @@ -225,7 +226,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) { object.AddressOf(obj3), object.AddressOf(obj4)) inhumePrm.SetTombstoneAddress(addrTombstone) - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) expectedGraveyard := []oid.Address{ @@ -320,7 +321,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) { object.AddressOf(obj3), object.AddressOf(obj4)) inhumePrm.SetGCMark() - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) expectedGarbage := []oid.Address{ @@ -404,7 +405,7 @@ func TestDB_DropGraves(t *testing.T) { inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) inhumePrm.SetTombstoneAddress(addrTombstone) - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) buriedTS := make([]meta.TombstonedObject, 0) diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index b6e6cadf1..a6887a33b 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -2,9 +2,11 @@ package meta import ( "bytes" + "context" "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -118,7 +120,10 @@ var ErrLockObjectRemoval = logicerr.New("lock object removal") // // NOTE: Marks any object with GC mark (despite any prohibitions on operations // with that object) if WithForceGCMark option has been provided. -func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) { +func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) { + _, span := tracing.StartSpanFromContext(ctx, "metabase.Inhume") + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go index b7ee5ef29..0f0774227 100644 --- a/pkg/local_object_storage/metabase/inhume_test.go +++ b/pkg/local_object_storage/metabase/inhume_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -50,40 +51,40 @@ func TestInhumeTombOnTomb(t *testing.T) { inhumePrm.SetTombstoneAddress(addr2) // inhume addr1 via addr2 - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) existsPrm.SetAddress(addr1) // addr1 should become inhumed {addr1:addr2} - _, err = db.Exists(existsPrm) + _, err = db.Exists(context.Background(), existsPrm) require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved)) inhumePrm.SetAddresses(addr3) inhumePrm.SetTombstoneAddress(addr1) // try to inhume addr3 via addr1 - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) // record with {addr1:addr2} should be removed from graveyard // as a tomb-on-tomb; metabase should return ObjectNotFound // NOT ObjectAlreadyRemoved since that record has been removed // from graveyard but addr1 
is still marked with GC - _, err = db.Exists(existsPrm) + _, err = db.Exists(context.Background(), existsPrm) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) existsPrm.SetAddress(addr3) // addr3 should be inhumed {addr3: addr1} - _, err = db.Exists(existsPrm) + _, err = db.Exists(context.Background(), existsPrm) require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved)) inhumePrm.SetAddresses(addr1) inhumePrm.SetTombstoneAddress(oidtest.Address()) // try to inhume addr1 (which is already a tombstone in graveyard) - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) existsPrm.SetAddress(addr1) @@ -91,7 +92,7 @@ func TestInhumeTombOnTomb(t *testing.T) { // record with addr1 key should not appear in graveyard // (tomb can not be inhumed) but should be kept as object // with GC mark - _, err = db.Exists(existsPrm) + _, err = db.Exists(context.Background(), existsPrm) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) } @@ -100,13 +101,13 @@ func TestInhumeLocked(t *testing.T) { locked := oidtest.Address() - err := db.Lock(locked.Container(), oidtest.ID(), []oid.ID{locked.Object()}) + err := db.Lock(context.Background(), locked.Container(), oidtest.ID(), []oid.ID{locked.Object()}) require.NoError(t, err) var prm meta.InhumePrm prm.SetAddresses(locked) - _, err = db.Inhume(prm) + _, err = db.Inhume(context.Background(), prm) var e apistatus.ObjectLocked require.ErrorAs(t, err, &e) @@ -117,6 +118,6 @@ func metaInhume(db *meta.DB, target, tomb oid.Address) error { inhumePrm.SetAddresses(target) inhumePrm.SetTombstoneAddress(tomb) - _, err := db.Inhume(inhumePrm) + _, err := db.Inhume(context.Background(), inhumePrm) return err } diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go index 3c5888e1c..4c9dc782c 100644 --- a/pkg/local_object_storage/metabase/iterators.go +++ b/pkg/local_object_storage/metabase/iterators.go @@ -186,7 +186,6 @@ func 
iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID) error) error { switch postfix { case primaryPrefix, - storageGroupPrefix, lockersPrefix, tombstonePrefix: default: diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go index 6b3a3612d..69bf2bee5 100644 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ b/pkg/local_object_storage/metabase/iterators_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "strconv" "testing" @@ -25,7 +26,6 @@ func TestDB_IterateExpired(t *testing.T) { for _, typ := range []object.Type{ object.TypeRegular, object.TypeTombstone, - object.TypeStorageGroup, object.TypeLock, } { mAlive[typ] = putWithExpiration(t, db, typ, epoch) @@ -34,7 +34,7 @@ func TestDB_IterateExpired(t *testing.T) { expiredLocked := putWithExpiration(t, db, object.TypeRegular, epoch-1) - require.NoError(t, db.Lock(expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()})) + require.NoError(t, db.Lock(context.Background(), expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()})) err := db.IterateExpired(epoch, func(exp *meta.ExpiredObject) error { if addr, ok := mAlive[exp.Type()]; ok { @@ -81,13 +81,13 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) { prm.SetAddresses(protected1, protected2, protectedLocked) prm.SetTombstoneAddress(ts) - _, err = db.Inhume(prm) + _, err = db.Inhume(context.Background(), prm) require.NoError(t, err) prm.SetAddresses(garbage) prm.SetGCMark() - _, err = db.Inhume(prm) + _, err = db.Inhume(context.Background(), prm) require.NoError(t, err) var handled []oid.Address @@ -107,7 +107,7 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) { require.Contains(t, handled, protected2) require.Contains(t, handled, protectedLocked) - err = db.Lock(protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()}) + err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), 
[]oid.ID{protectedLocked.Object()}) require.NoError(t, err) handled = handled[:0] diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index 93b7efb99..7fe1040bb 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -108,8 +108,6 @@ loop: switch prefix { case primaryPrefix: objType = object.TypeRegular - case storageGroupPrefix: - objType = object.TypeStorageGroup case lockersPrefix: objType = object.TypeLock case tombstonePrefix: diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index ab2d9d75d..4bf3ca827 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -66,11 +66,13 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { } func TestLisObjectsWithCursor(t *testing.T) { + t.Parallel() + db := newDB(t) const ( containers = 5 - total = containers * 5 // regular + ts + sg + child + lock + total = containers * 4 // regular + ts + child + lock ) expected := make([]object.AddressWithType, 0, total) @@ -93,13 +95,6 @@ func TestLisObjectsWithCursor(t *testing.T) { require.NoError(t, err) expected = append(expected, object.AddressWithType{Address: object.AddressOf(obj), Type: objectSDK.TypeTombstone}) - // add one storage group - obj = testutil.GenerateObjectWithCID(containerID) - obj.SetType(objectSDK.TypeStorageGroup) - err = putBig(db, obj) - require.NoError(t, err) - expected = append(expected, object.AddressWithType{Address: object.AddressOf(obj), Type: objectSDK.TypeStorageGroup}) - // add one lock obj = testutil.GenerateObjectWithCID(containerID) obj.SetType(objectSDK.TypeLock) @@ -166,6 +161,8 @@ func TestLisObjectsWithCursor(t *testing.T) { } func TestAddObjectDuringListingWithCursor(t *testing.T) { + t.Parallel() + db := newDB(t) const total = 5 diff --git a/pkg/local_object_storage/metabase/lock.go 
b/pkg/local_object_storage/metabase/lock.go index 2dcc85d40..5c3c9720d 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -2,14 +2,18 @@ package meta import ( "bytes" + "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) var bucketNameLocked = []byte{lockedPrefix} @@ -30,7 +34,15 @@ func bucketNameLockers(idCnr cid.ID, key []byte) []byte { // Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject). // // Locked list should be unique. Panics if it is empty. -func (db *DB) Lock(cnr cid.ID, locker oid.ID, locked []oid.ID) error { +func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.ID) error { + _, span := tracing.StartSpanFromContext(ctx, "metabase.Lock", + trace.WithAttributes( + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("locker", locker.EncodeToString()), + attribute.Int("locked_count", len(locked)), + )) + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() @@ -266,7 +278,13 @@ func (i IsLockedRes) Locked() bool { // object is considered as non-locked. // // Returns only non-logical errors related to underlying database. 
-func (db *DB) IsLocked(prm IsLockedPrm) (res IsLockedRes, err error) { +func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, err error) { + _, span := tracing.StartSpanFromContext(ctx, "metabase.IsLocked", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + )) + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go index efa9fba06..d5e063431 100644 --- a/pkg/local_object_storage/metabase/lock_test.go +++ b/pkg/local_object_storage/metabase/lock_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "testing" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -16,18 +17,19 @@ import ( ) func TestDB_Lock(t *testing.T) { + t.Parallel() + cnr := cidtest.ID() db := newDB(t) t.Run("empty locked list", func(t *testing.T) { - require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, nil) }) - require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, []oid.ID{}) }) + require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) }) + require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, []oid.ID{}) }) }) t.Run("(ir)regular", func(t *testing.T) { for _, typ := range [...]object.Type{ object.TypeTombstone, - object.TypeStorageGroup, object.TypeLock, object.TypeRegular, } { @@ -44,7 +46,7 @@ func TestDB_Lock(t *testing.T) { id, _ := obj.ID() // try to lock it - err = db.Lock(cnr, oidtest.ID(), []oid.ID{id}) + err = db.Lock(context.Background(), cnr, oidtest.ID(), []oid.ID{id}) if typ == object.TypeRegular { require.NoError(t, err, typ) } else { @@ -65,27 +67,27 @@ func TestDB_Lock(t *testing.T) { // check locking relation inhumePrm.SetAddresses(objAddr) - _, err := db.Inhume(inhumePrm) + _, err := db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) 
inhumePrm.SetTombstoneAddress(oidtest.Address()) - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) // try to remove lock object inhumePrm.SetAddresses(lockAddr) - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.Error(t, err) // check that locking relation has not been // dropped inhumePrm.SetAddresses(objAddr) - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) inhumePrm.SetTombstoneAddress(oidtest.Address()) - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) }) @@ -105,7 +107,7 @@ func TestDB_Lock(t *testing.T) { inhumePrm.SetForceGCMark() inhumePrm.SetLockObjectHandling() - res, err := db.Inhume(inhumePrm) + res, err := db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) require.Len(t, res.DeletedLockObjects(), 1) require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0]) @@ -117,7 +119,7 @@ func TestDB_Lock(t *testing.T) { inhumePrm.SetGCMark() // now we can inhume the object - _, err = db.Inhume(inhumePrm) + _, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) }) @@ -134,7 +136,7 @@ func TestDB_Lock(t *testing.T) { inhumePrm.SetAddresses(objectcore.AddressOf(lockObj)) inhumePrm.SetLockObjectHandling() - res, err := db.Inhume(inhumePrm) + res, err := db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) require.Len(t, res.DeletedLockObjects(), 1) require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0]) @@ -151,7 +153,7 @@ func TestDB_Lock(t *testing.T) { for i := 0; i < objsNum; i++ { inhumePrm.SetAddresses(objectcore.AddressOf(objs[i])) - res, err = db.Inhume(inhumePrm) + res, err = db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) require.Len(t, 
res.DeletedLockObjects(), 0) } @@ -164,13 +166,15 @@ func TestDB_Lock(t *testing.T) { inhumePrm.SetForceGCMark() inhumePrm.SetAddresses(objectcore.AddressOf(lockObj)) - res, err := db.Inhume(inhumePrm) + res, err := db.Inhume(context.Background(), inhumePrm) require.NoError(t, err) require.Len(t, res.DeletedLockObjects(), 0) }) } func TestDB_Lock_Expired(t *testing.T) { + t.Parallel() + es := &epochState{e: 123} db := newDB(t, meta.WithEpochState(es)) @@ -184,7 +188,7 @@ func TestDB_Lock_Expired(t *testing.T) { require.ErrorIs(t, err, meta.ErrObjectIsExpired) // lock the obj - require.NoError(t, db.Lock(addr.Container(), oidtest.ID(), []oid.ID{addr.Object()})) + require.NoError(t, db.Lock(context.Background(), addr.Container(), oidtest.ID(), []oid.ID{addr.Object()})) // object is expired but locked, thus, must be available _, err = metaGet(db, addr, false) @@ -192,6 +196,8 @@ func TestDB_Lock_Expired(t *testing.T) { } func TestDB_IsLocked(t *testing.T) { + t.Parallel() + db := newDB(t) // existing and locked objs @@ -202,7 +208,7 @@ func TestDB_IsLocked(t *testing.T) { for _, obj := range objs { prm.SetAddress(objectcore.AddressOf(obj)) - res, err := db.IsLocked(prm) + res, err := db.IsLocked(context.Background(), prm) require.NoError(t, err) require.True(t, res.Locked()) @@ -212,7 +218,7 @@ func TestDB_IsLocked(t *testing.T) { prm.SetAddress(oidtest.Address()) - res, err := db.IsLocked(prm) + res, err := db.IsLocked(context.Background(), prm) require.NoError(t, err) require.False(t, res.Locked()) @@ -224,12 +230,12 @@ func TestDB_IsLocked(t *testing.T) { var putPrm meta.PutPrm putPrm.SetObject(obj) - _, err = db.Put(putPrm) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) prm.SetAddress(objectcore.AddressOf(obj)) - res, err = db.IsLocked(prm) + res, err = db.IsLocked(context.Background(), prm) require.NoError(t, err) require.False(t, res.Locked()) @@ -260,7 +266,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) 
([]*object.Ob err := putBig(db, lockObj) require.NoError(t, err) - err = db.Lock(cnr, lockID, lockedObjIDs) + err = db.Lock(context.Background(), cnr, lockID, lockedObjIDs) require.NoError(t, err) return lockedObjs, lockObj diff --git a/pkg/local_object_storage/metabase/movable.go b/pkg/local_object_storage/metabase/movable.go index e6990dc54..412c46393 100644 --- a/pkg/local_object_storage/metabase/movable.go +++ b/pkg/local_object_storage/metabase/movable.go @@ -1,10 +1,14 @@ package meta import ( + "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // ToMoveItPrm groups the parameters of ToMoveIt operation. @@ -48,7 +52,13 @@ func (p MovableRes) AddressList() []oid.Address { // ToMoveIt marks objects to move it into another shard. This useful for // faster HRW fetching. -func (db *DB) ToMoveIt(prm ToMoveItPrm) (res ToMoveItRes, err error) { +func (db *DB) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (res ToMoveItRes, err error) { + _, span := tracing.StartSpanFromContext(ctx, "metabase.ToMoveIt", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + )) + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() diff --git a/pkg/local_object_storage/metabase/movable_test.go b/pkg/local_object_storage/metabase/movable_test.go index 6918dec29..51e7e6d74 100644 --- a/pkg/local_object_storage/metabase/movable_test.go +++ b/pkg/local_object_storage/metabase/movable_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -61,7 +62,7 @@ func metaToMoveIt(db *meta.DB, addr oid.Address) error { var toMovePrm meta.ToMoveItPrm toMovePrm.SetAddress(addr) - _, err := db.ToMoveIt(toMovePrm) + _, err := db.ToMoveIt(context.Background(), toMovePrm) return err } diff --git 
a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index b0fea6535..bc6520a05 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -1,11 +1,13 @@ package meta import ( + "context" "encoding/binary" "errors" "fmt" gio "io" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" @@ -14,6 +16,8 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/nspcc-dev/neo-go/pkg/io" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) type ( @@ -52,7 +56,13 @@ var ( // // Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard. // Returns the object.ErrObjectIsExpired if the object is presented but already expired. 
-func (db *DB) Put(prm PutPrm) (res PutRes, err error) { +func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) { + _, span := tracing.StartSpanFromContext(ctx, "metabase.Put", + trace.WithAttributes( + attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()), + )) + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() @@ -195,8 +205,6 @@ func putUniqueIndexes( bucketName = primaryBucketName(cnr, bucketName) case objectSDK.TypeTombstone: bucketName = tombstoneBucketName(cnr, bucketName) - case objectSDK.TypeStorageGroup: - bucketName = storageGroupBucketName(cnr, bucketName) case objectSDK.TypeLock: bucketName = bucketNameLockers(cnr, bucketName) default: diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go index 837d931ae..a3a071d19 100644 --- a/pkg/local_object_storage/metabase/put_test.go +++ b/pkg/local_object_storage/metabase/put_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "runtime" "strconv" "testing" @@ -117,7 +118,7 @@ func metaPut(db *meta.DB, obj *objectSDK.Object, id []byte) error { putPrm.SetObject(obj) putPrm.SetStorageID(id) - _, err := db.Put(putPrm) + _, err := db.Put(context.Background(), putPrm) return err } diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 20985f47a..a4b14f77c 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -1,16 +1,21 @@ package meta import ( + "context" "encoding/binary" "errors" "fmt" "strings" v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" + 
"go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -55,7 +60,13 @@ func (r SelectRes) AddressList() []oid.Address { } // Select returns list of addresses of objects that match search filters. -func (db *DB) Select(prm SelectPrm) (res SelectRes, err error) { +func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) { + _, span := tracing.StartSpanFromContext(ctx, "metabase.Select", + trace.WithAttributes( + attribute.String("container_id", prm.cnr.EncodeToString()), + )) + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() @@ -140,7 +151,6 @@ func (db *DB) selectAll(tx *bbolt.Tx, cnr cid.ID, to map[string]int) { bucketName := make([]byte, bucketKeySize) selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, 0) selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, 0) - selectAllFromBucket(tx, storageGroupBucketName(cnr, bucketName), to, 0) selectAllFromBucket(tx, parentBucketName(cnr, bucketName), to, 0) selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, 0) } @@ -195,7 +205,6 @@ func (db *DB) selectFastFilter( case v2object.FilterPropertyPhy: selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, fNum) selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, fNum) - selectAllFromBucket(tx, storageGroupBucketName(cnr, bucketName), to, fNum) selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum) default: // user attribute bucketName := attributeBucketName(cnr, f.Header(), bucketName) @@ -209,10 +218,9 @@ func (db *DB) selectFastFilter( } var mBucketNaming = map[string][]func(cid.ID, []byte) []byte{ - v2object.TypeRegular.String(): {primaryBucketName, parentBucketName}, - v2object.TypeTombstone.String(): {tombstoneBucketName}, - v2object.TypeStorageGroup.String(): {storageGroupBucketName}, - v2object.TypeLock.String(): {bucketNameLockers}, + v2object.TypeRegular.String(): {primaryBucketName, parentBucketName}, + 
v2object.TypeTombstone.String(): {tombstoneBucketName}, + v2object.TypeLock.String(): {bucketNameLockers}, } func allBucketNames(cnr cid.ID) (names [][]byte) { @@ -267,7 +275,7 @@ func (db *DB) selectFromFKBT( ) { // matchFunc, ok := db.matchers[f.Operation()] if !ok { - db.log.Debug("missing matcher", zap.Uint32("operation", uint32(f.Operation()))) + db.log.Debug(logs.MetabaseMissingMatcher, zap.Uint32("operation", uint32(f.Operation()))) return } @@ -290,7 +298,7 @@ func (db *DB) selectFromFKBT( }) }) if err != nil { - db.log.Debug("error in FKBT selection", zap.String("error", err.Error())) + db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error())) } } @@ -360,13 +368,13 @@ func (db *DB) selectFromList( case object.MatchStringEqual: lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value()))) if err != nil { - db.log.Debug("can't decode list bucket leaf", zap.String("error", err.Error())) + db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error())) return } default: fMatch, ok := db.matchers[op] if !ok { - db.log.Debug("unknown operation", zap.Uint32("operation", uint32(op))) + db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op))) return } @@ -374,7 +382,7 @@ func (db *DB) selectFromList( if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(key, val []byte) error { l, err := decodeList(val) if err != nil { - db.log.Debug("can't decode list bucket leaf", + db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()), ) @@ -385,7 +393,7 @@ func (db *DB) selectFromList( return nil }); err != nil { - db.log.Debug("can't iterate over the bucket", + db.log.Debug(logs.MetabaseCantIterateOverTheBucket, zap.String("error", err.Error()), ) @@ -429,7 +437,7 @@ func (db *DB) selectObjectID( default: fMatch, ok := db.matchers[op] if !ok { - db.log.Debug("unknown operation", + db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", 
uint32(f.Operation())), ) @@ -451,7 +459,7 @@ func (db *DB) selectObjectID( return nil }) if err != nil { - db.log.Debug("could not iterate over the buckets", + db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets, zap.String("error", err.Error()), ) } diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 5d4cc75e5..e107085ab 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "encoding/hex" "strconv" "testing" @@ -19,6 +20,8 @@ import ( ) func TestDB_SelectUserAttributes(t *testing.T) { + t.Parallel() + db := newDB(t) cnr := cidtest.ID() @@ -141,6 +144,8 @@ func TestDB_SelectUserAttributes(t *testing.T) { } func TestDB_SelectRootPhyParent(t *testing.T) { + t.Parallel() + db := newDB(t) cnr := cidtest.ID() @@ -156,11 +161,6 @@ func TestDB_SelectRootPhyParent(t *testing.T) { err = putBig(db, ts) require.NoError(t, err) - sg := testutil.GenerateObjectWithCID(cnr) - sg.SetType(objectSDK.TypeStorageGroup) - err = putBig(db, sg) - require.NoError(t, err) - leftChild := testutil.GenerateObjectWithCID(cnr) leftChild.InitRelations() err = putBig(db, leftChild) @@ -209,7 +209,6 @@ func TestDB_SelectRootPhyParent(t *testing.T) { testSelect(t, db, cnr, fs, object.AddressOf(small), object.AddressOf(ts), - object.AddressOf(sg), object.AddressOf(leftChild), object.AddressOf(rightChild), object.AddressOf(link), @@ -236,7 +235,6 @@ func TestDB_SelectRootPhyParent(t *testing.T) { fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeRegular.String(), objectSDK.MatchStringNotEqual) testSelect(t, db, cnr, fs, object.AddressOf(ts), - object.AddressOf(sg), object.AddressOf(lock), ) @@ -258,29 +256,6 @@ func TestDB_SelectRootPhyParent(t *testing.T) { object.AddressOf(rightChild), object.AddressOf(link), object.AddressOf(parent), - object.AddressOf(sg), - object.AddressOf(lock), - ) - - fs = 
objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderObjectType, "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs) - }) - - t.Run("storage group objects", func(t *testing.T) { - fs := objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeStorageGroup.String(), objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, object.AddressOf(sg)) - - fs = objectSDK.SearchFilters{} - fs.AddFilter(v2object.FilterHeaderObjectType, v2object.TypeStorageGroup.String(), objectSDK.MatchStringNotEqual) - testSelect(t, db, cnr, fs, - object.AddressOf(small), - object.AddressOf(leftChild), - object.AddressOf(rightChild), - object.AddressOf(link), - object.AddressOf(parent), - object.AddressOf(ts), object.AddressOf(lock), ) @@ -312,7 +287,6 @@ func TestDB_SelectRootPhyParent(t *testing.T) { testSelect(t, db, cnr, fs, object.AddressOf(small), object.AddressOf(ts), - object.AddressOf(sg), object.AddressOf(leftChild), object.AddressOf(rightChild), object.AddressOf(link), @@ -323,6 +297,8 @@ func TestDB_SelectRootPhyParent(t *testing.T) { } func TestDB_SelectInhume(t *testing.T) { + t.Parallel() + db := newDB(t) cnr := cidtest.ID() @@ -355,6 +331,8 @@ func TestDB_SelectInhume(t *testing.T) { } func TestDB_SelectPayloadHash(t *testing.T) { + t.Parallel() + db := newDB(t) cnr := cidtest.ID() @@ -423,6 +401,8 @@ func TestDB_SelectPayloadHash(t *testing.T) { } func TestDB_SelectWithSlowFilters(t *testing.T) { + t.Parallel() + db := newDB(t) cnr := cidtest.ID() @@ -528,6 +508,8 @@ func TestDB_SelectWithSlowFilters(t *testing.T) { } func TestDB_SelectObjectID(t *testing.T) { + t.Parallel() + db := newDB(t) cnr := cidtest.ID() @@ -549,11 +531,6 @@ func TestDB_SelectObjectID(t *testing.T) { err = putBig(db, ts) require.NoError(t, err) - sg := testutil.GenerateObjectWithCID(cnr) - sg.SetType(objectSDK.TypeStorageGroup) - err = putBig(db, sg) - require.NoError(t, err) - lock := testutil.GenerateObjectWithCID(cnr) lock.SetType(objectSDK.TypeLock) 
err = putBig(db, lock) @@ -575,7 +552,6 @@ func TestDB_SelectObjectID(t *testing.T) { testSelect(t, db, cnr, fs, object.AddressOf(regular), object.AddressOf(parent), - object.AddressOf(sg), object.AddressOf(ts), object.AddressOf(lock), ) @@ -592,7 +568,6 @@ func TestDB_SelectObjectID(t *testing.T) { fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id) testSelect(t, db, cnr, fs, object.AddressOf(parent), - object.AddressOf(sg), object.AddressOf(ts), object.AddressOf(lock), ) @@ -610,24 +585,6 @@ func TestDB_SelectObjectID(t *testing.T) { testSelect(t, db, cnr, fs, object.AddressOf(regular), object.AddressOf(parent), - object.AddressOf(sg), - object.AddressOf(lock), - ) - }) - - t.Run("storage group objects", func(t *testing.T) { - id, _ := sg.ID() - - fs := objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringEqual, id) - testSelect(t, db, cnr, fs, object.AddressOf(sg)) - - fs = objectSDK.SearchFilters{} - fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id) - testSelect(t, db, cnr, fs, - object.AddressOf(regular), - object.AddressOf(parent), - object.AddressOf(ts), object.AddressOf(lock), ) }) @@ -643,7 +600,6 @@ func TestDB_SelectObjectID(t *testing.T) { fs.AddObjectIDFilter(objectSDK.MatchStringNotEqual, id) testSelect(t, db, cnr, fs, object.AddressOf(regular), - object.AddressOf(sg), object.AddressOf(ts), object.AddressOf(lock), ) @@ -661,13 +617,14 @@ func TestDB_SelectObjectID(t *testing.T) { testSelect(t, db, cnr, fs, object.AddressOf(regular), object.AddressOf(parent), - object.AddressOf(sg), object.AddressOf(ts), ) }) } func TestDB_SelectSplitID(t *testing.T) { + t.Parallel() + db := newDB(t) cnr := cidtest.ID() @@ -722,6 +679,8 @@ func TestDB_SelectSplitID(t *testing.T) { } func TestDB_SelectContainerID(t *testing.T) { + t.Parallel() + db := newDB(t) cnr := cidtest.ID() @@ -807,6 +766,8 @@ func BenchmarkSelect(b *testing.B) { } func TestExpiredObjects(t *testing.T) { + t.Parallel() + db := newDB(t, 
meta.WithEpochState(epochState{currEpoch})) checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) { @@ -829,7 +790,7 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear prm.SetFilters(fs) for i := 0; i < b.N; i++ { - res, err := db.Select(prm) + res, err := db.Select(context.Background(), prm) if err != nil { b.Fatal(err) } @@ -844,6 +805,6 @@ func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.A prm.SetFilters(fs) prm.SetContainerID(cnr) - res, err := db.Select(prm) + res, err := db.Select(context.Background(), prm) return res.AddressList(), err } diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go index ae309d4b2..794879a3f 100644 --- a/pkg/local_object_storage/metabase/storage_id.go +++ b/pkg/local_object_storage/metabase/storage_id.go @@ -1,11 +1,15 @@ package meta import ( + "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/nspcc-dev/neo-go/pkg/util/slice" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // StorageIDPrm groups the parameters of StorageID operation. @@ -30,7 +34,13 @@ func (r StorageIDRes) StorageID() []byte { // StorageID returns storage descriptor for objects from the blobstor. // It is put together with the object can makes get/delete operation faster. 
-func (db *DB) StorageID(prm StorageIDPrm) (res StorageIDRes, err error) { +func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) { + _, span := tracing.StartSpanFromContext(ctx, "metabase.StorageID", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + )) + defer span.End() + db.modeMtx.RLock() defer db.modeMtx.RUnlock() diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go index f8185abee..b3652a680 100644 --- a/pkg/local_object_storage/metabase/storage_id_test.go +++ b/pkg/local_object_storage/metabase/storage_id_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -11,6 +12,8 @@ import ( ) func TestDB_StorageID(t *testing.T) { + t.Parallel() + db := newDB(t) raw1 := testutil.GenerateObject() @@ -63,6 +66,6 @@ func metaStorageID(db *meta.DB, addr oid.Address) ([]byte, error) { var sidPrm meta.StorageIDPrm sidPrm.SetAddress(addr) - r, err := db.StorageID(sidPrm) + r, err := db.StorageID(context.Background(), sidPrm) return r.StorageID(), err } diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index b60c97fd7..c50fd051f 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -65,10 +65,10 @@ const ( // Key: object ID // Value: marshalled object lockersPrefix - // storageGroupPrefix is used for prefixing buckets containing objects of STORAGEGROUP type. + // _ is unused. Previous usage was for prefixing buckets containing objects of STORAGEGROUP type. // Key: object ID // Value: marshaled object - storageGroupPrefix + _ // tombstonePrefix is used for prefixing buckets containing objects of TOMBSTONE type. 
// Key: object ID // Value: marshaled object @@ -138,11 +138,6 @@ func tombstoneBucketName(cnr cid.ID, key []byte) []byte { return bucketName(cnr, tombstonePrefix, key) } -// storageGroupBucketName returns _SG. -func storageGroupBucketName(cnr cid.ID, key []byte) []byte { - return bucketName(cnr, storageGroupPrefix, key) -} - // smallBucketName returns _small. func smallBucketName(cnr cid.ID, key []byte) []byte { return bucketName(cnr, smallPrefix, key) @@ -231,15 +226,14 @@ func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) object panic("empty object list in firstIrregularObjectType") } - var keys [3][1 + cidSize]byte + var keys [2][1 + cidSize]byte irregularTypeBuckets := [...]struct { typ object.Type name []byte }{ {object.TypeTombstone, tombstoneBucketName(idCnr, keys[0][:])}, - {object.TypeStorageGroup, storageGroupBucketName(idCnr, keys[1][:])}, - {object.TypeLock, bucketNameLockers(idCnr, keys[2][:])}, + {object.TypeLock, bucketNameLockers(idCnr, keys[1][:])}, } for i := range objs { diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 994c3d416..1ecc89cb5 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -2,6 +2,7 @@ package pilorama import ( "bytes" + "context" "encoding/binary" "errors" "fmt" @@ -11,12 +12,15 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/io" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) type boltForest struct { @@ -144,7 +148,17 @@ func (t *boltForest) Close() error { } // TreeMove implements the 
Forest interface. -func (t *boltForest) TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, error) { +func (t *boltForest) TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error) { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeMove", + trace.WithAttributes( + attribute.String("container_id", d.CID.EncodeToString()), + attribute.Int("position", d.Position), + attribute.Int("size", d.Size), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + if !d.checkValid() { return nil, ErrInvalidCIDDescriptor } @@ -175,7 +189,15 @@ func (t *boltForest) TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, e } // TreeExists implements the Forest interface. -func (t *boltForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) { +func (t *boltForest) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeExists", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + t.modeMtx.RLock() defer t.modeMtx.RUnlock() @@ -197,7 +219,16 @@ func (t *boltForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) { var syncHeightKey = []byte{'h'} // TreeUpdateLastSyncHeight implements the pilorama.Forest interface. 
-func (t *boltForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error { +func (t *boltForest) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeUpdateLastSyncHeight", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("height", fmt.Sprintf("%d", height)), + ), + ) + defer span.End() + rawHeight := make([]byte, 8) binary.LittleEndian.PutUint64(rawHeight, height) @@ -214,7 +245,15 @@ func (t *boltForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, heig } // TreeLastSyncHeight implements the pilorama.Forest interface. -func (t *boltForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) { +func (t *boltForest) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeLastSyncHeight", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + var height uint64 buck := bucketName(cid, treeID) @@ -235,7 +274,20 @@ func (t *boltForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, e } // TreeAddByPath implements the Forest interface. 
-func (t *boltForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) { +func (t *boltForest) TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeAddByPath", + trace.WithAttributes( + attribute.String("container_id", d.CID.EncodeToString()), + attribute.Int("position", d.Position), + attribute.Int("size", d.Size), + attribute.String("tree_id", treeID), + attribute.String("attr", attr), + attribute.Int("path_count", len(path)), + attribute.Int("meta_count", len(meta)), + ), + ) + defer span.End() + if !d.checkValid() { return nil, ErrInvalidCIDDescriptor } @@ -329,7 +381,16 @@ func (t *boltForest) findSpareID(bTree *bbolt.Bucket) uint64 { } // TreeApply implements the Forest interface. -func (t *boltForest) TreeApply(cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error { +func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApply", + trace.WithAttributes( + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.Bool("background", backgroundSync), + ), + ) + defer span.End() + t.modeMtx.RLock() defer t.modeMtx.RUnlock() @@ -627,7 +688,18 @@ func (t *boltForest) isAncestor(b *bbolt.Bucket, parent, child Node) bool { } // TreeGetByPath implements the Forest interface. 
-func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) { +func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetByPath", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("attr", attr), + attribute.Int("path_count", len(path)), + attribute.Bool("latest", latest), + ), + ) + defer span.End() + if !isAttributeInternal(attr) { return nil, ErrNotPathAttribute } @@ -686,7 +758,16 @@ func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, pa } // TreeGetMeta implements the forest interface. -func (t *boltForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) { +func (t *boltForest) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetMeta", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("node_id", fmt.Sprintf("%d", nodeID)), + ), + ) + defer span.End() + t.modeMtx.RLock() defer t.modeMtx.RUnlock() @@ -717,7 +798,16 @@ func (t *boltForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Met } // TreeGetChildren implements the Forest interface. 
-func (t *boltForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) { +func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetChildren", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("node_id", fmt.Sprintf("%d", nodeID)), + ), + ) + defer span.End() + t.modeMtx.RLock() defer t.modeMtx.RUnlock() @@ -749,7 +839,14 @@ func (t *boltForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) } // TreeList implements the Forest interface. -func (t *boltForest) TreeList(cid cidSDK.ID) ([]string, error) { +func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeList", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + ), + ) + defer span.End() + t.modeMtx.RLock() defer t.modeMtx.RUnlock() @@ -783,7 +880,16 @@ func (t *boltForest) TreeList(cid cidSDK.ID) ([]string, error) { } // TreeGetOpLog implements the pilorama.Forest interface. -func (t *boltForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error) { +func (t *boltForest) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetOpLog", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("height", fmt.Sprintf("%d", height)), + ), + ) + defer span.End() + t.modeMtx.RLock() defer t.modeMtx.RUnlock() @@ -813,7 +919,15 @@ func (t *boltForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) ( } // TreeDrop implements the pilorama.Forest interface. 
-func (t *boltForest) TreeDrop(cid cidSDK.ID, treeID string) error { +func (t *boltForest) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error { + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeDrop", + trace.WithAttributes( + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + t.modeMtx.RLock() defer t.modeMtx.RUnlock() diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index fa2f1dcd2..672a38edd 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -1,6 +1,7 @@ package pilorama import ( + "context" "sort" "strings" @@ -11,7 +12,7 @@ import ( // memoryForest represents multiple replicating trees sharing a single storage. type memoryForest struct { // treeMap maps tree identifier (container ID + name) to the replicated log. - treeMap map[string]*state + treeMap map[string]*memoryTree } var _ Forest = (*memoryForest)(nil) @@ -20,12 +21,12 @@ var _ Forest = (*memoryForest)(nil) // TODO: this function will eventually be removed and is here for debugging. func NewMemoryForest() ForestStorage { return &memoryForest{ - treeMap: make(map[string]*state), + treeMap: make(map[string]*memoryTree), } } // TreeMove implements the Forest interface. 
-func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move, error) { +func (f *memoryForest) TreeMove(_ context.Context, d CIDDescriptor, treeID string, op *Move) (*Move, error) { if !d.checkValid() { return nil, ErrInvalidCIDDescriptor } @@ -33,7 +34,7 @@ func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move fullID := d.CID.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { - s = newState() + s = newMemoryTree() f.treeMap[fullID] = s } @@ -48,7 +49,7 @@ func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move } // TreeAddByPath implements the Forest interface. -func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, m []KeyValue) ([]Move, error) { +func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID string, attr string, path []string, m []KeyValue) ([]Move, error) { if !d.checkValid() { return nil, ErrInvalidCIDDescriptor } @@ -59,7 +60,7 @@ func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string fullID := d.CID.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { - s = newState() + s = newMemoryTree() f.treeMap[fullID] = s } @@ -88,16 +89,17 @@ func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string }, Child: s.findSpareID(), }) + s.operations = append(s.operations, op) lm[len(lm)-1] = op.Move return lm, nil } // TreeApply implements the Forest interface. -func (f *memoryForest) TreeApply(cnr cid.ID, treeID string, op *Move, _ bool) error { +func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, op *Move, _ bool) error { fullID := cnr.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { - s = newState() + s = newMemoryTree() f.treeMap[fullID] = s } @@ -119,7 +121,7 @@ func (f *memoryForest) Close() error { } // TreeGetByPath implements the Forest interface. 
-func (f *memoryForest) TreeGetByPath(cid cid.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) { +func (f *memoryForest) TreeGetByPath(_ context.Context, cid cid.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) { if !isAttributeInternal(attr) { return nil, ErrNotPathAttribute } @@ -130,40 +132,36 @@ func (f *memoryForest) TreeGetByPath(cid cid.ID, treeID string, attr string, pat return nil, ErrTreeNotFound } - return s.get(attr, path, latest), nil + return s.getByPath(attr, path, latest), nil } // TreeGetMeta implements the Forest interface. -func (f *memoryForest) TreeGetMeta(cid cid.ID, treeID string, nodeID Node) (Meta, Node, error) { +func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, nodeID Node) (Meta, Node, error) { fullID := cid.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { return Meta{}, 0, ErrTreeNotFound } - return s.getMeta(nodeID), s.infoMap[nodeID].Parent, nil + return s.infoMap[nodeID].Meta, s.infoMap[nodeID].Parent, nil } // TreeGetChildren implements the Forest interface. -func (f *memoryForest) TreeGetChildren(cid cid.ID, treeID string, nodeID Node) ([]uint64, error) { +func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID string, nodeID Node) ([]uint64, error) { fullID := cid.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { return nil, ErrTreeNotFound } - children, ok := s.childMap[nodeID] - if !ok { - return nil, nil - } - + children := s.tree.getChildren(nodeID) res := make([]Node, len(children)) copy(res, children) return res, nil } // TreeGetOpLog implements the pilorama.Forest interface. 
-func (f *memoryForest) TreeGetOpLog(cid cid.ID, treeID string, height uint64) (Move, error) { +func (f *memoryForest) TreeGetOpLog(_ context.Context, cid cid.ID, treeID string, height uint64) (Move, error) { fullID := cid.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { @@ -180,7 +178,7 @@ func (f *memoryForest) TreeGetOpLog(cid cid.ID, treeID string, height uint64) (M } // TreeDrop implements the pilorama.Forest interface. -func (f *memoryForest) TreeDrop(cid cid.ID, treeID string) error { +func (f *memoryForest) TreeDrop(_ context.Context, cid cid.ID, treeID string) error { cidStr := cid.String() if treeID == "" { for k := range f.treeMap { @@ -200,7 +198,7 @@ func (f *memoryForest) TreeDrop(cid cid.ID, treeID string) error { } // TreeList implements the pilorama.Forest interface. -func (f *memoryForest) TreeList(cid cid.ID) ([]string, error) { +func (f *memoryForest) TreeList(_ context.Context, cid cid.ID) ([]string, error) { var res []string cidStr := cid.EncodeToString() @@ -217,14 +215,14 @@ func (f *memoryForest) TreeList(cid cid.ID) ([]string, error) { } // TreeExists implements the pilorama.Forest interface. -func (f *memoryForest) TreeExists(cid cid.ID, treeID string) (bool, error) { +func (f *memoryForest) TreeExists(_ context.Context, cid cid.ID, treeID string) (bool, error) { fullID := cid.EncodeToString() + "/" + treeID _, ok := f.treeMap[fullID] return ok, nil } // TreeUpdateLastSyncHeight implements the pilorama.Forest interface. -func (f *memoryForest) TreeUpdateLastSyncHeight(cid cid.ID, treeID string, height uint64) error { +func (f *memoryForest) TreeUpdateLastSyncHeight(_ context.Context, cid cid.ID, treeID string, height uint64) error { fullID := cid.EncodeToString() + "/" + treeID t, ok := f.treeMap[fullID] if !ok { @@ -235,7 +233,7 @@ func (f *memoryForest) TreeUpdateLastSyncHeight(cid cid.ID, treeID string, heigh } // TreeLastSyncHeight implements the pilorama.Forest interface. 
-func (f *memoryForest) TreeLastSyncHeight(cid cid.ID, treeID string) (uint64, error) { +func (f *memoryForest) TreeLastSyncHeight(_ context.Context, cid cid.ID, treeID string) (uint64, error) { fullID := cid.EncodeToString() + "/" + treeID t, ok := f.treeMap[fullID] if !ok { diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index bbd35246c..0cff28c3f 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -1,9 +1,9 @@ package pilorama import ( + "context" "fmt" "math/rand" - "os" "path/filepath" "strconv" "sync" @@ -30,26 +30,21 @@ var providers = []struct { return f }}, {"bbolt", func(t testing.TB, opts ...Option) Forest { - // Use `os.TempDir` because we construct multiple times in the same test. - tmpDir, err := os.MkdirTemp(os.TempDir(), "*") - require.NoError(t, err) - f := NewBoltForest( append([]Option{ - WithPath(filepath.Join(tmpDir, "test.db")), + WithPath(filepath.Join(t.TempDir(), "test.db")), WithMaxBatchSize(1)}, opts...)...) 
require.NoError(t, f.Open(false)) require.NoError(t, f.Init()) t.Cleanup(func() { require.NoError(t, f.Close()) - require.NoError(t, os.RemoveAll(tmpDir)) }) return f }}, } func testMeta(t *testing.T, f Forest, cid cidSDK.ID, treeID string, nodeID, parentID Node, expected Meta) { - actualMeta, actualParent, err := f.TreeGetMeta(cid, treeID, nodeID) + actualMeta, actualParent, err := f.TreeGetMeta(context.Background(), cid, treeID, nodeID) require.NoError(t, err) require.Equal(t, parentID, actualParent) require.Equal(t, expected, actualMeta) @@ -71,13 +66,13 @@ func testForestTreeMove(t *testing.T, s Forest) { meta := []KeyValue{ {Key: AttributeVersion, Value: []byte("XXX")}, {Key: AttributeFilename, Value: []byte("file.txt")}} - lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta) + lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta) require.NoError(t, err) require.Equal(t, 3, len(lm)) nodeID := lm[2].Child t.Run("invalid descriptor", func(t *testing.T) { - _, err = s.TreeMove(CIDDescriptor{cid, 0, 0}, treeID, &Move{ + _, err = s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, &Move{ Parent: lm[1].Child, Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})}, Child: nodeID, @@ -85,7 +80,7 @@ func testForestTreeMove(t *testing.T, s Forest) { require.ErrorIs(t, err, ErrInvalidCIDDescriptor) }) t.Run("same parent, update meta", func(t *testing.T) { - res, err := s.TreeMove(d, treeID, &Move{ + res, err := s.TreeMove(context.Background(), d, treeID, &Move{ Parent: lm[1].Child, Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})}, Child: nodeID, @@ -93,12 +88,12 @@ func testForestTreeMove(t *testing.T, s Forest) { require.NoError(t, err) require.Equal(t, res.Child, nodeID) - nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false) + nodes, err := 
s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false) require.NoError(t, err) require.ElementsMatch(t, []Node{nodeID}, nodes) }) t.Run("different parent", func(t *testing.T) { - res, err := s.TreeMove(d, treeID, &Move{ + res, err := s.TreeMove(context.Background(), d, treeID, &Move{ Parent: RootID, Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})}, Child: nodeID, @@ -106,11 +101,11 @@ func testForestTreeMove(t *testing.T, s Forest) { require.NoError(t, err) require.Equal(t, res.Child, nodeID) - nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false) + nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false) require.NoError(t, err) require.True(t, len(nodes) == 0) - nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"file.txt"}, false) + nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false) require.NoError(t, err) require.ElementsMatch(t, []Node{nodeID}, nodes) }) @@ -130,7 +125,7 @@ func testForestTreeGetChildren(t *testing.T, s Forest) { treeID := "version" treeAdd := func(t *testing.T, child, parent Node) { - _, err := s.TreeMove(d, treeID, &Move{ + _, err := s.TreeMove(context.Background(), d, treeID, &Move{ Parent: parent, Child: child, }) @@ -152,7 +147,7 @@ func testForestTreeGetChildren(t *testing.T, s Forest) { treeAdd(t, 7, 0) testGetChildren := func(t *testing.T, nodeID Node, expected []Node) { - actual, err := s.TreeGetChildren(cid, treeID, nodeID) + actual, err := s.TreeGetChildren(context.Background(), cid, treeID, nodeID) require.NoError(t, err) require.ElementsMatch(t, expected, actual) } @@ -168,7 +163,7 @@ func testForestTreeGetChildren(t *testing.T, s Forest) { testGetChildren(t, 42, nil) }) t.Run("missing tree", func(t *testing.T) { - _, err := 
s.TreeGetChildren(cid, treeID+"123", 0) + _, err := s.TreeGetChildren(context.Background(), cid, treeID+"123", 0) require.ErrorIs(t, err, ErrTreeNotFound) }) } @@ -191,10 +186,10 @@ func testForestTreeDrop(t *testing.T, s Forest) { cid := cids[0] t.Run("return nil if not found", func(t *testing.T) { - require.ErrorIs(t, s.TreeDrop(cid, "123"), ErrTreeNotFound) + require.ErrorIs(t, s.TreeDrop(context.Background(), cid, "123"), ErrTreeNotFound) }) - require.NoError(t, s.TreeDrop(cid, "")) + require.NoError(t, s.TreeDrop(context.Background(), cid, "")) trees := []string{"tree1", "tree2"} var descs [cidsSize]CIDDescriptor @@ -203,39 +198,39 @@ func testForestTreeDrop(t *testing.T, s Forest) { } d := descs[0] for i := range trees { - _, err := s.TreeAddByPath(d, trees[i], AttributeFilename, []string{"path"}, + _, err := s.TreeAddByPath(context.Background(), d, trees[i], AttributeFilename, []string{"path"}, []KeyValue{{Key: "TreeName", Value: []byte(trees[i])}}) require.NoError(t, err) } - err := s.TreeDrop(cid, trees[0]) + err := s.TreeDrop(context.Background(), cid, trees[0]) require.NoError(t, err) - _, err = s.TreeGetByPath(cid, trees[0], AttributeFilename, []string{"path"}, true) + _, err = s.TreeGetByPath(context.Background(), cid, trees[0], AttributeFilename, []string{"path"}, true) require.ErrorIs(t, err, ErrTreeNotFound) - _, err = s.TreeGetByPath(cid, trees[1], AttributeFilename, []string{"path"}, true) + _, err = s.TreeGetByPath(context.Background(), cid, trees[1], AttributeFilename, []string{"path"}, true) require.NoError(t, err) for j := range descs { for i := range trees { - _, err := s.TreeAddByPath(descs[j], trees[i], AttributeFilename, []string{"path"}, + _, err := s.TreeAddByPath(context.Background(), descs[j], trees[i], AttributeFilename, []string{"path"}, []KeyValue{{Key: "TreeName", Value: []byte(trees[i])}}) require.NoError(t, err) } } - list, err := s.TreeList(cid) + list, err := s.TreeList(context.Background(), cid) require.NoError(t, err) 
require.NotEmpty(t, list) - require.NoError(t, s.TreeDrop(cid, "")) + require.NoError(t, s.TreeDrop(context.Background(), cid, "")) - list, err = s.TreeList(cid) + list, err = s.TreeList(context.Background(), cid) require.NoError(t, err) require.Empty(t, list) for j := 1; j < len(cids); j++ { - list, err = s.TreeList(cids[j]) + list, err = s.TreeList(context.Background(), cids[j]) require.NoError(t, err) require.Equal(t, len(list), len(trees)) } @@ -264,24 +259,24 @@ func testForestTreeAdd(t *testing.T, s Forest) { } t.Run("invalid descriptor", func(t *testing.T) { - _, err := s.TreeMove(CIDDescriptor{cid, 0, 0}, treeID, m) + _, err := s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, m) require.ErrorIs(t, err, ErrInvalidCIDDescriptor) }) - lm, err := s.TreeMove(d, treeID, m) + lm, err := s.TreeMove(context.Background(), d, treeID, m) require.NoError(t, err) testMeta(t, s, cid, treeID, lm.Child, lm.Parent, Meta{Time: lm.Time, Items: meta}) - nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"file.txt"}, false) + nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false) require.NoError(t, err) require.ElementsMatch(t, []Node{lm.Child}, nodes) t.Run("other trees are unaffected", func(t *testing.T) { - _, err := s.TreeGetByPath(cid, treeID+"123", AttributeFilename, []string{"file.txt"}, false) + _, err := s.TreeGetByPath(context.Background(), cid, treeID+"123", AttributeFilename, []string{"file.txt"}, false) require.ErrorIs(t, err, ErrTreeNotFound) - _, _, err = s.TreeGetMeta(cid, treeID+"123", 0) + _, _, err = s.TreeGetMeta(context.Background(), cid, treeID+"123", 0) require.ErrorIs(t, err, ErrTreeNotFound) }) } @@ -304,15 +299,15 @@ func testForestTreeAddByPath(t *testing.T, s Forest) { {Key: AttributeFilename, Value: []byte("file.txt")}} t.Run("invalid descriptor", func(t *testing.T) { - _, err := s.TreeAddByPath(CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, 
[]string{"yyy"}, meta) + _, err := s.TreeAddByPath(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, []string{"yyy"}, meta) require.ErrorIs(t, err, ErrInvalidCIDDescriptor) }) t.Run("invalid attribute", func(t *testing.T) { - _, err := s.TreeAddByPath(d, treeID, AttributeVersion, []string{"yyy"}, meta) + _, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeVersion, []string{"yyy"}, meta) require.ErrorIs(t, err, ErrNotPathAttribute) }) - lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta) + lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta) require.NoError(t, err) require.Equal(t, 3, len(lm)) testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("path")}}}) @@ -321,8 +316,12 @@ func testForestTreeAddByPath(t *testing.T, s Forest) { firstID := lm[2].Child testMeta(t, s, cid, treeID, firstID, lm[2].Parent, Meta{Time: lm[2].Time, Items: meta}) + // TreeAddByPath must return operations in increasing time order. + require.True(t, lm[0].Time < lm[1].Time) + require.True(t, lm[1].Time < lm[2].Time) + meta[0].Value = []byte("YYY") - lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta) + lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta) require.NoError(t, err) require.Equal(t, 1, len(lm)) @@ -331,19 +330,19 @@ func testForestTreeAddByPath(t *testing.T, s Forest) { t.Run("get versions", func(t *testing.T) { // All versions. - nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false) + nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false) require.NoError(t, err) require.ElementsMatch(t, []Node{firstID, secondID}, nodes) // Latest version. 
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, true) + nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, true) require.NoError(t, err) require.Equal(t, []Node{secondID}, nodes) }) meta[0].Value = []byte("ZZZ") meta[1].Value = []byte("cat.jpg") - lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "dir"}, meta) + lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "dir"}, meta) require.NoError(t, err) require.Equal(t, 2, len(lm)) testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("dir")}}}) @@ -352,7 +351,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) { t.Run("create internal nodes", func(t *testing.T) { meta[0].Value = []byte("SomeValue") meta[1].Value = []byte("another") - lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path"}, meta) + lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path"}, meta) require.NoError(t, err) require.Equal(t, 1, len(lm)) @@ -360,7 +359,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) { meta[0].Value = []byte("Leaf") meta[1].Value = []byte("file.txt") - lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "another"}, meta) + lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "another"}, meta) require.NoError(t, err) require.Equal(t, 2, len(lm)) @@ -375,12 +374,12 @@ func testForestTreeAddByPath(t *testing.T, s Forest) { {AttributeFilename, []byte("another")}}}) t.Run("get by path", func(t *testing.T) { - nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "another"}, false) + nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another"}, false) require.NoError(t, err) 
require.Equal(t, 2, len(nodes)) require.ElementsMatch(t, []Node{lm[0].Child, oldMove.Child}, nodes) - nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "another", "file.txt"}, false) + nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another", "file.txt"}, false) require.NoError(t, err) require.Equal(t, 1, len(nodes)) require.Equal(t, lm[1].Child, nodes[0]) @@ -391,11 +390,11 @@ func testForestTreeAddByPath(t *testing.T, s Forest) { meta := []KeyValue{ {Key: AttributeVersion, Value: []byte("XXX")}, {Key: AttributeFilename, Value: []byte{}}} - lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta) + lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta) require.NoError(t, err) require.Equal(t, 1, len(lm)) - nodes, err := s.TreeGetByPath(d.CID, treeID, AttributeFilename, []string{"path", "to", ""}, false) + nodes, err := s.TreeGetByPath(context.Background(), d.CID, treeID, AttributeFilename, []string{"path", "to", ""}, false) require.NoError(t, err) require.Equal(t, 1, len(nodes)) require.Equal(t, lm[0].Child, nodes[0]) @@ -415,7 +414,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio treeID := "version" testApply := func(t *testing.T, s Forest, child, parent Node, meta Meta) { - require.NoError(t, s.TreeApply(cid, treeID, &Move{ + require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{ Child: child, Parent: parent, Meta: meta, @@ -475,16 +474,16 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op s := constructor(t) t.Run("empty log, no panic", func(t *testing.T) { - _, err := s.TreeGetOpLog(cid, treeID, 0) + _, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0) require.ErrorIs(t, err, ErrTreeNotFound) }) for i := range logs { - require.NoError(t, s.TreeApply(cid, treeID, &logs[i], false)) + require.NoError(t, 
s.TreeApply(context.Background(), cid, treeID, &logs[i], false)) } testGetOpLog := func(t *testing.T, height uint64, m Move) { - lm, err := s.TreeGetOpLog(cid, treeID, height) + lm, err := s.TreeGetOpLog(context.Background(), cid, treeID, height) require.NoError(t, err) require.Equal(t, m, lm) } @@ -498,7 +497,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op testGetOpLog(t, 261, Move{}) }) t.Run("missing tree", func(t *testing.T) { - _, err := s.TreeGetOpLog(cid, treeID+"123", 4) + _, err := s.TreeGetOpLog(context.Background(), cid, treeID+"123", 4) require.ErrorIs(t, err, ErrTreeNotFound) }) } @@ -515,7 +514,7 @@ func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...O s := constructor(t) checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) { - actual, err := s.TreeExists(cid, treeID) + actual, err := s.TreeExists(context.Background(), cid, treeID) require.NoError(t, err) require.Equal(t, expected, actual) } @@ -527,13 +526,13 @@ func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...O checkExists(t, false, cid, treeID) }) - require.NoError(t, s.TreeApply(cid, treeID, &Move{Parent: 0, Child: 1}, false)) + require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{Parent: 0, Child: 1}, false)) checkExists(t, true, cid, treeID) checkExists(t, false, cidtest.ID(), treeID) // different CID, same tree checkExists(t, false, cid, "another tree") // same CID, different tree t.Run("can be removed", func(t *testing.T) { - require.NoError(t, s.TreeDrop(cid, treeID)) + require.NoError(t, s.TreeDrop(context.Background(), cid, treeID)) checkExists(t, false, cid, treeID) }) } @@ -563,11 +562,11 @@ func TestApplyTricky1(t *testing.T) { t.Run(providers[i].name, func(t *testing.T) { s := providers[i].construct(t) for i := range ops { - require.NoError(t, s.TreeApply(cid, treeID, &ops[i], false)) + require.NoError(t, s.TreeApply(context.Background(), cid, 
treeID, &ops[i], false)) } for i := range expected { - _, parent, err := s.TreeGetMeta(cid, treeID, expected[i].child) + _, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child) require.NoError(t, err) require.Equal(t, expected[i].parent, parent) } @@ -624,11 +623,11 @@ func TestApplyTricky2(t *testing.T) { t.Run(providers[i].name, func(t *testing.T) { s := providers[i].construct(t) for i := range ops { - require.NoError(t, s.TreeApply(cid, treeID, &ops[i], false)) + require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } for i := range expected { - _, parent, err := s.TreeGetMeta(cid, treeID, expected[i].child) + _, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child) require.NoError(t, err) require.Equal(t, expected[i].parent, parent) } @@ -697,9 +696,9 @@ func prepareRandomTree(nodeCount, opCount int) []Move { func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) { for i := uint64(0); i < uint64(nodeCount); i++ { - expectedMeta, expectedParent, err := expected.TreeGetMeta(cid, treeID, i) + expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i) require.NoError(t, err) - actualMeta, actualParent, err := actual.TreeGetMeta(cid, treeID, i) + actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i) require.NoError(t, err) require.Equal(t, expectedParent, actualParent, "node id: %d", i) require.Equal(t, expectedMeta, actualMeta, "node id: %d", i) @@ -713,13 +712,6 @@ func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID require.True(t, ok) require.Equal(t, se.operations, sa.operations) require.Equal(t, se.infoMap, sa.infoMap) - - require.Equal(t, len(se.childMap), len(sa.childMap)) - for ck, la := range sa.childMap { - le, ok := se.childMap[ck] - require.True(t, ok) - require.ElementsMatch(t, le, la) - } } require.Equal(t, expected, 
actual, i) } @@ -738,7 +730,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ expected := constructor(t) for i := range ops { - require.NoError(t, expected.TreeApply(cid, treeID, &ops[i], false)) + require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } for i := 0; i < iterCount; i++ { @@ -753,7 +745,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ go func() { defer wg.Done() for op := range ch { - require.NoError(t, actual.TreeApply(cid, treeID, op, false)) + require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, op, false)) } }() } @@ -783,7 +775,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ .. expected := constructor(t) for i := range ops { - require.NoError(t, expected.TreeApply(cid, treeID, &ops[i], false)) + require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } const iterCount = 200 @@ -793,7 +785,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ .. 
actual := constructor(t) for i := range ops { - require.NoError(t, actual.TreeApply(cid, treeID, &ops[i], false)) + require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } compareForests(t, expected, actual, cid, treeID, nodeCount) } @@ -886,7 +878,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) { b.SetParallelism(10) b.RunParallel(func(pb *testing.PB) { for pb.Next() { - if err := s.TreeApply(cid, treeID, &ops[<-ch], false); err != nil { + if err := s.TreeApply(context.Background(), cid, treeID, &ops[<-ch], false); err != nil { b.Fatalf("error in `Apply`: %v", err) } } @@ -929,27 +921,27 @@ func testTreeGetByPath(t *testing.T, s Forest) { } t.Run("invalid attribute", func(t *testing.T) { - _, err := s.TreeGetByPath(cid, treeID, AttributeVersion, []string{"", "TTT"}, false) + _, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeVersion, []string{"", "TTT"}, false) require.ErrorIs(t, err, ErrNotPathAttribute) }) - nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"b", "cat1.jpg"}, false) + nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"b", "cat1.jpg"}, false) require.NoError(t, err) require.Equal(t, []Node{4, 5}, nodes) - nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"a", "cat1.jpg"}, false) + nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat1.jpg"}, false) require.Equal(t, []Node{3}, nodes) t.Run("missing child", func(t *testing.T) { - nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"a", "cat3.jpg"}, false) + nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat3.jpg"}, false) require.True(t, len(nodes) == 0) }) t.Run("missing parent", func(t *testing.T) { - nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"xyz", "cat1.jpg"}, false) + nodes, err = 
s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"xyz", "cat1.jpg"}, false) require.True(t, len(nodes) == 0) }) t.Run("empty path", func(t *testing.T) { - nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, nil, false) + nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, nil, false) require.True(t, len(nodes) == 0) }) } @@ -961,7 +953,7 @@ func testMove(t *testing.T, s Forest, ts int, node, parent Node, cid cidSDK.ID, items = append(items, KeyValue{AttributeVersion, []byte(version)}) } - require.NoError(t, s.TreeApply(cid, treeID, &Move{ + require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{ Parent: parent, Child: node, Meta: Meta{ @@ -1000,7 +992,7 @@ func testTreeGetTrees(t *testing.T, s Forest) { d.CID = cid for _, treeID := range treeIDs[cid] { - _, err := s.TreeAddByPath(d, treeID, objectSDK.AttributeFileName, []string{"path"}, nil) + _, err := s.TreeAddByPath(context.Background(), d, treeID, objectSDK.AttributeFileName, []string{"path"}, nil) require.NoError(t, err) } } @@ -1008,7 +1000,7 @@ func testTreeGetTrees(t *testing.T, s Forest) { for _, cid := range cids { d.CID = cid - trees, err := s.TreeList(cid) + trees, err := s.TreeList(context.Background(), cid) require.NoError(t, err) require.ElementsMatch(t, treeIDs[cid], trees) @@ -1028,38 +1020,38 @@ func testTreeLastSyncHeight(t *testing.T, f Forest) { treeID := "someTree" t.Run("ErrNotFound if no log operations are stored for a tree", func(t *testing.T) { - _, err := f.TreeLastSyncHeight(cnr, treeID) + _, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID) require.ErrorIs(t, err, ErrTreeNotFound) - err = f.TreeUpdateLastSyncHeight(cnr, treeID, 1) + err = f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 1) require.ErrorIs(t, err, ErrTreeNotFound) }) - _, err := f.TreeMove(CIDDescriptor{CID: cnr, Size: 1}, treeID, &Move{ + _, err := f.TreeMove(context.Background(), CIDDescriptor{CID: cnr, 
Size: 1}, treeID, &Move{ Parent: RootID, Child: 1, }) require.NoError(t, err) - h, err := f.TreeLastSyncHeight(cnr, treeID) + h, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID) require.NoError(t, err) require.EqualValues(t, 0, h) t.Run("separate storages for separate containers", func(t *testing.T) { - _, err := f.TreeLastSyncHeight(cidtest.ID(), treeID) + _, err := f.TreeLastSyncHeight(context.Background(), cidtest.ID(), treeID) require.ErrorIs(t, err, ErrTreeNotFound) }) - require.NoError(t, f.TreeUpdateLastSyncHeight(cnr, treeID, 10)) + require.NoError(t, f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 10)) - h, err = f.TreeLastSyncHeight(cnr, treeID) + h, err = f.TreeLastSyncHeight(context.Background(), cnr, treeID) require.NoError(t, err) require.EqualValues(t, 10, h) t.Run("removed correctly", func(t *testing.T) { - require.NoError(t, f.TreeDrop(cnr, treeID)) + require.NoError(t, f.TreeDrop(context.Background(), cnr, treeID)) - _, err := f.TreeLastSyncHeight(cnr, treeID) + _, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID) require.ErrorIs(t, err, ErrTreeNotFound) }) } diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go index 92dc9b6aa..1bde312ac 100644 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ b/pkg/local_object_storage/pilorama/inmemory.go @@ -1,5 +1,7 @@ package pilorama +import "sort" + // nodeInfo couples parent and metadata. type nodeInfo struct { Parent Node @@ -12,42 +14,25 @@ type move struct { Old nodeInfo } -// state represents state being replicated. -type state struct { +// memoryTree represents the state of a tree being replicated. +type memoryTree struct { operations []move tree } -// newState constructs new empty tree. -func newState() *state { - return &state{ - tree: *newTree(), +// newMemoryTree constructs a new empty tree.
+func newMemoryTree() *memoryTree { + return &memoryTree{ + tree: tree{ + infoMap: make(map[Node]nodeInfo), + }, } } // undo un-does op and changes s in-place. -func (s *state) undo(op *move) { - children := s.tree.childMap[op.Parent] - for i := range children { - if children[i] == op.Child { - if len(children) > 1 { - s.tree.childMap[op.Parent] = append(children[:i], children[i+1:]...) - } else { - delete(s.tree.childMap, op.Parent) - } - break - } - } - +func (s *memoryTree) undo(op *move) { if op.HasOld { s.tree.infoMap[op.Child] = op.Old - oldChildren := s.tree.childMap[op.Old.Parent] - for i := range oldChildren { - if oldChildren[i] == op.Child { - return - } - } - s.tree.childMap[op.Old.Parent] = append(oldChildren, op.Child) } else { delete(s.tree.infoMap, op.Child) } @@ -55,7 +40,7 @@ func (s *state) undo(op *move) { // Apply puts op in log at a proper position, re-applies all subsequent operations // from log and changes s in-place. -func (s *state) Apply(op *Move) error { +func (s *memoryTree) Apply(op *Move) error { var index int for index = len(s.operations); index > 0; index-- { if s.operations[index-1].Time <= op.Time { @@ -82,7 +67,7 @@ func (s *state) Apply(op *Move) error { } // do performs a single move operation on a tree. -func (s *state) do(op *Move) move { +func (s *memoryTree) do(op *Move) move { lm := move{ Move: Move{ Parent: op.Parent, @@ -104,36 +89,23 @@ func (s *state) do(op *Move) move { if !ok { p.Meta.Time = op.Time - } else { - s.removeChild(op.Child, p.Parent) } p.Meta = op.Meta p.Parent = op.Parent s.tree.infoMap[op.Child] = p - s.tree.childMap[op.Parent] = append(s.tree.childMap[op.Parent], op.Child) return lm } -func (s *state) removeChild(child, parent Node) { - oldChildren := s.tree.childMap[parent] - for i := range oldChildren { - if oldChildren[i] == child { - s.tree.childMap[parent] = append(oldChildren[:i], oldChildren[i+1:]...) 
- break - } - } -} - -func (s *state) timestamp(pos, size int) Timestamp { +func (s *memoryTree) timestamp(pos, size int) Timestamp { if len(s.operations) == 0 { return nextTimestamp(0, uint64(pos), uint64(size)) } return nextTimestamp(s.operations[len(s.operations)-1].Time, uint64(pos), uint64(size)) } -func (s *state) findSpareID() Node { +func (s *memoryTree) findSpareID() Node { id := uint64(1) for _, ok := s.infoMap[id]; ok; _, ok = s.infoMap[id] { id++ @@ -145,14 +117,22 @@ func (s *state) findSpareID() Node { type tree struct { syncHeight uint64 infoMap map[Node]nodeInfo - childMap map[Node][]Node } -func newTree() *tree { - return &tree{ - childMap: make(map[Node][]Node), - infoMap: make(map[Node]nodeInfo), +func (t tree) getChildren(parent Node) []Node { + var children []Node + for c, info := range t.infoMap { + if info.Parent == parent { + children = append(children, c) + } } + + sort.Slice(children, func(i, j int) bool { + a := t.infoMap[children[i]] + b := t.infoMap[children[j]] + return a.Meta.Time < b.Meta.Time + }) + return children } // isAncestor returns true if parent is an ancestor of a child. @@ -176,7 +156,7 @@ func (t tree) getPathPrefix(attr string, path []string) (int, Node) { loop: for i := range path { - children := t.childMap[curNode] + children := t.getChildren(curNode) for j := range children { meta := t.infoMap[children[j]].Meta f := meta.GetAttr(attr) @@ -191,9 +171,10 @@ loop: return len(path), curNode } -// get returns list of nodes which have the specified path from root +// getByPath returns list of nodes which have the specified path from root // descending by values of attr from meta. -func (t tree) get(attr string, path []string, latest bool) []Node { +// If latest is true, only the latest node is returned. 
+func (t tree) getByPath(attr string, path []string, latest bool) []Node { if len(path) == 0 { return nil } @@ -206,7 +187,7 @@ func (t tree) get(attr string, path []string, latest bool) []Node { var nodes []Node var lastTs Timestamp - children := t.childMap[curNode] + children := t.getChildren(curNode) for i := range children { info := t.infoMap[children[i]] fileName := string(info.Meta.GetAttr(attr)) @@ -223,8 +204,3 @@ func (t tree) get(attr string, path []string, latest bool) []Node { return nodes } - -// getMeta returns meta information of node n. -func (t tree) getMeta(n Node) Meta { - return t.infoMap[n].Meta -} diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index 290f633a5..9ca721be8 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -1,6 +1,8 @@ package pilorama import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -11,43 +13,43 @@ type Forest interface { // TreeMove moves node in the tree. // If the parent of the move operation is TrashID, the node is removed. // If the child of the move operation is RootID, new ID is generated and added to a tree. - TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, error) + TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error) // TreeAddByPath adds new node in the tree using provided path. // The path is constructed by descending from the root using the values of the attr in meta. // Internal nodes in path should have exactly one attribute, otherwise a new node is created. 
- TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) + TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) // TreeApply applies replicated operation from another node. // If background is true, TreeApply will first check whether an operation exists. - TreeApply(cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error + TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error // TreeGetByPath returns all nodes corresponding to the path. // The path is constructed by descending from the root using the values of the // AttributeFilename in meta. // The last argument determines whether only the node with the latest timestamp is returned. // Should return ErrTreeNotFound if the tree is not found, and empty result if the path is not in the tree. - TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) + TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) // TreeGetMeta returns meta information of the node with the specified ID. // Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree. - TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) + TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) // TreeGetChildren returns children of the node with the specified ID. The order is arbitrary. // Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree. - TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) + TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) // TreeGetOpLog returns first log operation stored at or above the height. 
// In case no such operation is found, empty Move and nil error should be returned. - TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error) + TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) // TreeDrop drops a tree from the database. // If the tree is not found, ErrTreeNotFound should be returned. // In case of empty treeID drops all trees related to container. - TreeDrop(cid cidSDK.ID, treeID string) error + TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error // TreeList returns all the tree IDs that have been added to the // passed container ID. Nil slice should be returned if no tree found. - TreeList(cid cidSDK.ID) ([]string, error) + TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) // TreeExists checks if a tree exists locally. // If the tree is not found, false and a nil error should be returned. - TreeExists(cid cidSDK.ID, treeID string) (bool, error) + TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) // TreeUpdateLastSyncHeight updates last log height synchronized with _all_ container nodes. - TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error + TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error // TreeLastSyncHeight returns last log height synchronized with _all_ container nodes. 
- TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) + TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) } type ForestStorage interface { diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index d727d27a5..e74f235f8 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -15,7 +17,7 @@ import ( ) func (s *Shard) handleMetabaseFailure(stage string, err error) error { - s.log.Error("metabase failure, switching mode", + s.log.Error(logs.ShardMetabaseFailureSwitchingMode, zap.String("stage", stage), zap.Stringer("mode", mode.ReadOnly), zap.Error(err)) @@ -25,7 +27,7 @@ func (s *Shard) handleMetabaseFailure(stage string, err error) error { return nil } - s.log.Error("can't move shard to readonly, switch mode", + s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode, zap.String("stage", stage), zap.Stringer("mode", mode.DegradedReadOnly), zap.Error(err)) @@ -79,7 +81,10 @@ func (s *Shard) Open() error { type metabaseSynchronizer Shard func (x *metabaseSynchronizer) Init() error { - return (*Shard)(x).refillMetabase() + ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init") + defer span.End() + + return (*Shard)(x).refillMetabase(ctx) } // Init initializes all Shard's components. 
@@ -157,7 +162,7 @@ func (s *Shard) Init(ctx context.Context) error { return nil } -func (s *Shard) refillMetabase() error { +func (s *Shard) refillMetabase(ctx context.Context) error { err := s.metaBase.Reset() if err != nil { return fmt.Errorf("could not reset metabase: %w", err) @@ -167,7 +172,7 @@ func (s *Shard) refillMetabase() error { err = blobstor.IterateBinaryObjects(s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error { if err := obj.Unmarshal(data); err != nil { - s.log.Warn("could not unmarshal object", + s.log.Warn(logs.ShardCouldNotUnmarshalObject, zap.Stringer("address", addr), zap.String("err", err.Error())) return nil @@ -176,9 +181,9 @@ func (s *Shard) refillMetabase() error { var err error switch obj.Type() { case objectSDK.TypeTombstone: - err = s.refillTombstoneObject(obj) + err = s.refillTombstoneObject(ctx, obj) case objectSDK.TypeLock: - err = s.refillLockObject(obj) + err = s.refillLockObject(ctx, obj) default: } if err != nil { @@ -189,7 +194,7 @@ func (s *Shard) refillMetabase() error { mPrm.SetObject(obj) mPrm.SetStorageID(descriptor) - _, err = s.metaBase.Put(mPrm) + _, err = s.metaBase.Put(ctx, mPrm) if err != nil && !meta.IsErrRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) { return err } @@ -208,7 +213,7 @@ func (s *Shard) refillMetabase() error { return nil } -func (s *Shard) refillLockObject(obj *objectSDK.Object) error { +func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error { var lock objectSDK.Lock if err := lock.Unmarshal(obj.Payload()); err != nil { return fmt.Errorf("could not unmarshal lock content: %w", err) @@ -219,14 +224,14 @@ func (s *Shard) refillLockObject(obj *objectSDK.Object) error { cnr, _ := obj.ContainerID() id, _ := obj.ID() - err := s.metaBase.Lock(cnr, id, locked) + err := s.metaBase.Lock(ctx, cnr, id, locked) if err != nil { return fmt.Errorf("could not lock objects: %w", err) } return nil } -func (s *Shard) refillTombstoneObject(obj 
*objectSDK.Object) error { +func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object) error { tombstone := objectSDK.NewTombstone() if err := tombstone.Unmarshal(obj.Payload()); err != nil { @@ -249,7 +254,7 @@ func (s *Shard) refillTombstoneObject(obj *objectSDK.Object) error { inhumePrm.SetTombstoneAddress(tombAddr) inhumePrm.SetAddresses(tombMembers...) - _, err := s.metaBase.Inhume(inhumePrm) + _, err := s.metaBase.Inhume(ctx, inhumePrm) if err != nil { return fmt.Errorf("could not inhume objects: %w", err) } @@ -274,7 +279,7 @@ func (s *Shard) Close() error { for _, component := range components { if err := component.Close(); err != nil { lastErr = err - s.log.Error("could not close shard component", zap.Error(err)) + s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err)) } } @@ -289,7 +294,10 @@ func (s *Shard) Close() error { // Reload reloads configuration portions that are necessary. // If a config option is invalid, it logs an error and returns nil. // If there was a problem with applying new configuration, an error is returned. -func (s *Shard) Reload(opts ...Option) error { +func (s *Shard) Reload(ctx context.Context, opts ...Option) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Reload") + defer span.End() + // Do not use defaultCfg here missing options need not be reloaded. var c cfg for i := range opts { @@ -302,7 +310,7 @@ func (s *Shard) Reload(opts ...Option) error { ok, err := s.metaBase.Reload(c.metaOpts...) if err != nil { if errors.Is(err, meta.ErrDegradedMode) { - s.log.Error("can't open metabase, move to a degraded mode", zap.Error(err)) + s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err)) _ = s.setMode(mode.DegradedReadOnly) } return err @@ -313,17 +321,17 @@ func (s *Shard) Reload(opts ...Option) error { // Here we refill metabase only if a new instance was opened. 
This is a feature, // we don't want to hang for some time just because we forgot to change // config after the node was updated. - err = s.refillMetabase() + err = s.refillMetabase(ctx) } else { err = s.metaBase.Init() } if err != nil { - s.log.Error("can't initialize metabase, move to a degraded-read-only mode", zap.Error(err)) + s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err)) _ = s.setMode(mode.DegradedReadOnly) return err } } - s.log.Info("trying to restore read-write mode") + s.log.Info(logs.ShardTryingToRestoreReadwriteMode) return s.setMode(mode.ReadWrite) } diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go index 50ea20bb8..3f0a19cd4 100644 --- a/pkg/local_object_storage/shard/control_test.go +++ b/pkg/local_object_storage/shard/control_test.go @@ -43,6 +43,8 @@ type objAddr struct { } func TestShardOpen(t *testing.T) { + t.Parallel() + dir := t.TempDir() metaPath := filepath.Join(dir, "meta") @@ -111,6 +113,8 @@ func TestShardOpen(t *testing.T) { } func TestRefillMetabaseCorrupted(t *testing.T) { + t.Parallel() + dir := t.TempDir() fsTree := fstree.New( @@ -126,6 +130,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) { } sh := New( + WithID(NewIDFromBytes([]byte{})), WithBlobStorOptions(blobOpts...), WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))), WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{}))) @@ -138,12 +143,12 @@ func TestRefillMetabaseCorrupted(t *testing.T) { var putPrm PutPrm putPrm.SetObject(obj) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) require.NoError(t, sh.Close()) addr := object.AddressOf(obj) - _, err = fsTree.Put(common.PutPrm{Address: addr, RawData: []byte("not an object")}) + _, err = fsTree.Put(context.Background(), common.PutPrm{Address: addr, RawData: []byte("not an object")}) require.NoError(t, err) sh = New( 
@@ -163,6 +168,8 @@ func TestRefillMetabaseCorrupted(t *testing.T) { } func TestRefillMetabase(t *testing.T) { + t.Parallel() + p := t.Name() defer os.RemoveAll(p) @@ -245,13 +252,13 @@ func TestRefillMetabase(t *testing.T) { for _, v := range mObjs { putPrm.SetObject(v.obj) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) } putPrm.SetObject(tombObj) - _, err = sh.Put(putPrm) + _, err = sh.Put(context.Background(), putPrm) require.NoError(t, err) // LOCK object handling @@ -263,11 +270,11 @@ func TestRefillMetabase(t *testing.T) { objectSDK.WriteLock(lockObj, lock) putPrm.SetObject(lockObj) - _, err = sh.Put(putPrm) + _, err = sh.Put(context.Background(), putPrm) require.NoError(t, err) lockID, _ := lockObj.ID() - require.NoError(t, sh.Lock(cnrLocked, lockID, locked)) + require.NoError(t, sh.Lock(context.Background(), cnrLocked, lockID, locked)) var inhumePrm InhumePrm inhumePrm.SetTarget(object.AddressOf(tombObj), tombMembers...) @@ -368,7 +375,7 @@ func TestRefillMetabase(t *testing.T) { checkObj(object.AddressOf(tombObj), nil) checkTombMembers(false) - err = sh.refillMetabase() + err = sh.refillMetabase(context.Background()) require.NoError(t, err) c, err = sh.metaBase.ObjectCounters() diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index 6ae3bf7dd..f086aa30f 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -1,12 +1,17 @@ package shard import ( + "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + 
"go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -27,14 +32,21 @@ func (p *DeletePrm) SetAddresses(addr ...oid.Address) { // Delete removes data from the shard's writeCache, metaBase and // blobStor. -func (s *Shard) Delete(prm DeletePrm) (DeleteRes, error) { +func (s *Shard) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Delete", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.Int("addr_count", len(prm.addr)), + )) + defer span.End() + s.m.RLock() defer s.m.RUnlock() - return s.delete(prm) + return s.delete(ctx, prm) } -func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) { +func (s *Shard) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { if s.info.Mode.ReadOnly() { return DeleteRes{}, ErrReadOnlyMode } else if s.info.Mode.NoMetabase() { @@ -47,18 +59,18 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) { for i := range prm.addr { if s.hasWriteCache() { - err := s.writeCache.Delete(prm.addr[i]) + err := s.writeCache.Delete(ctx, prm.addr[i]) if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) { - s.log.Warn("can't delete object from write cache", zap.String("error", err.Error())) + s.log.Warn(logs.ShardCantDeleteObjectFromWriteCache, zap.String("error", err.Error())) } } var sPrm meta.StorageIDPrm sPrm.SetAddress(prm.addr[i]) - res, err := s.metaBase.StorageID(sPrm) + res, err := s.metaBase.StorageID(ctx, sPrm) if err != nil { - s.log.Debug("can't get storage ID from metabase", + s.log.Debug(logs.ShardCantGetStorageIDFromMetabase, zap.Stringer("object", prm.addr[i]), zap.String("error", err.Error())) @@ -73,7 +85,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) { var delPrm meta.DeletePrm delPrm.SetAddresses(prm.addr...) 
- res, err := s.metaBase.Delete(delPrm) + res, err := s.metaBase.Delete(ctx, delPrm) if err != nil { return DeleteRes{}, err // stop on metabase error ? } @@ -98,9 +110,9 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) { id := smalls[prm.addr[i]] delPrm.StorageID = id - _, err = s.blobStor.Delete(delPrm) + _, err = s.blobStor.Delete(ctx, delPrm) if err != nil { - s.log.Debug("can't remove object from blobStor", + s.log.Debug(logs.ShardCantRemoveObjectFromBlobStor, zap.Stringer("object_address", prm.addr[i]), zap.String("error", err.Error())) } diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go index c37dfa285..441e1c455 100644 --- a/pkg/local_object_storage/shard/delete_test.go +++ b/pkg/local_object_storage/shard/delete_test.go @@ -13,11 +13,15 @@ import ( ) func TestShard_Delete(t *testing.T) { + t.Parallel() + t.Run("without write cache", func(t *testing.T) { + t.Parallel() testShardDelete(t, false) }) t.Run("with write cache", func(t *testing.T) { + t.Parallel() testShardDelete(t, true) }) } @@ -43,13 +47,13 @@ func testShardDelete(t *testing.T, hasWriteCache bool) { var delPrm shard.DeletePrm delPrm.SetAddresses(object.AddressOf(obj)) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) _, err = testGet(t, sh, getPrm, hasWriteCache) require.NoError(t, err) - _, err = sh.Delete(delPrm) + _, err = sh.Delete(context.TODO(), delPrm) require.NoError(t, err) _, err = sh.Get(context.Background(), getPrm) @@ -67,13 +71,13 @@ func testShardDelete(t *testing.T, hasWriteCache bool) { var delPrm shard.DeletePrm delPrm.SetAddresses(object.AddressOf(obj)) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) _, err = sh.Get(context.Background(), getPrm) require.NoError(t, err) - _, err = sh.Delete(delPrm) + _, err = sh.Delete(context.Background(), delPrm) require.NoError(t, err) _, err = sh.Get(context.Background(), 
getPrm) diff --git a/pkg/local_object_storage/shard/dump.go b/pkg/local_object_storage/shard/dump.go deleted file mode 100644 index 8d9fe0f71..000000000 --- a/pkg/local_object_storage/shard/dump.go +++ /dev/null @@ -1,129 +0,0 @@ -package shard - -import ( - "encoding/binary" - "io" - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" -) - -var dumpMagic = []byte("NEOF") - -// DumpPrm groups the parameters of Dump operation. -type DumpPrm struct { - path string - stream io.Writer - ignoreErrors bool -} - -// WithPath is an Dump option to set the destination path. -func (p *DumpPrm) WithPath(path string) { - p.path = path -} - -// WithStream is an Dump option to set the destination stream. -// It takes priority over `path` option. -func (p *DumpPrm) WithStream(r io.Writer) { - p.stream = r -} - -// WithIgnoreErrors is an Dump option to allow ignore all errors during iteration. -// This includes invalid blobovniczas as well as corrupted objects. -func (p *DumpPrm) WithIgnoreErrors(ignore bool) { - p.ignoreErrors = ignore -} - -// DumpRes groups the result fields of Dump operation. -type DumpRes struct { - count int -} - -// Count return amount of object written. -func (r DumpRes) Count() int { - return r.count -} - -var ErrMustBeReadOnly = logicerr.New("shard must be in read-only mode") - -// Dump dumps all objects from the shard to a file or stream. -// -// Returns any error encountered. 
-func (s *Shard) Dump(prm DumpPrm) (DumpRes, error) { - s.m.RLock() - defer s.m.RUnlock() - - if !s.info.Mode.ReadOnly() { - return DumpRes{}, ErrMustBeReadOnly - } - - w := prm.stream - if w == nil { - f, err := os.OpenFile(prm.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0640) - if err != nil { - return DumpRes{}, err - } - defer f.Close() - - w = f - } - - _, err := w.Write(dumpMagic) - if err != nil { - return DumpRes{}, err - } - - var count int - - if s.hasWriteCache() { - var iterPrm writecache.IterationPrm - - iterPrm.WithIgnoreErrors(prm.ignoreErrors) - iterPrm.WithHandler(func(data []byte) error { - var size [4]byte - binary.LittleEndian.PutUint32(size[:], uint32(len(data))) - if _, err := w.Write(size[:]); err != nil { - return err - } - - if _, err := w.Write(data); err != nil { - return err - } - - count++ - return nil - }) - - err := s.writeCache.Iterate(iterPrm) - if err != nil { - return DumpRes{}, err - } - } - - var pi common.IteratePrm - pi.IgnoreErrors = prm.ignoreErrors - pi.Handler = func(elem common.IterationElement) error { - data := elem.ObjectData - - var size [4]byte - binary.LittleEndian.PutUint32(size[:], uint32(len(data))) - if _, err := w.Write(size[:]); err != nil { - return err - } - - if _, err := w.Write(data); err != nil { - return err - } - - count++ - return nil - } - - if _, err := s.blobStor.Iterate(pi); err != nil { - return DumpRes{}, err - } - - return DumpRes{count: count}, nil -} diff --git a/pkg/local_object_storage/shard/dump_test.go b/pkg/local_object_storage/shard/dump_test.go deleted file mode 100644 index 9d585cc06..000000000 --- a/pkg/local_object_storage/shard/dump_test.go +++ /dev/null @@ -1,412 +0,0 @@ -package shard_test - -import ( - "bytes" - "context" - "io" - "math/rand" - "os" - "path/filepath" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/klauspost/compress/zstd" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestDump(t *testing.T) { - t.Run("without write-cache", func(t *testing.T) { - testDump(t, 10, false) - }) - t.Run("with write-cache", func(t *testing.T) { - // Put a bit more objects to write-cache to facilitate race-conditions. 
- testDump(t, 100, true) - }) -} - -func testDump(t *testing.T, objCount int, hasWriteCache bool) { - const ( - wcSmallObjectSize = 1024 // 1 KiB, goes to write-cache memory - wcBigObjectSize = 4 * 1024 // 4 KiB, goes to write-cache FSTree - bsSmallObjectSize = 10 * 1024 // 10 KiB, goes to blobovnicza DB - bsBigObjectSize = 1024*1024 + 1 // > 1 MiB, goes to blobovnicza FSTree - ) - - var sh *shard.Shard - if !hasWriteCache { - sh = newShard(t, false) - } else { - sh = newCustomShard(t, t.TempDir(), true, - []writecache.Option{ - writecache.WithSmallObjectSize(wcSmallObjectSize), - writecache.WithMaxObjectSize(wcBigObjectSize), - writecache.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), - }, - nil) - } - defer releaseShard(sh, t) - - out := filepath.Join(t.TempDir(), "dump") - var prm shard.DumpPrm - prm.WithPath(out) - - t.Run("must be read-only", func(t *testing.T) { - _, err := sh.Dump(prm) - require.ErrorIs(t, err, shard.ErrMustBeReadOnly) - }) - - require.NoError(t, sh.SetMode(mode.ReadOnly)) - outEmpty := out + ".empty" - var dumpPrm shard.DumpPrm - dumpPrm.WithPath(outEmpty) - - res, err := sh.Dump(dumpPrm) - require.NoError(t, err) - require.Equal(t, 0, res.Count()) - require.NoError(t, sh.SetMode(mode.ReadWrite)) - - // Approximate object header size. 
- const headerSize = 400 - - objects := make([]*objectSDK.Object, objCount) - for i := 0; i < objCount; i++ { - cnr := cidtest.ID() - var size int - switch i % 6 { - case 0, 1: - size = wcSmallObjectSize - headerSize - case 2, 3: - size = bsSmallObjectSize - headerSize - case 4: - size = wcBigObjectSize - headerSize - default: - size = bsBigObjectSize - headerSize - } - data := make([]byte, size) - rand.Read(data) - obj := testutil.GenerateObjectWithCIDWithPayload(cnr, data) - objects[i] = obj - - var prm shard.PutPrm - prm.SetObject(objects[i]) - _, err := sh.Put(prm) - require.NoError(t, err) - } - - require.NoError(t, sh.SetMode(mode.ReadOnly)) - - t.Run("invalid path", func(t *testing.T) { - var dumpPrm shard.DumpPrm - dumpPrm.WithPath("\x00") - - _, err := sh.Dump(dumpPrm) - require.Error(t, err) - }) - - res, err = sh.Dump(prm) - require.NoError(t, err) - require.Equal(t, objCount, res.Count()) - - t.Run("restore", func(t *testing.T) { - sh := newShard(t, false) - defer releaseShard(sh, t) - - t.Run("empty dump", func(t *testing.T) { - var restorePrm shard.RestorePrm - restorePrm.WithPath(outEmpty) - res, err := sh.Restore(restorePrm) - require.NoError(t, err) - require.Equal(t, 0, res.Count()) - }) - - t.Run("invalid path", func(t *testing.T) { - _, err := sh.Restore(*new(shard.RestorePrm)) - require.ErrorIs(t, err, os.ErrNotExist) - }) - - t.Run("invalid file", func(t *testing.T) { - t.Run("invalid magic", func(t *testing.T) { - out := out + ".wrongmagic" - require.NoError(t, os.WriteFile(out, []byte{0, 0, 0, 0}, os.ModePerm)) - - var restorePrm shard.RestorePrm - restorePrm.WithPath(out) - - _, err := sh.Restore(restorePrm) - require.ErrorIs(t, err, shard.ErrInvalidMagic) - }) - - fileData, err := os.ReadFile(out) - require.NoError(t, err) - - t.Run("incomplete size", func(t *testing.T) { - out := out + ".wrongsize" - fileData := append(fileData, 1) - require.NoError(t, os.WriteFile(out, fileData, os.ModePerm)) - - var restorePrm shard.RestorePrm - 
restorePrm.WithPath(out) - - _, err := sh.Restore(restorePrm) - require.ErrorIs(t, err, io.ErrUnexpectedEOF) - }) - t.Run("incomplete object data", func(t *testing.T) { - out := out + ".wrongsize" - fileData := append(fileData, 1, 0, 0, 0) - require.NoError(t, os.WriteFile(out, fileData, os.ModePerm)) - - var restorePrm shard.RestorePrm - restorePrm.WithPath(out) - - _, err := sh.Restore(restorePrm) - require.ErrorIs(t, err, io.EOF) - }) - t.Run("invalid object", func(t *testing.T) { - out := out + ".wrongobj" - fileData := append(fileData, 1, 0, 0, 0, 0xFF, 4, 0, 0, 0, 1, 2, 3, 4) - require.NoError(t, os.WriteFile(out, fileData, os.ModePerm)) - - var restorePrm shard.RestorePrm - restorePrm.WithPath(out) - - _, err := sh.Restore(restorePrm) - require.Error(t, err) - - t.Run("skip errors", func(t *testing.T) { - sh := newCustomShard(t, filepath.Join(t.TempDir(), "ignore"), false, nil, nil) - t.Cleanup(func() { require.NoError(t, sh.Close()) }) - - var restorePrm shard.RestorePrm - restorePrm.WithPath(out) - restorePrm.WithIgnoreErrors(true) - - res, err := sh.Restore(restorePrm) - require.NoError(t, err) - require.Equal(t, objCount, res.Count()) - require.Equal(t, 2, res.FailCount()) - }) - }) - }) - - var prm shard.RestorePrm - prm.WithPath(out) - t.Run("must allow write", func(t *testing.T) { - require.NoError(t, sh.SetMode(mode.ReadOnly)) - - _, err := sh.Restore(prm) - require.ErrorIs(t, err, shard.ErrReadOnlyMode) - }) - - require.NoError(t, sh.SetMode(mode.ReadWrite)) - - checkRestore(t, sh, prm, objects) - }) -} - -func TestStream(t *testing.T) { - sh1 := newCustomShard(t, filepath.Join(t.TempDir(), "shard1"), false, nil, nil) - defer releaseShard(sh1, t) - - sh2 := newCustomShard(t, filepath.Join(t.TempDir(), "shard2"), false, nil, nil) - defer releaseShard(sh2, t) - - const objCount = 5 - objects := make([]*objectSDK.Object, objCount) - for i := 0; i < objCount; i++ { - cnr := cidtest.ID() - obj := testutil.GenerateObjectWithCID(cnr) - objects[i] = obj - - 
var prm shard.PutPrm - prm.SetObject(objects[i]) - _, err := sh1.Put(prm) - require.NoError(t, err) - } - - require.NoError(t, sh1.SetMode(mode.ReadOnly)) - - r, w := io.Pipe() - finish := make(chan struct{}) - - go func() { - var dumpPrm shard.DumpPrm - dumpPrm.WithStream(w) - - res, err := sh1.Dump(dumpPrm) - require.NoError(t, err) - require.Equal(t, objCount, res.Count()) - require.NoError(t, w.Close()) - close(finish) - }() - - var restorePrm shard.RestorePrm - restorePrm.WithStream(r) - - checkRestore(t, sh2, restorePrm, objects) - require.Eventually(t, func() bool { - select { - case <-finish: - return true - default: - return false - } - }, time.Second, time.Millisecond) -} - -func checkRestore(t *testing.T, sh *shard.Shard, prm shard.RestorePrm, objects []*objectSDK.Object) { - res, err := sh.Restore(prm) - require.NoError(t, err) - require.Equal(t, len(objects), res.Count()) - - var getPrm shard.GetPrm - - for i := range objects { - getPrm.SetAddress(object.AddressOf(objects[i])) - res, err := sh.Get(context.Background(), getPrm) - require.NoError(t, err) - require.Equal(t, objects[i], res.Object()) - } -} - -func TestDumpIgnoreErrors(t *testing.T) { - const ( - wcSmallObjectSize = 512 // goes to write-cache memory - wcBigObjectSize = wcSmallObjectSize << 1 // goes to write-cache FSTree - bsSmallObjectSize = wcSmallObjectSize << 2 // goes to blobovnicza DB - - objCount = 10 - headerSize = 400 - ) - - dir := t.TempDir() - bsPath := filepath.Join(dir, "blob") - bsOpts := func(sw uint64) []blobstor.Option { - return []blobstor.Option{ - blobstor.WithCompressObjects(true), - blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: blobovniczatree.NewBlobovniczaTree( - blobovniczatree.WithRootPath(filepath.Join(bsPath, "blobovnicza")), - blobovniczatree.WithBlobovniczaShallowDepth(1), - blobovniczatree.WithBlobovniczaShallowWidth(sw), - blobovniczatree.WithOpenedCacheSize(1)), - Policy: func(_ *objectSDK.Object, data []byte) bool { - return len(data) < 
bsSmallObjectSize - }, - }, - { - Storage: fstree.New( - fstree.WithPath(bsPath), - fstree.WithDepth(1)), - }, - }), - } - } - wcPath := filepath.Join(dir, "writecache") - wcOpts := []writecache.Option{ - writecache.WithPath(wcPath), - writecache.WithSmallObjectSize(wcSmallObjectSize), - writecache.WithMaxObjectSize(wcBigObjectSize), - } - sh := newCustomShard(t, dir, true, wcOpts, bsOpts(2)) - - objects := make([]*objectSDK.Object, objCount) - for i := 0; i < objCount; i++ { - size := (wcSmallObjectSize << (i % 4)) - headerSize - obj := testutil.GenerateObjectWithCIDWithPayload(cidtest.ID(), make([]byte, size)) - objects[i] = obj - - var prm shard.PutPrm - prm.SetObject(objects[i]) - _, err := sh.Put(prm) - require.NoError(t, err) - } - - releaseShard(sh, t) - - b := bytes.NewBuffer(nil) - badObject := make([]byte, 1000) - enc, err := zstd.NewWriter(b) - require.NoError(t, err) - corruptedData := enc.EncodeAll(badObject, nil) - for i := 4; i < len(corruptedData); i++ { - corruptedData[i] ^= 0xFF - } - - // There are 3 different types of errors to consider. - // To setup envirionment we use implementation details so this test must be updated - // if any of them are changed. - { - // 1. Invalid object in fs tree. - // 1.1. Invalid compressed data. - addr := cidtest.ID().EncodeToString() + "." + objecttest.ID().EncodeToString() - dirName := filepath.Join(bsPath, addr[:2]) - require.NoError(t, os.MkdirAll(dirName, os.ModePerm)) - require.NoError(t, os.WriteFile(filepath.Join(dirName, addr[2:]), corruptedData, os.ModePerm)) - - // 1.2. Unreadable file. - addr = cidtest.ID().EncodeToString() + "." + objecttest.ID().EncodeToString() - dirName = filepath.Join(bsPath, addr[:2]) - require.NoError(t, os.MkdirAll(dirName, os.ModePerm)) - - fname := filepath.Join(dirName, addr[2:]) - require.NoError(t, os.WriteFile(fname, []byte{}, 0)) - - // 1.3. Unreadable dir. 
- require.NoError(t, os.MkdirAll(filepath.Join(bsPath, "ZZ"), 0)) - } - - sh = newCustomShard(t, dir, true, wcOpts, bsOpts(3)) - require.NoError(t, sh.SetMode(mode.ReadOnly)) - - { - // 2. Invalid object in blobovnicza. - // 2.1. Invalid blobovnicza. - bTree := filepath.Join(bsPath, "blobovnicza") - data := make([]byte, 1024) - rand.Read(data) - require.NoError(t, os.WriteFile(filepath.Join(bTree, "0", "2"), data, 0)) - - // 2.2. Invalid object in valid blobovnicza. - var prm blobovnicza.PutPrm - prm.SetAddress(oid.Address{}) - prm.SetMarshaledObject(corruptedData) - b := blobovnicza.New(blobovnicza.WithPath(filepath.Join(bTree, "1", "2"))) - require.NoError(t, b.Open()) - _, err := b.Put(prm) - require.NoError(t, err) - require.NoError(t, b.Close()) - } - - { - // 3. Invalid object in write-cache. Note that because shard is read-only - // the object won't be flushed. - addr := cidtest.ID().EncodeToString() + "." + objecttest.ID().EncodeToString() - dir := filepath.Join(wcPath, addr[:1]) - require.NoError(t, os.MkdirAll(dir, os.ModePerm)) - require.NoError(t, os.WriteFile(filepath.Join(dir, addr[1:]), nil, 0)) - } - - out := filepath.Join(t.TempDir(), "out.dump") - var dumpPrm shard.DumpPrm - dumpPrm.WithPath(out) - dumpPrm.WithIgnoreErrors(true) - res, err := sh.Dump(dumpPrm) - require.NoError(t, err) - require.Equal(t, objCount, res.Count()) -} diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go index 76e4347d4..66c61fccc 100644 --- a/pkg/local_object_storage/shard/exists.go +++ b/pkg/local_object_storage/shard/exists.go @@ -3,9 +3,12 @@ package shard import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + 
"go.opentelemetry.io/otel/trace" ) // ExistsPrm groups the parameters of Exists operation. @@ -36,6 +39,13 @@ func (p ExistsRes) Exists() bool { // Returns an error of type apistatus.ObjectAlreadyRemoved if object has been marked as removed. // Returns the object.ErrObjectIsExpired if the object is presented but already expired. func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Exists", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("address", prm.addr.EncodeToString()), + )) + defer span.End() + var exists bool var err error @@ -54,7 +64,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { existsPrm.SetAddress(prm.addr) var res meta.ExistsRes - res, err = s.metaBase.Exists(existsPrm) + res, err = s.metaBase.Exists(ctx, existsPrm) exists = res.Exists() } diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 6f18e6c3a..f4f7a21e2 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" @@ -124,7 +125,7 @@ func (gc *gc) listenEvents(ctx context.Context) { for { event, ok := <-gc.eventChan if !ok { - gc.log.Warn("stop event listener by closed channel") + gc.log.Warn(logs.ShardStopEventListenerByClosedChannel) return } @@ -149,7 +150,7 @@ func (gc *gc) listenEvents(ctx context.Context) { v.prevGroup.Done() }) if err != nil { - gc.log.Warn("could not submit GC job to worker pool", + gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool, zap.String("error", err.Error()), ) @@ -174,7 +175,7 @@ func (gc *gc) tickRemover() { 
close(gc.eventChan) - gc.log.Debug("GC is stopped") + gc.log.Debug(logs.ShardGCIsStopped) return case <-timer.C: gc.remover() @@ -188,7 +189,7 @@ func (gc *gc) stop() { gc.stopChannel <- struct{}{} }) - gc.log.Info("waiting for GC workers to stop...") + gc.log.Info(logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() } @@ -203,6 +204,9 @@ func (s *Shard) removeGarbage() { return } + s.log.Debug(logs.ShardGCRemoveGarbageStarted) + defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted) + buf := make([]oid.Address, 0, s.rmBatchSize) var iterPrm meta.GarbageIterationPrm @@ -220,7 +224,7 @@ func (s *Shard) removeGarbage() { // (no more than s.rmBatchSize objects) err := s.metaBase.IterateOverGarbage(iterPrm) if err != nil { - s.log.Warn("iterator over metabase graveyard failed", + s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed, zap.String("error", err.Error()), ) @@ -233,9 +237,9 @@ func (s *Shard) removeGarbage() { deletePrm.SetAddresses(buf...) // delete accumulated objects - _, err = s.delete(deletePrm) + _, err = s.delete(context.TODO(), deletePrm) if err != nil { - s.log.Warn("could not delete the objects", + s.log.Warn(logs.ShardCouldNotDeleteTheObjects, zap.String("error", err.Error()), ) @@ -258,6 +262,9 @@ func (s *Shard) getExpiredObjectsParameters() (workersCount, batchSize int) { } func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { + s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) + defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + workersCount, batchSize := s.getExpiredObjectsParameters() errGroup, egCtx := errgroup.WithContext(ctx) @@ -295,7 +302,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { }) if err := errGroup.Wait(); err != nil { - s.log.Warn("iterator over expired objects failed", zap.String("error", err.Error())) + s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", 
err.Error())) } } @@ -313,15 +320,21 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) return } + expired, err := s.getExpiredWithLinked(expired) + if err != nil { + s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) + return + } + var inhumePrm meta.InhumePrm inhumePrm.SetAddresses(expired...) inhumePrm.SetGCMark() // inhume the collected objects - res, err := s.metaBase.Inhume(inhumePrm) + res, err := s.metaBase.Inhume(ctx, inhumePrm) if err != nil { - s.log.Warn("could not inhume the objects", + s.log.Warn(logs.ShardCouldNotInhumeTheObjects, zap.String("error", err.Error()), ) @@ -338,11 +351,26 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) } } +func (s *Shard) getExpiredWithLinked(source []oid.Address) ([]oid.Address, error) { + result := make([]oid.Address, 0, len(source)) + parentToChildren, err := s.metaBase.GetChildren(source) + if err != nil { + return nil, err + } + for parent, children := range parentToChildren { + result = append(result, parent) + result = append(result, children...) 
+ } + + return result, nil +} + func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { epoch := e.(newEpoch).epoch log := s.log.With(zap.Uint64("epoch", epoch)) - log.Debug("started expired tombstones handling") + log.Debug(logs.ShardStartedExpiredTombstonesHandling) + defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling) const tssDeleteBatch = 50 tss := make([]meta.TombstonedObject, 0, tssDeleteBatch) @@ -360,12 +388,12 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { }) for { - log.Debug("iterating tombstones") + log.Debug(logs.ShardIteratingTombstones) s.m.RLock() if s.info.Mode.NoMetabase() { - s.log.Debug("shard is in a degraded mode, skip collecting expired tombstones") + s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones) s.m.RUnlock() return @@ -373,7 +401,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { err := s.metaBase.IterateOverGraveyard(iterPrm) if err != nil { - log.Error("iterator over graveyard failed", zap.Error(err)) + log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() return @@ -392,18 +420,19 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { } } - log.Debug("handling expired tombstones batch", zap.Int("number", len(tssExp))) + log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp))) s.expiredTombstonesCallback(ctx, tssExp) iterPrm.SetOffset(tss[tssLen-1].Address()) tss = tss[:0] tssExp = tssExp[:0] } - - log.Debug("finished expired tombstones handling") } func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { + s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) + defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + workersCount, batchSize := s.getExpiredObjectsParameters() errGroup, egCtx := errgroup.WithContext(ctx) @@ -442,7 +471,7 @@ func (s *Shard) 
collectExpiredLocks(ctx context.Context, e Event) { }) if err := errGroup.Wait(); err != nil { - s.log.Warn("iterator over expired locks failed", zap.String("error", err.Error())) + s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error())) } } @@ -484,7 +513,7 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid // and clears up corresponding graveyard records. // // Does not modify tss. -func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) { +func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) { if s.GetMode().NoMetabase() { return } @@ -501,9 +530,9 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) { pInhume.SetAddresses(tsAddrs...) // inhume tombstones - res, err := s.metaBase.Inhume(pInhume) + res, err := s.metaBase.Inhume(ctx, pInhume) if err != nil { - s.log.Warn("could not mark tombstones as garbage", + s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage, zap.String("error", err.Error()), ) @@ -523,7 +552,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) { // from graveyard err = s.metaBase.DropGraves(tss) if err != nil { - s.log.Warn("could not drop expired grave records", zap.Error(err)) + s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err)) } } @@ -535,7 +564,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] } unlocked, err := s.metaBase.FreeLockedBy(lockers) if err != nil { - s.log.Warn("failure to unlock objects", + s.log.Warn(logs.ShardFailureToUnlockObjects, zap.String("error", err.Error()), ) @@ -546,9 +575,9 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] pInhume.SetAddresses(lockers...) 
pInhume.SetForceGCMark() - res, err := s.metaBase.Inhume(pInhume) + res, err := s.metaBase.Inhume(ctx, pInhume) if err != nil { - s.log.Warn("failure to mark lockers as garbage", + s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage, zap.String("error", err.Error()), ) @@ -570,7 +599,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) { expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked) if err != nil { - s.log.Warn("failure to get expired unlocked objects", zap.Error(err)) + s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err)) return } @@ -589,7 +618,7 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) { _, err := s.metaBase.FreeLockedBy(lockers) if err != nil { - s.log.Warn("failure to unlock objects", + s.log.Warn(logs.ShardFailureToUnlockObjects, zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index 8012e60f8..263a0ea4d 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -2,77 +2,30 @@ package shard_test import ( "context" - "path/filepath" + "errors" "testing" "time" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/panjf2000/ants/v2" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" - "go.uber.org/zap" ) -func Test_GCDropsLockedExpiredObject(t *testing.T) { - var sh *shard.Shard +func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { + t.Parallel() epoch := &epochState{ Value: 100, } - rootPath := t.TempDir() - opts := []shard.Option{ - shard.WithID(shard.NewIDFromBytes([]byte{})), - shard.WithLogger(&logger.Logger{Logger: zap.NewNop()}), - shard.WithBlobStorOptions( - blobstor.WithStorages([]blobstor.SubStorage{ - { - Storage: blobovniczatree.NewBlobovniczaTree( - blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")), - blobovniczatree.WithBlobovniczaShallowDepth(2), - blobovniczatree.WithBlobovniczaShallowWidth(2)), - Policy: func(_ *objectSDK.Object, data []byte) bool { - return len(data) <= 1<<20 - }, - }, - { - Storage: fstree.New( - fstree.WithPath(filepath.Join(rootPath, "blob"))), - }, - }), - ), - shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(rootPath, "meta")), - meta.WithEpochState(epoch), - ), - shard.WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) { - sh.HandleDeletedLocks(addresses) - }), - shard.WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) { - sh.HandleExpiredLocks(ctx, epoch, a) - }), - shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { - pool, err := ants.NewPool(sz) - require.NoError(t, err) - - return pool - }), - } - - sh = shard.New(opts...) 
- require.NoError(t, sh.Open()) - require.NoError(t, sh.Init(context.Background())) + sh := newCustomShard(t, t.TempDir(), false, nil, nil, []meta.Option{meta.WithEpochState(epoch)}) t.Cleanup(func() { releaseShard(sh, t) @@ -100,14 +53,14 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) { var putPrm shard.PutPrm putPrm.SetObject(obj) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) - err = sh.Lock(cnr, lockID, []oid.ID{objID}) + err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID}) require.NoError(t, err) putPrm.SetObject(lock) - _, err = sh.Put(putPrm) + _, err = sh.Put(context.Background(), putPrm) require.NoError(t, err) epoch.Value = 105 @@ -120,3 +73,97 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) { return shard.IsErrNotFound(err) }, 3*time.Second, 1*time.Second, "expired object must be deleted") } + +func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { + t.Parallel() + + epoch := &epochState{ + Value: 100, + } + + cnr := cidtest.ID() + parentID := oidtest.ID() + splitID := objectSDK.NewSplitID() + + var objExpirationAttr objectSDK.Attribute + objExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch) + objExpirationAttr.SetValue("101") + + var lockExpirationAttr objectSDK.Attribute + lockExpirationAttr.SetKey(objectV2.SysAttributeExpEpoch) + lockExpirationAttr.SetValue("103") + + parent := testutil.GenerateObjectWithCID(cnr) + parent.SetID(parentID) + parent.SetPayload(nil) + parent.SetAttributes(objExpirationAttr) + + const childCount = 10 + children := make([]*objectSDK.Object, childCount) + childIDs := make([]oid.ID, childCount) + for i := range children { + children[i] = testutil.GenerateObjectWithCID(cnr) + if i != 0 { + children[i].SetPreviousID(childIDs[i-1]) + } + if i == len(children)-1 { + children[i].SetParent(parent) + } + children[i].SetSplitID(splitID) + children[i].SetPayload([]byte{byte(i), byte(i + 1), byte(i + 2)}) + childIDs[i], _ = children[i].ID() + } + + 
link := testutil.GenerateObjectWithCID(cnr) + link.SetParent(parent) + link.SetParentID(parentID) + link.SetSplitID(splitID) + link.SetChildren(childIDs...) + + linkID, _ := link.ID() + + sh := newCustomShard(t, t.TempDir(), false, nil, nil, []meta.Option{meta.WithEpochState(epoch)}) + + t.Cleanup(func() { + releaseShard(sh, t) + }) + + lock := testutil.GenerateObjectWithCID(cnr) + lock.SetType(objectSDK.TypeLock) + lock.SetAttributes(lockExpirationAttr) + lockID, _ := lock.ID() + + var putPrm shard.PutPrm + + for _, child := range children { + putPrm.SetObject(child) + _, err := sh.Put(context.Background(), putPrm) + require.NoError(t, err) + } + + putPrm.SetObject(link) + _, err := sh.Put(context.Background(), putPrm) + require.NoError(t, err) + + err = sh.Lock(context.Background(), cnr, lockID, append(childIDs, parentID, linkID)) + require.NoError(t, err) + + putPrm.SetObject(lock) + _, err = sh.Put(context.Background(), putPrm) + require.NoError(t, err) + + var getPrm shard.GetPrm + getPrm.SetAddress(objectCore.AddressOf(parent)) + + _, err = sh.Get(context.Background(), getPrm) + var splitInfoError *objectSDK.SplitInfoError + require.True(t, errors.As(err, &splitInfoError), "split info must be provided") + + epoch.Value = 105 + sh.NotificationChannel() <- shard.EventNewEpoch(epoch.Value) + + require.Eventually(t, func() bool { + _, err = sh.Get(context.Background(), getPrm) + return shard.IsErrNotFound(err) + }, 3*time.Second, 1*time.Second, "expired complex object must be deleted on epoch after lock expires") +} diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 3406b9338..5268ac790 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -5,6 +5,7 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -95,7 +96,7 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) { } skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() - obj, hasMeta, err := s.fetchObjectData(prm.addr, skipMeta, cb, wc) + obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) return GetRes{ obj: obj, @@ -108,7 +109,7 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) { var emptyStorageID = make([]byte, 0) // fetchObjectData looks through writeCache and blobStor to find object. -func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) { +func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) { var ( mErr error mRes meta.ExistsRes @@ -117,7 +118,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, if !skipMeta { var mPrm meta.ExistsPrm mPrm.SetAddress(addr) - mRes, mErr = s.metaBase.Exists(mPrm) + mRes, mErr = s.metaBase.Exists(ctx, mPrm) if mErr != nil && !s.info.Mode.NoMetabase() { return nil, false, mErr } @@ -126,7 +127,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, return nil, false, logicerr.Wrap(apistatus.ObjectNotFound{}) } } else { - s.log.Warn("fetching object without meta", zap.Stringer("addr", addr)) + s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr)) } if s.hasWriteCache() { @@ -135,11 +136,11 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, return res, false, err } if IsErrNotFound(err) { - s.log.Debug("object is missing in write-cache", + 
s.log.Debug(logs.ShardObjectIsMissingInWritecache, zap.Stringer("addr", addr), zap.Bool("skip_meta", skipMeta)) } else { - s.log.Error("failed to fetch object from write-cache", + s.log.Error(logs.ShardFailedToFetchObjectFromWritecache, zap.Error(err), zap.Stringer("addr", addr), zap.Bool("skip_meta", skipMeta)) @@ -153,7 +154,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, var mPrm meta.StorageIDPrm mPrm.SetAddress(addr) - mExRes, err := s.metaBase.StorageID(mPrm) + mExRes, err := s.metaBase.StorageID(ctx, mPrm) if err != nil { return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err) } diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go index f670b2864..2db86c48a 100644 --- a/pkg/local_object_storage/shard/get_test.go +++ b/pkg/local_object_storage/shard/get_test.go @@ -17,11 +17,15 @@ import ( ) func TestShard_Get(t *testing.T) { + t.Parallel() + t.Run("without write cache", func(t *testing.T) { + t.Parallel() testShardGet(t, false) }) t.Run("with write cache", func(t *testing.T) { + t.Parallel() testShardGet(t, true) }) } @@ -40,7 +44,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) { putPrm.SetObject(obj) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) getPrm.SetAddress(object.AddressOf(obj)) @@ -58,7 +62,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) { putPrm.SetObject(obj) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) getPrm.SetAddress(object.AddressOf(obj)) @@ -86,7 +90,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) { putPrm.SetObject(child) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) getPrm.SetAddress(object.AddressOf(child)) diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go index 8e8ff9433..a15cdfdca 100644 --- 
a/pkg/local_object_storage/shard/head.go +++ b/pkg/local_object_storage/shard/head.go @@ -73,7 +73,7 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) { headParams.SetRaw(prm.raw) var res meta.GetRes - res, err = s.metaBase.Get(headParams) + res, err = s.metaBase.Get(ctx, headParams) obj = res.Header() } diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go index 449626e93..7e336ea06 100644 --- a/pkg/local_object_storage/shard/head_test.go +++ b/pkg/local_object_storage/shard/head_test.go @@ -15,11 +15,15 @@ import ( ) func TestShard_Head(t *testing.T) { + t.Parallel() + t.Run("without write cache", func(t *testing.T) { + t.Parallel() testShardHead(t, false) }) t.Run("with write cache", func(t *testing.T) { + t.Parallel() testShardHead(t, true) }) } @@ -37,7 +41,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) { putPrm.SetObject(obj) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) headPrm.SetAddress(object.AddressOf(obj)) @@ -62,7 +66,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) { putPrm.SetObject(child) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) headPrm.SetAddress(object.AddressOf(parent)) diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index 40a5bf22e..12a2900ac 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -5,8 +5,12 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -61,6 +65,12 @@ var ErrLockObjectRemoval = 
meta.ErrLockObjectRemoval // // Returns ErrReadOnlyMode error if shard is in "read-only" mode. func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Inhume", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + )) + defer span.End() + s.m.RLock() if s.info.Mode.ReadOnly() { @@ -73,7 +83,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { if s.hasWriteCache() { for i := range prm.target { - _ = s.writeCache.Delete(prm.target[i]) + _ = s.writeCache.Delete(ctx, prm.target[i]) } } @@ -91,14 +101,14 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { metaPrm.SetForceGCMark() } - res, err := s.metaBase.Inhume(metaPrm) + res, err := s.metaBase.Inhume(ctx, metaPrm) if err != nil { if errors.Is(err, meta.ErrLockObjectRemoval) { s.m.RUnlock() return InhumeRes{}, ErrLockObjectRemoval } - s.log.Debug("could not mark object to delete in metabase", + s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase, zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go index 41845c414..4151d6218 100644 --- a/pkg/local_object_storage/shard/inhume_test.go +++ b/pkg/local_object_storage/shard/inhume_test.go @@ -13,11 +13,15 @@ import ( ) func TestShard_Inhume(t *testing.T) { + t.Parallel() + t.Run("without write cache", func(t *testing.T) { + t.Parallel() testShardInhume(t, false) }) t.Run("with write cache", func(t *testing.T) { + t.Parallel() testShardInhume(t, true) }) } @@ -42,7 +46,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) { var getPrm shard.GetPrm getPrm.SetAddress(object.AddressOf(obj)) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) _, err = testGet(t, sh, getPrm, hasWriteCache) diff --git a/pkg/local_object_storage/shard/list.go 
b/pkg/local_object_storage/shard/list.go index 9efca8983..aaa1112cd 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -1,8 +1,10 @@ package shard import ( + "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -84,9 +86,9 @@ func (s *Shard) List() (res SelectRes, err error) { sPrm.SetContainerID(lst[i]) sPrm.SetFilters(filters) - sRes, err := s.metaBase.Select(sPrm) // consider making List in metabase + sRes, err := s.metaBase.Select(context.TODO(), sPrm) // consider making List in metabase if err != nil { - s.log.Debug("can't select all objects", + s.log.Debug(logs.ShardCantSelectAllObjects, zap.Stringer("cid", lst[i]), zap.String("error", err.Error())) diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go index 33c9e489a..bbce28430 100644 --- a/pkg/local_object_storage/shard/list_test.go +++ b/pkg/local_object_storage/shard/list_test.go @@ -1,6 +1,8 @@ package shard_test import ( + "context" + "sync" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -8,22 +10,23 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" ) func TestShard_List(t *testing.T) { - sh := newShard(t, false) - shWC := newShard(t, true) - - defer func() { - releaseShard(sh, t) - releaseShard(shWC, t) - }() + t.Parallel() t.Run("without write cache", func(t *testing.T) { + t.Parallel() + sh := newShard(t, false) + defer releaseShard(sh, t) testShardList(t, sh) }) t.Run("with write cache", func(t *testing.T) { + t.Parallel() + shWC := newShard(t, true) + defer 
releaseShard(shWC, t) testShardList(t, shWC) }) } @@ -32,30 +35,41 @@ func testShardList(t *testing.T, sh *shard.Shard) { const C = 10 const N = 5 + var mtx sync.Mutex objs := make(map[string]int) - var putPrm shard.PutPrm + var errG errgroup.Group + errG.SetLimit(C * N) for i := 0; i < C; i++ { - cnr := cidtest.ID() + errG.Go(func() error { + cnr := cidtest.ID() - for j := 0; j < N; j++ { - obj := testutil.GenerateObjectWithCID(cnr) - testutil.AddPayload(obj, 1<<2) + for j := 0; j < N; j++ { + errG.Go(func() error { + obj := testutil.GenerateObjectWithCID(cnr) + testutil.AddPayload(obj, 1<<2) - // add parent as virtual object, it must be ignored in List() - parent := testutil.GenerateObjectWithCID(cnr) - idParent, _ := parent.ID() - obj.SetParentID(idParent) - obj.SetParent(parent) + // add parent as virtual object, it must be ignored in List() + parent := testutil.GenerateObjectWithCID(cnr) + idParent, _ := parent.ID() + obj.SetParentID(idParent) + obj.SetParent(parent) - objs[object.AddressOf(obj).EncodeToString()] = 0 + mtx.Lock() + objs[object.AddressOf(obj).EncodeToString()] = 0 + mtx.Unlock() - putPrm.SetObject(obj) + var putPrm shard.PutPrm + putPrm.SetObject(obj) - _, err := sh.Put(putPrm) - require.NoError(t, err) - } + _, err := sh.Put(context.Background(), putPrm) + return err + }) + } + return nil + }) } + require.NoError(t, errG.Wait()) res, err := sh.List() require.NoError(t, err) diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go index d8113cf30..cfbd94c5b 100644 --- a/pkg/local_object_storage/shard/lock.go +++ b/pkg/local_object_storage/shard/lock.go @@ -1,11 +1,15 @@ package shard import ( + "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + 
"go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // Lock marks objects as locked with another object. All objects from the @@ -14,7 +18,16 @@ import ( // Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject). // // Locked list should be unique. Panics if it is empty. -func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error { +func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Lock", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", idCnr.EncodeToString()), + attribute.String("locker", locker.EncodeToString()), + attribute.Int("locked_count", len(locked)), + )) + defer span.End() + s.m.RLock() defer s.m.RUnlock() @@ -25,7 +38,7 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error { return ErrDegradedMode } - err := s.metaBase.Lock(idCnr, locker, locked) + err := s.metaBase.Lock(ctx, idCnr, locker, locked) if err != nil { return fmt.Errorf("metabase lock: %w", err) } @@ -35,7 +48,14 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error { // IsLocked checks object locking relation of the provided object. Not found object is // considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise. 
-func (s *Shard) IsLocked(addr oid.Address) (bool, error) { +func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.IsLocked", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("address", addr.EncodeToString()), + )) + defer span.End() + m := s.GetMode() if m.NoMetabase() { return false, ErrDegradedMode @@ -44,7 +64,7 @@ func (s *Shard) IsLocked(addr oid.Address) (bool, error) { var prm meta.IsLockedPrm prm.SetAddress(addr) - res, err := s.metaBase.IsLocked(prm) + res, err := s.metaBase.IsLocked(ctx, prm) if err != nil { return false, err } diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 2bee66298..75010179a 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -23,6 +23,8 @@ import ( ) func TestShard_Lock(t *testing.T) { + t.Parallel() + var sh *shard.Shard rootPath := t.TempDir() @@ -76,16 +78,16 @@ func TestShard_Lock(t *testing.T) { var putPrm shard.PutPrm putPrm.SetObject(obj) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) // lock the object - err = sh.Lock(cnr, lockID, []oid.ID{objID}) + err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID}) require.NoError(t, err) putPrm.SetObject(lock) - _, err = sh.Put(putPrm) + _, err = sh.Put(context.Background(), putPrm) require.NoError(t, err) t.Run("inhuming locked objects", func(t *testing.T) { @@ -158,21 +160,21 @@ func TestShard_IsLocked(t *testing.T) { var putPrm shard.PutPrm putPrm.SetObject(obj) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) // not locked object is not locked - locked, err := sh.IsLocked(objectcore.AddressOf(obj)) + locked, err := sh.IsLocked(context.Background(), objectcore.AddressOf(obj)) require.NoError(t, err) require.False(t, locked) // 
locked object is locked - require.NoError(t, sh.Lock(cnrID, lockID, []oid.ID{objID})) + require.NoError(t, sh.Lock(context.Background(), cnrID, lockID, []oid.ID{objID})) - locked, err = sh.IsLocked(objectcore.AddressOf(obj)) + locked, err = sh.IsLocked(context.Background(), objectcore.AddressOf(obj)) require.NoError(t, err) require.True(t, locked) diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go index 18e97e259..16f6989c4 100644 --- a/pkg/local_object_storage/shard/metrics_test.go +++ b/pkg/local_object_storage/shard/metrics_test.go @@ -72,6 +72,8 @@ const physical = "phy" const logical = "logic" func TestCounters(t *testing.T) { + t.Parallel() + dir := t.TempDir() sh, mm := shardWithMetrics(t, dir) @@ -109,7 +111,7 @@ func TestCounters(t *testing.T) { for i := 0; i < objNumber; i++ { prm.SetObject(oo[i]) - _, err := sh.Put(prm) + _, err := sh.Put(context.Background(), prm) require.NoError(t, err) } @@ -168,7 +170,7 @@ func TestCounters(t *testing.T) { deletedNumber := int(phy / 4) prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...) 
- _, err := sh.Delete(prm) + _, err := sh.Delete(context.Background(), prm) require.NoError(t, err) require.Equal(t, phy-uint64(deletedNumber), mm.objCounters[physical]) @@ -207,6 +209,7 @@ func shardWithMetrics(t *testing.T, path string) (*shard.Shard, *metricsStore) { } sh := shard.New( + shard.WithID(shard.NewIDFromBytes([]byte{})), shard.WithBlobStorOptions(blobOpts...), shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(path, "pilorama"))), shard.WithMetaBaseOptions( diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go index 17ed3f3c8..50c52accc 100644 --- a/pkg/local_object_storage/shard/mode.go +++ b/pkg/local_object_storage/shard/mode.go @@ -1,6 +1,7 @@ package shard import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "go.uber.org/zap" @@ -25,7 +26,7 @@ func (s *Shard) SetMode(m mode.Mode) error { } func (s *Shard) setMode(m mode.Mode) error { - s.log.Info("setting shard mode", + s.log.Info(logs.ShardSettingShardMode, zap.Stringer("old_mode", s.info.Mode), zap.Stringer("new_mode", m)) @@ -66,7 +67,7 @@ func (s *Shard) setMode(m mode.Mode) error { s.metricsWriter.SetReadonly(s.info.Mode != mode.ReadWrite) } - s.log.Info("shard mode set successfully", + s.log.Info(logs.ShardShardModeSetSuccessfully, zap.Stringer("mode", s.info.Mode)) return nil } diff --git a/pkg/local_object_storage/shard/move.go b/pkg/local_object_storage/shard/move.go index c6bf8409e..119910623 100644 --- a/pkg/local_object_storage/shard/move.go +++ b/pkg/local_object_storage/shard/move.go @@ -1,8 +1,14 @@ package shard import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" oid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -22,7 +28,14 @@ func (p *ToMoveItPrm) SetAddress(addr oid.Address) { // ToMoveIt calls metabase.ToMoveIt method to mark object as relocatable to // another shard. -func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) { +func (s *Shard) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (ToMoveItRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ToMoveIt", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("address", prm.addr.EncodeToString()), + )) + defer span.End() + s.m.RLock() defer s.m.RUnlock() @@ -36,9 +49,9 @@ func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) { var toMovePrm meta.ToMoveItPrm toMovePrm.SetAddress(prm.addr) - _, err := s.metaBase.ToMoveIt(toMovePrm) + _, err := s.metaBase.ToMoveIt(ctx, toMovePrm) if err != nil { - s.log.Debug("could not mark object for shard relocation in metabase", + s.log.Debug(logs.ShardCouldNotMarkObjectForShardRelocationInMetabase, zap.String("error", err.Error()), ) } diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index 48dbe1be2..d7d4ae538 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -1,12 +1,17 @@ package shard import ( + "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -29,7 +34,14 @@ func (p *PutPrm) SetObject(obj 
*object.Object) { // did not allow to completely save the object. // // Returns ErrReadOnlyMode error if shard is in "read-only" mode. -func (s *Shard) Put(prm PutPrm) (PutRes, error) { +func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Put", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()), + )) + defer span.End() + s.m.RLock() defer s.m.RUnlock() @@ -54,15 +66,15 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) { // ahead of `Put` by storage engine tryCache := s.hasWriteCache() && !m.NoMetabase() if tryCache { - res, err = s.writeCache.Put(putPrm) + res, err = s.writeCache.Put(ctx, putPrm) } if err != nil || !tryCache { if err != nil { - s.log.Debug("can't put object to the write-cache, trying blobstor", + s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor, zap.String("err", err.Error())) } - res, err = s.blobStor.Put(putPrm) + res, err = s.blobStor.Put(ctx, putPrm) if err != nil { return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err) } @@ -72,7 +84,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) { var pPrm meta.PutPrm pPrm.SetObject(prm.obj) pPrm.SetStorageID(res.StorageID) - if _, err := s.metaBase.Put(pPrm); err != nil { + if _, err := s.metaBase.Put(ctx, pPrm); err != nil { // may we need to handle this case in a special way // since the object has been successfully written to BlobStor return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err) diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go index 4355c31a3..06aea2f8a 100644 --- a/pkg/local_object_storage/shard/range.go +++ b/pkg/local_object_storage/shard/range.go @@ -123,7 +123,7 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) { } skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() - obj, hasMeta, 
err := s.fetchObjectData(prm.addr, skipMeta, cb, wc) + obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) return RngRes{ obj: obj, diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go index 164181214..9ef2106b0 100644 --- a/pkg/local_object_storage/shard/range_test.go +++ b/pkg/local_object_storage/shard/range_test.go @@ -22,11 +22,14 @@ import ( ) func TestShard_GetRange(t *testing.T) { + t.Parallel() t.Run("without write cache", func(t *testing.T) { + t.Parallel() testShardGetRange(t, false) }) t.Run("with write cache", func(t *testing.T) { + t.Parallel() testShardGetRange(t, true) }) } @@ -84,7 +87,8 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { Storage: fstree.New( fstree.WithPath(filepath.Join(t.TempDir(), "blob"))), }, - })}) + })}, + nil) defer releaseShard(sh, t) for _, tc := range testCases { @@ -99,7 +103,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { var putPrm shard.PutPrm putPrm.SetObject(obj) - _, err := sh.Put(putPrm) + _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) var rngPrm shard.RngPrm diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go index 1bfa33dd7..0b964ba2e 100644 --- a/pkg/local_object_storage/shard/reload_test.go +++ b/pkg/local_object_storage/shard/reload_test.go @@ -24,6 +24,8 @@ import ( ) func TestShardReload(t *testing.T) { + t.Parallel() + p := t.Name() defer os.RemoveAll(p) @@ -44,6 +46,7 @@ func TestShardReload(t *testing.T) { meta.WithEpochState(epochState{})} opts := []Option{ + WithID(NewIDFromBytes([]byte{})), WithLogger(l), WithBlobStorOptions(blobOpts...), WithMetaBaseOptions(metaOpts...), @@ -75,7 +78,7 @@ func TestShardReload(t *testing.T) { checkHasObjects(t, true) t.Run("same config, no-op", func(t *testing.T) { - require.NoError(t, sh.Reload(opts...)) + require.NoError(t, sh.Reload(context.Background(), opts...)) checkHasObjects(t, true) 
}) @@ -86,7 +89,7 @@ func TestShardReload(t *testing.T) { } newOpts := newShardOpts(filepath.Join(p, "meta1"), false) - require.NoError(t, sh.Reload(newOpts...)) + require.NoError(t, sh.Reload(context.Background(), newOpts...)) checkHasObjects(t, false) // new path, but no resync @@ -97,7 +100,7 @@ func TestShardReload(t *testing.T) { }) newOpts = newShardOpts(filepath.Join(p, "meta2"), true) - require.NoError(t, sh.Reload(newOpts...)) + require.NoError(t, sh.Reload(context.Background(), newOpts...)) checkHasObjects(t, true) // all objects are restored, including the new one @@ -106,7 +109,7 @@ func TestShardReload(t *testing.T) { require.NoError(t, os.WriteFile(badPath, []byte{1}, 0)) newOpts = newShardOpts(badPath, true) - require.Error(t, sh.Reload(newOpts...)) + require.Error(t, sh.Reload(context.Background(), newOpts...)) // Cleanup is done, no panic. obj := newObject() @@ -117,7 +120,7 @@ func TestShardReload(t *testing.T) { // Successive reload produces no undesired effects. require.NoError(t, os.RemoveAll(badPath)) - require.NoError(t, sh.Reload(newOpts...)) + require.NoError(t, sh.Reload(context.Background(), newOpts...)) obj = newObject() require.NoError(t, putObject(sh, obj)) @@ -132,7 +135,7 @@ func putObject(sh *Shard, obj *objectSDK.Object) error { var prm PutPrm prm.SetObject(obj) - _, err := sh.Put(prm) + _, err := sh.Put(context.Background(), prm) return err } diff --git a/pkg/local_object_storage/shard/restore.go b/pkg/local_object_storage/shard/restore.go deleted file mode 100644 index 73dc1d178..000000000 --- a/pkg/local_object_storage/shard/restore.go +++ /dev/null @@ -1,134 +0,0 @@ -package shard - -import ( - "bytes" - "encoding/binary" - "errors" - "io" - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -// ErrInvalidMagic is returned when dump format is invalid. 
-var ErrInvalidMagic = logicerr.New("invalid magic") - -// RestorePrm groups the parameters of Restore operation. -type RestorePrm struct { - path string - stream io.Reader - ignoreErrors bool -} - -// WithPath is a Restore option to set the destination path. -func (p *RestorePrm) WithPath(path string) { - p.path = path -} - -// WithStream is a Restore option to set the stream to read objects from. -// It takes priority over `WithPath` option. -func (p *RestorePrm) WithStream(r io.Reader) { - p.stream = r -} - -// WithIgnoreErrors is a Restore option which allows to ignore errors encountered during restore. -// Corrupted objects will not be processed. -func (p *RestorePrm) WithIgnoreErrors(ignore bool) { - p.ignoreErrors = ignore -} - -// RestoreRes groups the result fields of Restore operation. -type RestoreRes struct { - count int - failed int -} - -// Count return amount of object written. -func (r RestoreRes) Count() int { - return r.count -} - -// FailCount return amount of object skipped. -func (r RestoreRes) FailCount() int { - return r.failed -} - -// Restore restores objects from the dump prepared by Dump. -// -// Returns any error encountered. -func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) { - // Disallow changing mode during restore. - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.ReadOnly() { - return RestoreRes{}, ErrReadOnlyMode - } - - r := prm.stream - if r == nil { - f, err := os.OpenFile(prm.path, os.O_RDONLY, os.ModeExclusive) - if err != nil { - return RestoreRes{}, err - } - defer f.Close() - - r = f - } - - var m [4]byte - _, _ = io.ReadFull(r, m[:]) - if !bytes.Equal(m[:], dumpMagic) { - return RestoreRes{}, ErrInvalidMagic - } - - var putPrm PutPrm - - var count, failCount int - var data []byte - var size [4]byte - for { - // If there are less than 4 bytes left, `Read` returns nil error instead of - // io.ErrUnexpectedEOF, thus `ReadFull` is used. 
- _, err := io.ReadFull(r, size[:]) - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return RestoreRes{}, err - } - - sz := binary.LittleEndian.Uint32(size[:]) - if uint32(cap(data)) < sz { - data = make([]byte, sz) - } else { - data = data[:sz] - } - - _, err = r.Read(data) - if err != nil { - return RestoreRes{}, err - } - - obj := object.New() - err = obj.Unmarshal(data) - if err != nil { - if prm.ignoreErrors { - failCount++ - continue - } - return RestoreRes{}, err - } - - putPrm.SetObject(obj) - _, err = s.Put(putPrm) - if err != nil && !IsErrObjectExpired(err) && !IsErrRemoved(err) { - return RestoreRes{}, err - } - - count++ - } - - return RestoreRes{count: count, failed: failCount}, nil -} diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go index 4bb467d48..7f776c18a 100644 --- a/pkg/local_object_storage/shard/select.go +++ b/pkg/local_object_storage/shard/select.go @@ -1,12 +1,16 @@ package shard import ( + "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // SelectPrm groups the parameters of Select operation. @@ -39,7 +43,14 @@ func (r SelectRes) AddressList() []oid.Address { // // Returns any error encountered that // did not allow to completely select the objects. 
-func (s *Shard) Select(prm SelectPrm) (SelectRes, error) { +func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Select", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", prm.cnr.EncodeToString()), + )) + defer span.End() + s.m.RLock() defer s.m.RUnlock() @@ -51,7 +62,7 @@ func (s *Shard) Select(prm SelectPrm) (SelectRes, error) { selectPrm.SetFilters(prm.filters) selectPrm.SetContainerID(prm.cnr) - mRes, err := s.metaBase.Select(selectPrm) + mRes, err := s.metaBase.Select(ctx, selectPrm) if err != nil { return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err) } diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 6d1fba141..44ec54645 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -349,7 +350,7 @@ func (s *Shard) updateMetrics() { if s.cfg.metricsWriter != nil && !s.GetMode().NoMetabase() { cc, err := s.metaBase.ObjectCounters() if err != nil { - s.log.Warn("meta: object counter read", + s.log.Warn(logs.ShardMetaObjectCounterRead, zap.Error(err), ) @@ -361,7 +362,7 @@ func (s *Shard) updateMetrics() { cnrList, err := s.metaBase.Containers() if err != nil { - s.log.Warn("meta: can't read container list", zap.Error(err)) + s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err)) return } @@ -370,7 +371,7 @@ func (s *Shard) updateMetrics() { for i := range cnrList { size, err := s.metaBase.ContainerSize(cnrList[i]) if err != nil { - s.log.Warn("meta: can't 
read container size", + s.log.Warn(logs.ShardMetaCantReadContainerSize, zap.String("cid", cnrList[i].EncodeToString()), zap.Error(err)) continue diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go index fea342766..7b2fdb5d1 100644 --- a/pkg/local_object_storage/shard/shard_test.go +++ b/pkg/local_object_storage/shard/shard_test.go @@ -4,6 +4,7 @@ import ( "context" "path/filepath" "testing" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" @@ -12,8 +13,11 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/panjf2000/ants/v2" "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zaptest" @@ -29,11 +33,13 @@ func (s epochState) CurrentEpoch() uint64 { func newShard(t testing.TB, enableWriteCache bool) *shard.Shard { return newCustomShard(t, t.TempDir(), enableWriteCache, + nil, nil, nil) } -func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts []writecache.Option, bsOpts []blobstor.Option) *shard.Shard { +func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts []writecache.Option, bsOpts []blobstor.Option, metaOptions []meta.Option) *shard.Shard { + var sh *shard.Shard if enableWriteCache { rootPath = filepath.Join(rootPath, "wc") } else { @@ -67,8 +73,9 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts shard.WithLogger(&logger.Logger{Logger: 
zap.L()}), shard.WithBlobStorOptions(bsOpts...), shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(rootPath, "meta")), - meta.WithEpochState(epochState{}), + append([]meta.Option{ + meta.WithPath(filepath.Join(rootPath, "meta")), meta.WithEpochState(epochState{})}, + metaOptions...)..., ), shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))), shard.WithWriteCache(enableWriteCache), @@ -77,9 +84,21 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts []writecache.Option{writecache.WithPath(filepath.Join(rootPath, "wcache"))}, wcOpts...)..., ), + shard.WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) { + sh.HandleDeletedLocks(addresses) + }), + shard.WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) { + sh.HandleExpiredLocks(ctx, epoch, a) + }), + shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { + pool, err := ants.NewPool(sz) + require.NoError(t, err) + return pool + }), + shard.WithGCRemoverSleepInterval(1 * time.Millisecond), } - sh := shard.New(opts...) + sh = shard.New(opts...) 
require.NoError(t, sh.Open()) require.NoError(t, sh.Init(context.Background())) diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go index 5fd13221a..5fe9fd7e9 100644 --- a/pkg/local_object_storage/shard/shutdown_test.go +++ b/pkg/local_object_storage/shard/shutdown_test.go @@ -12,9 +12,12 @@ import ( cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" ) func TestWriteCacheObjectLoss(t *testing.T) { + t.Parallel() + const ( smallSize = 1024 objCount = 100 @@ -37,18 +40,22 @@ func TestWriteCacheObjectLoss(t *testing.T) { writecache.WithSmallObjectSize(smallSize), writecache.WithMaxObjectSize(smallSize * 2)} - sh := newCustomShard(t, dir, true, wcOpts, nil) - - var putPrm shard.PutPrm + sh := newCustomShard(t, dir, true, wcOpts, nil, nil) + var errG errgroup.Group for i := range objects { - putPrm.SetObject(objects[i]) - _, err := sh.Put(putPrm) - require.NoError(t, err) + obj := objects[i] + errG.Go(func() error { + var putPrm shard.PutPrm + putPrm.SetObject(obj) + _, err := sh.Put(context.Background(), putPrm) + return err + }) } + require.NoError(t, errG.Wait()) require.NoError(t, sh.Close()) - sh = newCustomShard(t, dir, true, wcOpts, nil) + sh = newCustomShard(t, dir, true, wcOpts, nil, nil) defer releaseShard(sh, t) var getPrm shard.GetPrm diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go index db07c001e..d5b3b67bf 100644 --- a/pkg/local_object_storage/shard/tree.go +++ b/pkg/local_object_storage/shard/tree.go @@ -1,9 +1,15 @@ package shard import ( + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" cidSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) var _ pilorama.Forest = (*Shard)(nil) @@ -12,7 +18,18 @@ var _ pilorama.Forest = (*Shard)(nil) var ErrPiloramaDisabled = logicerr.New("pilorama is disabled") // TreeMove implements the pilorama.Forest interface. -func (s *Shard) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) { +func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeMove", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", d.CID.EncodeToString()), + attribute.Int("position", d.Position), + attribute.Int("size", d.Size), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + if s.pilorama == nil { return nil, ErrPiloramaDisabled } @@ -26,11 +43,25 @@ func (s *Shard) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Mo if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - return s.pilorama.TreeMove(d, treeID, m) + return s.pilorama.TreeMove(ctx, d, treeID, m) } // TreeAddByPath implements the pilorama.Forest interface. 
-func (s *Shard) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr string, path []string, meta []pilorama.KeyValue) ([]pilorama.Move, error) { +func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, treeID string, attr string, path []string, meta []pilorama.KeyValue) ([]pilorama.Move, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeAddByPath", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", d.CID.EncodeToString()), + attribute.Int("position", d.Position), + attribute.Int("size", d.Size), + attribute.String("tree_id", treeID), + attribute.String("attr", attr), + attribute.Int("path_count", len(path)), + attribute.Int("meta_count", len(meta)), + ), + ) + defer span.End() + if s.pilorama == nil { return nil, ErrPiloramaDisabled } @@ -44,11 +75,21 @@ func (s *Shard) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr stri if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - return s.pilorama.TreeAddByPath(d, treeID, attr, path, meta) + return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta) } // TreeApply implements the pilorama.Forest interface. 
-func (s *Shard) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error { +func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApply", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.Bool("background", backgroundSync), + ), + ) + defer span.End() + if s.pilorama == nil { return ErrPiloramaDisabled } @@ -62,11 +103,23 @@ func (s *Shard) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move, backgr if s.info.Mode.NoMetabase() { return ErrDegradedMode } - return s.pilorama.TreeApply(cnr, treeID, m, backgroundSync) + return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync) } // TreeGetByPath implements the pilorama.Forest interface. -func (s *Shard) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) { +func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("attr", attr), + attribute.Int("path_count", len(path)), + attribute.Bool("latest", latest), + ), + ) + defer span.End() + if s.pilorama == nil { return nil, ErrPiloramaDisabled } @@ -77,11 +130,21 @@ func (s *Shard) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path [] if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - return s.pilorama.TreeGetByPath(cid, treeID, attr, path, latest) + return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest) } // TreeGetMeta implements the 
pilorama.Forest interface. -func (s *Shard) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) { +func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetMeta", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("node_id", fmt.Sprintf("%d", nodeID)), + ), + ) + defer span.End() + if s.pilorama == nil { return pilorama.Meta{}, 0, ErrPiloramaDisabled } @@ -92,11 +155,21 @@ func (s *Shard) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node) if s.info.Mode.NoMetabase() { return pilorama.Meta{}, 0, ErrDegradedMode } - return s.pilorama.TreeGetMeta(cid, treeID, nodeID) + return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID) } // TreeGetChildren implements the pilorama.Forest interface. -func (s *Shard) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) { +func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetChildren", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("node_id", fmt.Sprintf("%d", nodeID)), + ), + ) + defer span.End() + if s.pilorama == nil { return nil, ErrPiloramaDisabled } @@ -107,11 +180,21 @@ func (s *Shard) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.No if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - return s.pilorama.TreeGetChildren(cid, treeID, nodeID) + return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID) } // TreeGetOpLog implements the pilorama.Forest interface. 
-func (s *Shard) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) { +func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetOpLog", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("height", fmt.Sprintf("%d", height)), + ), + ) + defer span.End() + if s.pilorama == nil { return pilorama.Move{}, ErrPiloramaDisabled } @@ -122,11 +205,20 @@ func (s *Shard) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilor if s.info.Mode.NoMetabase() { return pilorama.Move{}, ErrDegradedMode } - return s.pilorama.TreeGetOpLog(cid, treeID, height) + return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height) } // TreeDrop implements the pilorama.Forest interface. -func (s *Shard) TreeDrop(cid cidSDK.ID, treeID string) error { +func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeDrop", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + if s.pilorama == nil { return ErrPiloramaDisabled } @@ -137,11 +229,19 @@ func (s *Shard) TreeDrop(cid cidSDK.ID, treeID string) error { if s.info.Mode.NoMetabase() { return ErrDegradedMode } - return s.pilorama.TreeDrop(cid, treeID) + return s.pilorama.TreeDrop(ctx, cid, treeID) } // TreeList implements the pilorama.Forest interface. 
-func (s *Shard) TreeList(cid cidSDK.ID) ([]string, error) { +func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeList", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cid.EncodeToString()), + ), + ) + defer span.End() + if s.pilorama == nil { return nil, ErrPiloramaDisabled } @@ -152,11 +252,20 @@ func (s *Shard) TreeList(cid cidSDK.ID) ([]string, error) { if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } - return s.pilorama.TreeList(cid) + return s.pilorama.TreeList(ctx, cid) } // TreeExists implements the pilorama.Forest interface. -func (s *Shard) TreeExists(cid cidSDK.ID, treeID string) (bool, error) { +func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeExists", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + if s.pilorama == nil { return false, ErrPiloramaDisabled } @@ -167,11 +276,21 @@ func (s *Shard) TreeExists(cid cidSDK.ID, treeID string) (bool, error) { if s.info.Mode.NoMetabase() { return false, ErrDegradedMode } - return s.pilorama.TreeExists(cid, treeID) + return s.pilorama.TreeExists(ctx, cid, treeID) } // TreeUpdateLastSyncHeight implements the pilorama.Forest interface. 
-func (s *Shard) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error { +func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeUpdateLastSyncHeight", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + attribute.String("height", fmt.Sprintf("%d", height)), + ), + ) + defer span.End() + if s.pilorama == nil { return ErrPiloramaDisabled } @@ -185,11 +304,20 @@ func (s *Shard) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height ui if s.info.Mode.NoMetabase() { return ErrDegradedMode } - return s.pilorama.TreeUpdateLastSyncHeight(cid, treeID, height) + return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height) } // TreeLastSyncHeight implements the pilorama.Forest interface. -func (s *Shard) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) { +func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeLastSyncHeight", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cid.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + if s.pilorama == nil { return 0, ErrPiloramaDisabled } @@ -200,5 +328,5 @@ func (s *Shard) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) if s.info.Mode.NoMetabase() { return 0, ErrDegradedMode } - return s.pilorama.TreeLastSyncHeight(cid, treeID) + return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID) } diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go index 7282f121c..245eb4c70 100644 --- a/pkg/local_object_storage/shard/writecache.go +++ b/pkg/local_object_storage/shard/writecache.go @@ 
-1,7 +1,12 @@ package shard import ( + "context" "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // FlushWriteCachePrm represents parameters of a `FlushWriteCache` operation. @@ -19,7 +24,14 @@ func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) { var errWriteCacheDisabled = errors.New("write-cache is disabled") // FlushWriteCache flushes all data from the write-cache. -func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error { +func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.FlushWriteCache", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.Bool("ignore_errors", p.ignoreErrors), + )) + defer span.End() + if !s.hasWriteCache() { return errWriteCacheDisabled } @@ -35,5 +47,5 @@ func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error { return ErrDegradedMode } - return s.writeCache.Flush(p.ignoreErrors) + return s.writeCache.Flush(ctx, p.ignoreErrors) } diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go index dcfea8dd0..c1aab9e5a 100644 --- a/pkg/local_object_storage/writecache/delete.go +++ b/pkg/local_object_storage/writecache/delete.go @@ -1,16 +1,27 @@ package writecache import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // Delete removes object from write-cache. // // Returns an error of type apistatus.ObjectNotFound if object is missing in write-cache. 
-func (c *cache) Delete(addr oid.Address) error { +func (c *cache) Delete(ctx context.Context, addr oid.Address) error { + ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Delete", + trace.WithAttributes( + attribute.String("address", addr.EncodeToString()), + )) + defer span.End() + c.modeMtx.RLock() defer c.modeMtx.RUnlock() if c.readOnly() { @@ -45,7 +56,7 @@ func (c *cache) Delete(addr oid.Address) error { return nil } - _, err := c.fsTree.Delete(common.DeletePrm{Address: addr}) + _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr}) if err == nil { storagelog.Write(c.log, storagelog.AddressField(saddr), diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index 0437367e7..04fcccede 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -2,9 +2,12 @@ package writecache import ( "bytes" + "context" "errors" "time" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -14,6 +17,8 @@ import ( "github.com/mr-tron/base58" "github.com/nspcc-dev/neo-go/pkg/util/slice" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -36,7 +41,10 @@ func (c *cache) runFlushLoop() { } c.wg.Add(1) - go c.flushBigObjects() + go func() { + c.flushBigObjects(context.TODO()) + c.wg.Done() + }() c.wg.Add(1) go func() { @@ -134,15 +142,13 @@ func (c *cache) flushDB() { c.modeMtx.RUnlock() - c.log.Debug("tried to flush items from write-cache", + c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache, zap.Int("count", count), zap.String("start", base58.Encode(lastKey))) } } -func (c *cache) 
flushBigObjects() { - defer c.wg.Done() - +func (c *cache) flushBigObjects(ctx context.Context) { tick := time.NewTicker(defaultFlushInterval * 10) for { select { @@ -156,7 +162,7 @@ func (c *cache) flushBigObjects() { continue } - _ = c.flushFSTree(true) + _ = c.flushFSTree(ctx, true) c.modeMtx.RUnlock() case <-c.closeCh: @@ -175,7 +181,7 @@ func (c *cache) reportFlushError(msg string, addr string, err error) { } } -func (c *cache) flushFSTree(ignoreErrors bool) error { +func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error { var prm common.IteratePrm prm.IgnoreErrors = ignoreErrors prm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error { @@ -204,7 +210,7 @@ func (c *cache) flushFSTree(ignoreErrors bool) error { return err } - err = c.flushObject(&obj, data) + err = c.flushObject(ctx, &obj, data) if err != nil { if ignoreErrors { return nil @@ -235,7 +241,7 @@ func (c *cache) flushWorker(_ int) { return } - err := c.flushObject(obj, nil) + err := c.flushObject(context.TODO(), obj, nil) if err == nil { c.flushed.Add(objectCore.AddressOf(obj).EncodeToString(), true) } @@ -243,14 +249,14 @@ func (c *cache) flushWorker(_ int) { } // flushObject is used to write object directly to the main storage. -func (c *cache) flushObject(obj *object.Object, data []byte) error { +func (c *cache) flushObject(ctx context.Context, obj *object.Object, data []byte) error { addr := objectCore.AddressOf(obj) var prm common.PutPrm prm.Object = obj prm.RawData = data - res, err := c.blobstor.Put(prm) + res, err := c.blobstor.Put(ctx, prm) if err != nil { if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) && !errors.Is(err, blobstor.ErrNoPlaceFound) { @@ -275,15 +281,21 @@ func (c *cache) flushObject(obj *object.Object, data []byte) error { // Flush flushes all objects from the write-cache to the main storage. 
// Write-cache must be in readonly mode to ensure correctness of an operation and // to prevent interference with background flush workers. -func (c *cache) Flush(ignoreErrors bool) error { +func (c *cache) Flush(ctx context.Context, ignoreErrors bool) error { + ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Flush", + trace.WithAttributes( + attribute.Bool("ignore_errors", ignoreErrors), + )) + defer span.End() + c.modeMtx.RLock() defer c.modeMtx.RUnlock() - return c.flush(ignoreErrors) + return c.flush(ctx, ignoreErrors) } -func (c *cache) flush(ignoreErrors bool) error { - if err := c.flushFSTree(ignoreErrors); err != nil { +func (c *cache) flush(ctx context.Context, ignoreErrors bool) error { + if err := c.flushFSTree(ctx, ignoreErrors); err != nil { return err } @@ -315,7 +327,7 @@ func (c *cache) flush(ignoreErrors bool) error { return err } - if err := c.flushObject(&obj, data); err != nil { + if err := c.flushObject(ctx, &obj, data); err != nil { return err } } diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go index 9dc216fb3..2cec07081 100644 --- a/pkg/local_object_storage/writecache/flush_test.go +++ b/pkg/local_object_storage/writecache/flush_test.go @@ -89,7 +89,7 @@ func TestFlush(t *testing.T) { var mPrm meta.StorageIDPrm mPrm.SetAddress(objects[i].addr) - mRes, err := mb.StorageID(mPrm) + mRes, err := mb.StorageID(context.Background(), mPrm) require.NoError(t, err) var prm common.GetPrm @@ -112,12 +112,12 @@ func TestFlush(t *testing.T) { wc.(*cache).flushed.Add(objects[0].addr.EncodeToString(), true) wc.(*cache).flushed.Add(objects[1].addr.EncodeToString(), false) - require.NoError(t, wc.Flush(false)) + require.NoError(t, wc.Flush(context.Background(), false)) for i := 0; i < 2; i++ { var mPrm meta.GetPrm mPrm.SetAddress(objects[i].addr) - _, err := mb.Get(mPrm) + _, err := mb.Get(context.Background(), mPrm) require.Error(t, err) _, err = bs.Get(context.Background(), 
common.GetPrm{Address: objects[i].addr}) @@ -147,7 +147,7 @@ func TestFlush(t *testing.T) { for i := 0; i < 2; i++ { var mPrm meta.GetPrm mPrm.SetAddress(objects[i].addr) - _, err := mb.Get(mPrm) + _, err := mb.Get(context.Background(), mPrm) require.Error(t, err) _, err = bs.Get(context.Background(), common.GetPrm{Address: objects[i].addr}) @@ -171,9 +171,9 @@ func TestFlush(t *testing.T) { require.NoError(t, mb.SetMode(mode.ReadWrite)) require.Equal(t, uint32(0), errCount.Load()) - require.Error(t, wc.Flush(false)) + require.Error(t, wc.Flush(context.Background(), false)) require.True(t, errCount.Load() > 0) - require.NoError(t, wc.Flush(true)) + require.NoError(t, wc.Flush(context.Background(), true)) check(t, mb, bs, objects) } @@ -202,7 +202,7 @@ func TestFlush(t *testing.T) { prm.Address = objectCore.AddressOf(obj) prm.RawData = data - _, err := c.fsTree.Put(prm) + _, err := c.fsTree.Put(context.Background(), prm) require.NoError(t, err) p := prm.Address.Object().EncodeToString() + "." 
+ prm.Address.Container().EncodeToString() @@ -218,7 +218,7 @@ func TestFlush(t *testing.T) { var prm common.PutPrm prm.Address = oidtest.Address() prm.RawData = []byte{1, 2, 3} - _, err := c.fsTree.Put(prm) + _, err := c.fsTree.Put(context.Background(), prm) require.NoError(t, err) }) }) @@ -245,19 +245,19 @@ func TestFlush(t *testing.T) { for i := range objects { var prm meta.PutPrm prm.SetObject(objects[i].obj) - _, err := mb.Put(prm) + _, err := mb.Put(context.Background(), prm) require.NoError(t, err) } var inhumePrm meta.InhumePrm inhumePrm.SetAddresses(objects[0].addr, objects[1].addr) inhumePrm.SetTombstoneAddress(oidtest.Address()) - _, err := mb.Inhume(inhumePrm) + _, err := mb.Inhume(context.Background(), inhumePrm) require.NoError(t, err) var deletePrm meta.DeletePrm deletePrm.SetAddresses(objects[2].addr, objects[3].addr) - _, err = mb.Delete(deletePrm) + _, err = mb.Delete(context.Background(), deletePrm) require.NoError(t, err) require.NoError(t, bs.SetMode(mode.ReadOnly)) @@ -294,7 +294,7 @@ func putObject(t *testing.T, c Cache, size int) objectPair { prm.Object = obj prm.RawData = data - _, err := c.Put(prm) + _, err := c.Put(context.Background(), prm) require.NoError(t, err) return objectPair{prm.Address, prm.Object} diff --git a/pkg/local_object_storage/writecache/init.go b/pkg/local_object_storage/writecache/init.go index ffe7a0129..2ca8cceef 100644 --- a/pkg/local_object_storage/writecache/init.go +++ b/pkg/local_object_storage/writecache/init.go @@ -5,6 +5,7 @@ import ( "errors" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -14,21 +15,21 @@ import ( "go.uber.org/zap" ) -func (c *cache) initFlushMarks() { +func (c *cache) initFlushMarks(ctx context.Context) { 
var localWG sync.WaitGroup localWG.Add(1) go func() { defer localWG.Done() - c.fsTreeFlushMarkUpdate() + c.fsTreeFlushMarkUpdate(ctx) }() localWG.Add(1) go func() { defer localWG.Done() - c.dbFlushMarkUpdate() + c.dbFlushMarkUpdate(ctx) }() c.initWG.Add(1) @@ -53,8 +54,8 @@ func (c *cache) initFlushMarks() { var errStopIter = errors.New("stop iteration") -func (c *cache) fsTreeFlushMarkUpdate() { - c.log.Info("filling flush marks for objects in FSTree") +func (c *cache) fsTreeFlushMarkUpdate(ctx context.Context) { + c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInFSTree) var prm common.IteratePrm prm.LazyHandler = func(addr oid.Address, _ func() ([]byte, error)) error { @@ -66,14 +67,14 @@ func (c *cache) fsTreeFlushMarkUpdate() { default: } - flushed, needRemove := c.flushStatus(addr) + flushed, needRemove := c.flushStatus(ctx, addr) if flushed { c.store.flushed.Add(addr.EncodeToString(), true) if needRemove { var prm common.DeletePrm prm.Address = addr - _, err := c.fsTree.Delete(prm) + _, err := c.fsTree.Delete(ctx, prm) if err == nil { storagelog.Write(c.log, storagelog.AddressField(addr), @@ -85,12 +86,20 @@ func (c *cache) fsTreeFlushMarkUpdate() { } return nil } + + c.modeMtx.RLock() + defer c.modeMtx.RUnlock() + _, _ = c.fsTree.Iterate(prm) - c.log.Info("finished updating FSTree flush marks") + + c.log.Info(logs.WritecacheFinishedUpdatingFSTreeFlushMarks) } -func (c *cache) dbFlushMarkUpdate() { - c.log.Info("filling flush marks for objects in database") +func (c *cache) dbFlushMarkUpdate(ctx context.Context) { + c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInDatabase) + + c.modeMtx.RLock() + defer c.modeMtx.RUnlock() var m []string var indices []int @@ -124,7 +133,7 @@ func (c *cache) dbFlushMarkUpdate() { continue } - flushed, needRemove := c.flushStatus(addr) + flushed, needRemove := c.flushStatus(ctx, addr) if flushed { c.store.flushed.Add(addr.EncodeToString(), true) if needRemove { @@ -158,17 +167,17 @@ func (c *cache) 
dbFlushMarkUpdate() { lastKey = append([]byte(m[len(m)-1]), 0) } - c.log.Info("finished updating flush marks") + c.log.Info(logs.WritecacheFinishedUpdatingFlushMarks) } // flushStatus returns info about the object state in the main storage. // First return value is true iff object exists. // Second return value is true iff object can be safely removed. -func (c *cache) flushStatus(addr oid.Address) (bool, bool) { +func (c *cache) flushStatus(ctx context.Context, addr oid.Address) (bool, bool) { var existsPrm meta.ExistsPrm existsPrm.SetAddress(addr) - _, err := c.metabase.Exists(existsPrm) + _, err := c.metabase.Exists(ctx, existsPrm) if err != nil { needRemove := errors.Is(err, meta.ErrObjectIsExpired) || errors.As(err, new(apistatus.ObjectAlreadyRemoved)) return needRemove, needRemove @@ -177,7 +186,7 @@ func (c *cache) flushStatus(addr oid.Address) (bool, bool) { var prm meta.StorageIDPrm prm.SetAddress(addr) - mRes, _ := c.metabase.StorageID(prm) - res, err := c.blobstor.Exists(context.TODO(), common.ExistsPrm{Address: addr, StorageID: mRes.StorageID()}) + mRes, _ := c.metabase.StorageID(ctx, prm) + res, err := c.blobstor.Exists(ctx, common.ExistsPrm{Address: addr, StorageID: mRes.StorageID()}) return err == nil && res.Exists, false } diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go index 997310d9e..14f8af49e 100644 --- a/pkg/local_object_storage/writecache/mode.go +++ b/pkg/local_object_storage/writecache/mode.go @@ -1,11 +1,16 @@ package writecache import ( + "context" "fmt" "time" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // ErrReadOnly is returned when Put/Write is performed in a 
read-only mode. @@ -18,24 +23,20 @@ var ErrNotInitialized = logicerr.New("write-cache is not initialized yet") // When shard is put in read-only mode all objects in memory are flushed to disk // and all background jobs are suspended. func (c *cache) SetMode(m mode.Mode) error { - c.modeMtx.Lock() - defer c.modeMtx.Unlock() + ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode", + trace.WithAttributes( + attribute.String("mode", m.String()), + )) + defer span.End() - return c.setMode(m) + return c.setMode(ctx, m) } // setMode applies new mode. Must be called with cache.modeMtx lock taken. -func (c *cache) setMode(m mode.Mode) error { +func (c *cache) setMode(ctx context.Context, m mode.Mode) error { var err error turnOffMeta := m.NoMetabase() - if turnOffMeta && !c.mode.NoMetabase() { - err = c.flush(true) - if err != nil { - return err - } - } - if !c.initialized.Load() { close(c.stopInitCh) @@ -44,11 +45,21 @@ func (c *cache) setMode(m mode.Mode) error { defer func() { if err == nil && !turnOffMeta { - c.initFlushMarks() + c.initFlushMarks(ctx) } }() } + c.modeMtx.Lock() + defer c.modeMtx.Unlock() + + if turnOffMeta && !c.mode.NoMetabase() { + err = c.flush(ctx, true) + if err != nil { + return err + } + } + if c.db != nil { if err = c.db.Close(); err != nil { return fmt.Errorf("can't close write-cache database: %w", err) @@ -59,7 +70,7 @@ func (c *cache) setMode(m mode.Mode) error { // flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty // guarantees that there are no in-fly operations. 
for len(c.flushCh) != 0 { - c.log.Info("waiting for channels to flush") + c.log.Info(logs.WritecacheWaitingForChannelsToFlush) time.Sleep(time.Second) } diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index cca8986b3..3434e9355 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -19,14 +19,14 @@ type Option func(*options) // meta is an interface for a metabase. type metabase interface { - Exists(meta.ExistsPrm) (meta.ExistsRes, error) - StorageID(meta.StorageIDPrm) (meta.StorageIDRes, error) + Exists(context.Context, meta.ExistsPrm) (meta.ExistsRes, error) + StorageID(context.Context, meta.StorageIDPrm) (meta.StorageIDRes, error) UpdateStorageID(meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error) } // blob is an interface for the blobstor. type blob interface { - Put(common.PutPrm) (common.PutRes, error) + Put(context.Context, common.PutPrm) (common.PutRes, error) NeedsCompression(obj *objectSDK.Object) bool Exists(ctx context.Context, res common.ExistsPrm) (common.ExistsRes, error) } diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go index 7791e93dc..e2535d9e2 100644 --- a/pkg/local_object_storage/writecache/put.go +++ b/pkg/local_object_storage/writecache/put.go @@ -1,11 +1,15 @@ package writecache import ( + "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" "go.etcd.io/bbolt" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) var ( @@ -21,7 +25,14 @@ var ( // Returns ErrNotInitialized if write-cache has not been initialized yet. // Returns ErrOutOfSpace if saving an object leads to WC's size overflow. 
// Returns ErrBigObject if an objects exceeds maximum object size. -func (c *cache) Put(prm common.PutPrm) (common.PutRes, error) { +func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Put", + trace.WithAttributes( + attribute.String("address", prm.Address.EncodeToString()), + attribute.Bool("dont_compress", prm.DontCompress), + )) + defer span.End() + c.modeMtx.RLock() defer c.modeMtx.RUnlock() if c.readOnly() { @@ -44,7 +55,7 @@ func (c *cache) Put(prm common.PutPrm) (common.PutRes, error) { if sz <= c.smallObjectSize { return common.PutRes{}, c.putSmall(oi) } - return common.PutRes{}, c.putBig(oi.addr, prm) + return common.PutRes{}, c.putBig(ctx, oi.addr, prm) } // putSmall persists small objects to the write-cache database and @@ -71,13 +82,13 @@ func (c *cache) putSmall(obj objectInfo) error { } // putBig writes object to FSTree and pushes it to the flush workers queue. -func (c *cache) putBig(addr string, prm common.PutPrm) error { +func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) error { cacheSz := c.estimateCacheSize() if c.maxCacheSize < c.incSizeFS(cacheSz) { return ErrOutOfSpace } - _, err := c.fsTree.Put(prm) + _, err := c.fsTree.Put(ctx, prm) if err != nil { return err } diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go index 667d34cb9..aeae752e3 100644 --- a/pkg/local_object_storage/writecache/storage.go +++ b/pkg/local_object_storage/writecache/storage.go @@ -1,10 +1,12 @@ package writecache import ( + "context" "errors" "fmt" "os" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" @@ -124,7 +126,7 @@ 
func (c *cache) deleteFromDB(keys []string) []string { ) } if err != nil { - c.log.Error("can't remove objects from the database", zap.Error(err)) + c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err)) } copy(keys, keys[errorIndex:]) @@ -141,13 +143,13 @@ func (c *cache) deleteFromDisk(keys []string) []string { for i := range keys { if err := addr.DecodeString(keys[i]); err != nil { - c.log.Error("can't parse address", zap.String("address", keys[i])) + c.log.Error(logs.WritecacheCantParseAddress, zap.String("address", keys[i])) continue } - _, err := c.fsTree.Delete(common.DeletePrm{Address: addr}) + _, err := c.fsTree.Delete(context.TODO(), common.DeletePrm{Address: addr}) if err != nil && !errors.As(err, new(apistatus.ObjectNotFound)) { - c.log.Error("can't remove object from write-cache", zap.Error(err)) + c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err)) // Save the key for the next iteration. keys[copyIndex] = keys[i] diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go index 24070dbda..bdcc9bbf6 100644 --- a/pkg/local_object_storage/writecache/writecache.go +++ b/pkg/local_object_storage/writecache/writecache.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -32,13 +33,13 @@ type Cache interface { // // Returns apistatus.ObjectNotFound if object is missing in the Cache. // Returns ErrReadOnly if the Cache is currently in the read-only mode. 
- Delete(oid.Address) error + Delete(context.Context, oid.Address) error Iterate(IterationPrm) error - Put(common.PutPrm) (common.PutRes, error) + Put(context.Context, common.PutPrm) (common.PutRes, error) SetMode(mode.Mode) error SetLogger(*logger.Logger) DumpInfo() Info - Flush(bool) error + Flush(context.Context, bool) error Init() error Open(readOnly bool) error @@ -152,18 +153,18 @@ func (c *cache) Open(readOnly bool) error { // Init runs necessary services. func (c *cache) Init() error { - c.initFlushMarks() + ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.Init") + defer span.End() + + c.initFlushMarks(ctx) c.runFlushLoop() return nil } // Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op. func (c *cache) Close() error { - c.modeMtx.Lock() - defer c.modeMtx.Unlock() - // Finish all in-progress operations. - if err := c.setMode(mode.ReadOnly); err != nil { + if err := c.setMode(context.TODO(), mode.ReadOnly); err != nil { return err } diff --git a/pkg/metrics/desc.go b/pkg/metrics/desc.go new file mode 100644 index 000000000..74d2d4e6e --- /dev/null +++ b/pkg/metrics/desc.go @@ -0,0 +1,71 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +type metric[T prometheus.Collector] struct { + value T + desc Description +} + +// Description contains a metric description suitable for further processing. +// The only reason for it to exist is `prometheus.Desc` disallowing field access directly.
+// https://github.com/prometheus/client_golang/pull/326 +// https://github.com/prometheus/client_golang/issues/516 +// https://github.com/prometheus/client_golang/issues/222 +type Description struct { + Name string `json:"name"` + Help string `json:"help"` + Type string `json:"type"` + ConstantLabels prometheus.Labels `json:"constant_labels,omitempty"` + VariableLabels []string `json:"variable_labels,omitempty"` +} + +func newGauge(opts prometheus.GaugeOpts) metric[prometheus.Gauge] { + return metric[prometheus.Gauge]{ + value: prometheus.NewGauge(opts), + desc: Description{ + Name: prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + Type: dto.MetricType_GAUGE.String(), + Help: opts.Help, + ConstantLabels: opts.ConstLabels, + }, + } +} + +func newGaugeVec(opts prometheus.GaugeOpts, labelNames []string) metric[*prometheus.GaugeVec] { + return metric[*prometheus.GaugeVec]{ + value: prometheus.NewGaugeVec(opts, labelNames), + desc: Description{ + Name: prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + Type: dto.MetricType_GAUGE.String(), + Help: opts.Help, + ConstantLabels: opts.ConstLabels, + VariableLabels: labelNames, + }, + } +} + +func newCounter(opts prometheus.CounterOpts) metric[prometheus.Counter] { + return metric[prometheus.Counter]{ + value: prometheus.NewCounter(opts), + desc: Description{ + Name: prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + Type: dto.MetricType_COUNTER.String(), + Help: opts.Help, + ConstantLabels: opts.ConstLabels, + }, + } +} + +// DescribeAll returns descriptions for all registered metrics. 
+func DescribeAll() ([]Description, error) { + registeredDescriptionsMtx.Lock() + defer registeredDescriptionsMtx.Unlock() + + ds := make([]Description, len(registeredDescriptions)) + copy(ds, registeredDescriptions) + return ds, nil +} diff --git a/pkg/metrics/desc_test.go b/pkg/metrics/desc_test.go new file mode 100644 index 000000000..28b5e2132 --- /dev/null +++ b/pkg/metrics/desc_test.go @@ -0,0 +1,65 @@ +package metrics + +import ( + "strings" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" +) + +func TestDescribeAll(t *testing.T) { + const ( + namespace = "my_ns" + subsystem = "mysub" + ) + mustRegister(newCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "my_counter", + })) + + labels := []string{"label1", "label2"} + mustRegister(newGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "my_gauge", + }, labels)) + + constLabels := prometheus.Labels{ + "const1": "abc", + "const2": "xyz", + } + mustRegister(newCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "with_const_labels", + ConstLabels: constLabels, + })) + + descriptions, err := DescribeAll() + require.NoError(t, err) + + seen := make(map[string]bool) + for i := range descriptions { + if !strings.HasPrefix(descriptions[i].Name, namespace) { + continue + } + + require.False(t, seen[descriptions[i].Name], "metric %s was seen twice", descriptions[i].Name) + seen[descriptions[i].Name] = true + + switch descriptions[i].Name { + case prometheus.BuildFQName(namespace, subsystem, "my_counter"): + require.True(t, len(descriptions[i].VariableLabels) == 0) + case prometheus.BuildFQName(namespace, subsystem, "my_gauge"): + require.Equal(t, labels, descriptions[i].VariableLabels) + case prometheus.BuildFQName(namespace, subsystem, "with_const_labels"): + require.Equal(t, len(constLabels), len(descriptions[i].ConstantLabels)) + require.Equal(t, 
constLabels, descriptions[i].ConstantLabels) + default: + require.FailNow(t, "unexpected metric name: %s", descriptions[i].Name) + } + } + require.Equal(t, 3, len(seen), "not all registered metrics were iterated over") +} diff --git a/pkg/metrics/engine.go b/pkg/metrics/engine.go index 4c51a55aa..28fc1e028 100644 --- a/pkg/metrics/engine.go +++ b/pkg/metrics/engine.go @@ -10,19 +10,19 @@ import ( type ( engineMetrics struct { - listContainersDuration prometheus.Counter - estimateContainerSizeDuration prometheus.Counter - deleteDuration prometheus.Counter - existsDuration prometheus.Counter - getDuration prometheus.Counter - headDuration prometheus.Counter - inhumeDuration prometheus.Counter - putDuration prometheus.Counter - rangeDuration prometheus.Counter - searchDuration prometheus.Counter - listObjectsDuration prometheus.Counter - containerSize prometheus.GaugeVec - payloadSize prometheus.GaugeVec + listContainersDuration metric[prometheus.Counter] + estimateContainerSizeDuration metric[prometheus.Counter] + deleteDuration metric[prometheus.Counter] + existsDuration metric[prometheus.Counter] + getDuration metric[prometheus.Counter] + headDuration metric[prometheus.Counter] + inhumeDuration metric[prometheus.Counter] + putDuration metric[prometheus.Counter] + rangeDuration metric[prometheus.Counter] + searchDuration metric[prometheus.Counter] + listObjectsDuration metric[prometheus.Counter] + containerSize metric[*prometheus.GaugeVec] + payloadSize metric[*prometheus.GaugeVec] } ) @@ -41,13 +41,13 @@ func newEngineMetrics() engineMetrics { rangeDuration: newEngineMethodDurationCounter("range"), searchDuration: newEngineMethodDurationCounter("search"), listObjectsDuration: newEngineMethodDurationCounter("list_objects"), - containerSize: *newEngineGaugeVector("container_size", "Accumulated size of all objects in a container", []string{containerIDLabelKey}), - payloadSize: *newEngineGaugeVector("payload_size", "Accumulated size of all objects in a shard", 
[]string{shardIDLabelKey}), + containerSize: newEngineGaugeVector("container_size", "Accumulated size of all objects in a container", []string{containerIDLabelKey}), + payloadSize: newEngineGaugeVector("payload_size", "Accumulated size of all objects in a shard", []string{shardIDLabelKey}), } } -func newEngineCounter(name, help string) prometheus.Counter { - return prometheus.NewCounter(prometheus.CounterOpts{ +func newEngineCounter(name, help string) metric[prometheus.Counter] { + return newCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: engineSubsystem, Name: name, @@ -55,15 +55,15 @@ func newEngineCounter(name, help string) prometheus.Counter { }) } -func newEngineMethodDurationCounter(method string) prometheus.Counter { +func newEngineMethodDurationCounter(method string) metric[prometheus.Counter] { return newEngineCounter( fmt.Sprintf("%s_duration", method), fmt.Sprintf("Accumulated duration of engine %s operations", strings.ReplaceAll(method, "_", " ")), ) } -func newEngineGaugeVector(name, help string, labels []string) *prometheus.GaugeVec { - return prometheus.NewGaugeVec(prometheus.GaugeOpts{ +func newEngineGaugeVector(name, help string, labels []string) metric[*prometheus.GaugeVec] { + return newGaugeVec(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: engineSubsystem, Name: name, @@ -72,69 +72,69 @@ func newEngineGaugeVector(name, help string, labels []string) *prometheus.GaugeV } func (m engineMetrics) register() { - prometheus.MustRegister(m.listContainersDuration) - prometheus.MustRegister(m.estimateContainerSizeDuration) - prometheus.MustRegister(m.deleteDuration) - prometheus.MustRegister(m.existsDuration) - prometheus.MustRegister(m.getDuration) - prometheus.MustRegister(m.headDuration) - prometheus.MustRegister(m.inhumeDuration) - prometheus.MustRegister(m.putDuration) - prometheus.MustRegister(m.rangeDuration) - prometheus.MustRegister(m.searchDuration) - prometheus.MustRegister(m.listObjectsDuration) - 
prometheus.MustRegister(m.containerSize) - prometheus.MustRegister(m.payloadSize) + mustRegister(m.listContainersDuration) + mustRegister(m.estimateContainerSizeDuration) + mustRegister(m.deleteDuration) + mustRegister(m.existsDuration) + mustRegister(m.getDuration) + mustRegister(m.headDuration) + mustRegister(m.inhumeDuration) + mustRegister(m.putDuration) + mustRegister(m.rangeDuration) + mustRegister(m.searchDuration) + mustRegister(m.listObjectsDuration) + mustRegister(m.containerSize) + mustRegister(m.payloadSize) } func (m engineMetrics) AddListContainersDuration(d time.Duration) { - m.listObjectsDuration.Add(float64(d)) + m.listObjectsDuration.value.Add(float64(d)) } func (m engineMetrics) AddEstimateContainerSizeDuration(d time.Duration) { - m.estimateContainerSizeDuration.Add(float64(d)) + m.estimateContainerSizeDuration.value.Add(float64(d)) } func (m engineMetrics) AddDeleteDuration(d time.Duration) { - m.deleteDuration.Add(float64(d)) + m.deleteDuration.value.Add(float64(d)) } func (m engineMetrics) AddExistsDuration(d time.Duration) { - m.existsDuration.Add(float64(d)) + m.existsDuration.value.Add(float64(d)) } func (m engineMetrics) AddGetDuration(d time.Duration) { - m.getDuration.Add(float64(d)) + m.getDuration.value.Add(float64(d)) } func (m engineMetrics) AddHeadDuration(d time.Duration) { - m.headDuration.Add(float64(d)) + m.headDuration.value.Add(float64(d)) } func (m engineMetrics) AddInhumeDuration(d time.Duration) { - m.inhumeDuration.Add(float64(d)) + m.inhumeDuration.value.Add(float64(d)) } func (m engineMetrics) AddPutDuration(d time.Duration) { - m.putDuration.Add(float64(d)) + m.putDuration.value.Add(float64(d)) } func (m engineMetrics) AddRangeDuration(d time.Duration) { - m.rangeDuration.Add(float64(d)) + m.rangeDuration.value.Add(float64(d)) } func (m engineMetrics) AddSearchDuration(d time.Duration) { - m.searchDuration.Add(float64(d)) + m.searchDuration.value.Add(float64(d)) } func (m engineMetrics) AddListObjectsDuration(d 
time.Duration) { - m.listObjectsDuration.Add(float64(d)) + m.listObjectsDuration.value.Add(float64(d)) } func (m engineMetrics) AddToContainerSize(cnrID string, size int64) { - m.containerSize.With(prometheus.Labels{containerIDLabelKey: cnrID}).Add(float64(size)) + m.containerSize.value.With(prometheus.Labels{containerIDLabelKey: cnrID}).Add(float64(size)) } func (m engineMetrics) AddToPayloadCounter(shardID string, size int64) { - m.payloadSize.With(prometheus.Labels{shardIDLabelKey: shardID}).Add(float64(size)) + m.payloadSize.value.With(prometheus.Labels{shardIDLabelKey: shardID}).Add(float64(size)) } diff --git a/pkg/metrics/innerring.go b/pkg/metrics/innerring.go index 55b0aa089..79db424b8 100644 --- a/pkg/metrics/innerring.go +++ b/pkg/metrics/innerring.go @@ -6,20 +6,20 @@ const innerRingSubsystem = "ir" // InnerRingServiceMetrics contains metrics collected by inner ring. type InnerRingServiceMetrics struct { - epoch prometheus.Gauge - health prometheus.Gauge + epoch metric[prometheus.Gauge] + health metric[prometheus.Gauge] } // NewInnerRingMetrics returns new instance of metrics collectors for inner ring. 
-func NewInnerRingMetrics() InnerRingServiceMetrics { +func NewInnerRingMetrics() *InnerRingServiceMetrics { var ( - epoch = prometheus.NewGauge(prometheus.GaugeOpts{ + epoch = newGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: innerRingSubsystem, Name: "epoch", Help: "Current epoch as seen by inner-ring node.", }) - health = prometheus.NewGauge(prometheus.GaugeOpts{ + health = newGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: innerRingSubsystem, Name: "health", @@ -27,10 +27,10 @@ func NewInnerRingMetrics() InnerRingServiceMetrics { }) ) - prometheus.MustRegister(epoch) - prometheus.MustRegister(health) + mustRegister(epoch) + mustRegister(health) - return InnerRingServiceMetrics{ + return &InnerRingServiceMetrics{ epoch: epoch, health: health, } @@ -38,10 +38,10 @@ func NewInnerRingMetrics() InnerRingServiceMetrics { // SetEpoch updates epoch metrics. func (m InnerRingServiceMetrics) SetEpoch(epoch uint64) { - m.epoch.Set(float64(epoch)) + m.epoch.value.Set(float64(epoch)) } // SetHealth updates health metrics. 
func (m InnerRingServiceMetrics) SetHealth(s int32) { - m.health.Set(float64(s)) + m.health.value.Set(float64(s)) } diff --git a/pkg/metrics/metrics.go b/pkg/metrics/node.go similarity index 76% rename from pkg/metrics/metrics.go rename to pkg/metrics/node.go index 640da2d5f..bf12e610f 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/node.go @@ -8,7 +8,8 @@ type NodeMetrics struct { objectServiceMetrics engineMetrics stateMetrics - epoch prometheus.Gauge + replicatorMetrics + epoch metric[prometheus.Gauge] } func NewNodeMetrics() *NodeMetrics { @@ -21,23 +22,27 @@ func NewNodeMetrics() *NodeMetrics { state := newStateMetrics() state.register() - epoch := prometheus.NewGauge(prometheus.GaugeOpts{ + replicator := newReplicatorMetrics() + replicator.register() + + epoch := newGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: innerRingSubsystem, Name: "epoch", Help: "Current epoch as seen by inner-ring node.", }) - prometheus.MustRegister(epoch) + mustRegister(epoch) return &NodeMetrics{ objectServiceMetrics: objectService, engineMetrics: engine, stateMetrics: state, + replicatorMetrics: replicator, epoch: epoch, } } // SetEpoch updates epoch metric. 
func (m *NodeMetrics) SetEpoch(epoch uint64) { - m.epoch.Set(float64(epoch)) + m.epoch.value.Set(float64(epoch)) } diff --git a/pkg/metrics/object.go b/pkg/metrics/object.go index fae86cb4a..5ec575749 100644 --- a/pkg/metrics/object.go +++ b/pkg/metrics/object.go @@ -12,8 +12,8 @@ const objectSubsystem = "object" type ( methodCount struct { - success prometheus.Counter - total prometheus.Counter + success metric[prometheus.Counter] + total metric[prometheus.Counter] } objectServiceMetrics struct { @@ -25,19 +25,19 @@ type ( rangeCounter methodCount rangeHashCounter methodCount - getDuration prometheus.Counter - putDuration prometheus.Counter - headDuration prometheus.Counter - searchDuration prometheus.Counter - deleteDuration prometheus.Counter - rangeDuration prometheus.Counter - rangeHashDuration prometheus.Counter + getDuration metric[prometheus.Counter] + putDuration metric[prometheus.Counter] + headDuration metric[prometheus.Counter] + searchDuration metric[prometheus.Counter] + deleteDuration metric[prometheus.Counter] + rangeDuration metric[prometheus.Counter] + rangeHashDuration metric[prometheus.Counter] - putPayload prometheus.Counter - getPayload prometheus.Counter + putPayload metric[prometheus.Counter] + getPayload metric[prometheus.Counter] - shardMetrics *prometheus.GaugeVec - shardsReadonly *prometheus.GaugeVec + shardMetrics metric[*prometheus.GaugeVec] + shardsReadonly metric[*prometheus.GaugeVec] } ) @@ -49,13 +49,13 @@ const ( func newObjectMethodCallCounter(name string) methodCount { return methodCount{ - success: prometheus.NewCounter(prometheus.CounterOpts{ + success: newCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: objectSubsystem, Name: fmt.Sprintf("%s_req_count_success", name), Help: fmt.Sprintf("The number of successful %s requests processed", name), }), - total: prometheus.NewCounter(prometheus.CounterOpts{ + total: newCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: objectSubsystem, Name: 
fmt.Sprintf("%s_req_count", name), @@ -65,14 +65,14 @@ func newObjectMethodCallCounter(name string) methodCount { } func (m methodCount) mustRegister() { - prometheus.MustRegister(m.success) - prometheus.MustRegister(m.total) + mustRegister(m.success) + mustRegister(m.total) } func (m methodCount) Inc(success bool) { - m.total.Inc() + m.total.value.Inc() if success { - m.success.Inc() + m.success.value.Inc() } } @@ -99,8 +99,8 @@ func newObjectServiceMetrics() objectServiceMetrics { } } -func newObjectMethodPayloadCounter(method string) prometheus.Counter { - return prometheus.NewCounter(prometheus.CounterOpts{ +func newObjectMethodPayloadCounter(method string) metric[prometheus.Counter] { + return newCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: objectSubsystem, Name: fmt.Sprintf("%s_payload", method), @@ -108,8 +108,8 @@ func newObjectMethodPayloadCounter(method string) prometheus.Counter { }) } -func newObjectMethodDurationCounter(method string) prometheus.Counter { - return prometheus.NewCounter(prometheus.CounterOpts{ +func newObjectMethodDurationCounter(method string) metric[prometheus.Counter] { + return newCounter(prometheus.CounterOpts{ Namespace: namespace, Subsystem: objectSubsystem, Name: fmt.Sprintf("%s_req_duration", method), @@ -117,8 +117,8 @@ func newObjectMethodDurationCounter(method string) prometheus.Counter { }) } -func newObjectGaugeVector(name, help string, labels []string) *prometheus.GaugeVec { - return prometheus.NewGaugeVec(prometheus.GaugeOpts{ +func newObjectGaugeVector(name, help string, labels []string) metric[*prometheus.GaugeVec] { + return newGaugeVec(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: objectSubsystem, Name: name, @@ -135,19 +135,19 @@ func (m objectServiceMetrics) register() { m.rangeCounter.mustRegister() m.rangeHashCounter.mustRegister() - prometheus.MustRegister(m.getDuration) - prometheus.MustRegister(m.putDuration) - prometheus.MustRegister(m.headDuration) - 
prometheus.MustRegister(m.searchDuration) - prometheus.MustRegister(m.deleteDuration) - prometheus.MustRegister(m.rangeDuration) - prometheus.MustRegister(m.rangeHashDuration) + mustRegister(m.getDuration) + mustRegister(m.putDuration) + mustRegister(m.headDuration) + mustRegister(m.searchDuration) + mustRegister(m.deleteDuration) + mustRegister(m.rangeDuration) + mustRegister(m.rangeHashDuration) - prometheus.MustRegister(m.putPayload) - prometheus.MustRegister(m.getPayload) + mustRegister(m.putPayload) + mustRegister(m.getPayload) - prometheus.MustRegister(m.shardMetrics) - prometheus.MustRegister(m.shardsReadonly) + mustRegister(m.shardMetrics) + mustRegister(m.shardsReadonly) } func (m objectServiceMetrics) IncGetReqCounter(success bool) { @@ -179,43 +179,43 @@ func (m objectServiceMetrics) IncRangeHashReqCounter(success bool) { } func (m objectServiceMetrics) AddGetReqDuration(d time.Duration) { - m.getDuration.Add(float64(d)) + m.getDuration.value.Add(float64(d)) } func (m objectServiceMetrics) AddPutReqDuration(d time.Duration) { - m.putDuration.Add(float64(d)) + m.putDuration.value.Add(float64(d)) } func (m objectServiceMetrics) AddHeadReqDuration(d time.Duration) { - m.headDuration.Add(float64(d)) + m.headDuration.value.Add(float64(d)) } func (m objectServiceMetrics) AddSearchReqDuration(d time.Duration) { - m.searchDuration.Add(float64(d)) + m.searchDuration.value.Add(float64(d)) } func (m objectServiceMetrics) AddDeleteReqDuration(d time.Duration) { - m.deleteDuration.Add(float64(d)) + m.deleteDuration.value.Add(float64(d)) } func (m objectServiceMetrics) AddRangeReqDuration(d time.Duration) { - m.rangeDuration.Add(float64(d)) + m.rangeDuration.value.Add(float64(d)) } func (m objectServiceMetrics) AddRangeHashReqDuration(d time.Duration) { - m.rangeHashDuration.Add(float64(d)) + m.rangeHashDuration.value.Add(float64(d)) } func (m objectServiceMetrics) AddPutPayload(ln int) { - m.putPayload.Add(float64(ln)) + m.putPayload.value.Add(float64(ln)) } func (m 
objectServiceMetrics) AddGetPayload(ln int) { - m.getPayload.Add(float64(ln)) + m.getPayload.value.Add(float64(ln)) } func (m objectServiceMetrics) AddToObjectCounter(shardID, objectType string, delta int) { - m.shardMetrics.With( + m.shardMetrics.value.With( prometheus.Labels{ shardIDLabelKey: shardID, counterTypeLabelKey: objectType, @@ -224,7 +224,7 @@ func (m objectServiceMetrics) AddToObjectCounter(shardID, objectType string, del } func (m objectServiceMetrics) SetObjectCounter(shardID, objectType string, v uint64) { - m.shardMetrics.With( + m.shardMetrics.value.With( prometheus.Labels{ shardIDLabelKey: shardID, counterTypeLabelKey: objectType, @@ -237,7 +237,7 @@ func (m objectServiceMetrics) SetReadonly(shardID string, readonly bool) { if readonly { flag = 1 } - m.shardsReadonly.With( + m.shardsReadonly.value.With( prometheus.Labels{ shardIDLabelKey: shardID, }, diff --git a/pkg/metrics/registry.go b/pkg/metrics/registry.go new file mode 100644 index 000000000..eef613d04 --- /dev/null +++ b/pkg/metrics/registry.go @@ -0,0 +1,42 @@ +package metrics + +import ( + "net/http" + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// Handler returns an http.Handler for the local registry. +func Handler() http.Handler { + promhttp.Handler() + return promhttp.InstrumentMetricHandler( + registry, + promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) +} + +var ( + registry = prometheus.NewRegistry() + // registeredDescriptionsMtx protects the registeredDescriptions slice. + // It should not be accessed concurrently, but we can easily forget this in future, thus this mutex.
+ registeredDescriptionsMtx sync.Mutex + registeredDescriptions []Description +) + +func init() { + registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + registry.MustRegister(collectors.NewGoCollector()) +} + +func mustRegister[T prometheus.Collector](cs ...metric[T]) { + for i := range cs { + registry.MustRegister(cs[i].value) + } + registeredDescriptionsMtx.Lock() + for i := range cs { + registeredDescriptions = append(registeredDescriptions, cs[i].desc) + } + registeredDescriptionsMtx.Unlock() +} diff --git a/pkg/metrics/replicator.go b/pkg/metrics/replicator.go new file mode 100644 index 000000000..55f736c66 --- /dev/null +++ b/pkg/metrics/replicator.go @@ -0,0 +1,59 @@ +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +const replicatorSubsystem = "replicator" + +type replicatorMetrics struct { + inFlightRequests metric[prometheus.Gauge] + processedObjects metric[prometheus.Counter] + totalReplicatedPayloadSize metric[prometheus.Counter] +} + +func (m replicatorMetrics) IncInFlightRequest() { + m.inFlightRequests.value.Inc() +} + +func (m replicatorMetrics) DecInFlightRequest() { + m.inFlightRequests.value.Dec() +} + +func (m replicatorMetrics) IncProcessedObjects() { + m.processedObjects.value.Inc() +} + +func (m replicatorMetrics) AddPayloadSize(size int64) { + m.totalReplicatedPayloadSize.value.Add(float64(size)) +} + +func newReplicatorMetrics() replicatorMetrics { + return replicatorMetrics{ + inFlightRequests: newReplicatorGauge("in_flight_requests", "Number of in-flight requests"), + processedObjects: newReplicatorCounter("processed_objects", "Number of objects processed since the node startup"), + totalReplicatedPayloadSize: newReplicatorCounter("total_replicated_payload_size", "Total size of payloads replicated"), + } +} + +func (m replicatorMetrics) register() { + mustRegister(m.inFlightRequests) + mustRegister(m.processedObjects) + mustRegister(m.totalReplicatedPayloadSize) +} + +func 
newReplicatorCounter(name, help string) metric[prometheus.Counter] { + return newCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: replicatorSubsystem, + Name: name, + Help: help, + }) +} + +func newReplicatorGauge(name, help string) metric[prometheus.Gauge] { + return newGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: replicatorSubsystem, + Name: name, + Help: help, + }) +} diff --git a/pkg/metrics/state.go b/pkg/metrics/state.go index 94e28af38..dce0402cd 100644 --- a/pkg/metrics/state.go +++ b/pkg/metrics/state.go @@ -5,12 +5,12 @@ import "github.com/prometheus/client_golang/prometheus" const stateSubsystem = "state" type stateMetrics struct { - healthCheck prometheus.Gauge + healthCheck metric[prometheus.Gauge] } func newStateMetrics() stateMetrics { return stateMetrics{ - healthCheck: prometheus.NewGauge(prometheus.GaugeOpts{ + healthCheck: newGauge(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: stateSubsystem, Name: "health", @@ -20,9 +20,9 @@ func newStateMetrics() stateMetrics { } func (m stateMetrics) register() { - prometheus.MustRegister(m.healthCheck) + mustRegister(m.healthCheck) } func (m stateMetrics) SetHealth(s int32) { - m.healthCheck.Set(float64(s)) + m.healthCheck.value.Set(float64(s)) } diff --git a/pkg/morph/client/audit/client.go b/pkg/morph/client/audit/client.go deleted file mode 100644 index b922fc792..000000000 --- a/pkg/morph/client/audit/client.go +++ /dev/null @@ -1,40 +0,0 @@ -package audit - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Client is a wrapper over StaticClient -// which makes calls with the names and arguments -// of the FrostFS Audit contract. -// -// Working client must be created via constructor New. 
-// Using the Client that has been created with new(Client) -// expression (or just declaring a Client variable) is unsafe -// and can lead to panic. -type Client struct { - client *client.StaticClient // static Audit contract client -} - -const ( - putResultMethod = "put" - getResultMethod = "get" - listResultsMethod = "list" - listByEpochResultsMethod = "listByEpoch" - listByCIDResultsMethod = "listByCID" - listByNodeResultsMethod = "listByNode" -) - -// NewFromMorph returns the wrapper instance from the raw morph client. -func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...client.StaticClientOption) (*Client, error) { - sc, err := client.NewStatic(cli, contract, fee, opts...) - if err != nil { - return nil, fmt.Errorf("could not create static client of audit contract: %w", err) - } - - return &Client{client: sc}, nil -} diff --git a/pkg/morph/client/audit/get_result.go b/pkg/morph/client/audit/get_result.go deleted file mode 100644 index 9e1b75db4..000000000 --- a/pkg/morph/client/audit/get_result.go +++ /dev/null @@ -1,34 +0,0 @@ -package audit - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - auditAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit" -) - -// GetAuditResult returns audit result structure stored in audit contract. 
-func (c *Client) GetAuditResult(id ResultID) (*auditAPI.Result, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(getResultMethod) - prm.SetArgs([]byte(id)) - - prms, err := c.client.TestInvoke(prm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", getResultMethod, err) - } else if ln := len(prms); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", getResultMethod, ln) - } - - value, err := client.BytesFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", getResultMethod, err) - } - - var auditRes auditAPI.Result - if err := auditRes.Unmarshal(value); err != nil { - return nil, fmt.Errorf("could not unmarshal audit result structure: %w", err) - } - - return &auditRes, nil -} diff --git a/pkg/morph/client/audit/list_results.go b/pkg/morph/client/audit/list_results.go deleted file mode 100644 index ace01d15b..000000000 --- a/pkg/morph/client/audit/list_results.go +++ /dev/null @@ -1,93 +0,0 @@ -package audit - -import ( - "crypto/sha256" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -// ListAllAuditResultID returns a list of all audit result IDs inside audit contract. -func (c *Client) ListAllAuditResultID() ([]ResultID, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(listResultsMethod) - - items, err := c.client.TestInvoke(invokePrm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listResultsMethod, err) - } - return parseAuditResults(items, listResultsMethod) -} - -// ListAuditResultIDByEpoch returns a list of audit result IDs inside audit -// contract for specific epoch number. 
-func (c *Client) ListAuditResultIDByEpoch(epoch uint64) ([]ResultID, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(listByEpochResultsMethod) - prm.SetArgs(epoch) - - items, err := c.client.TestInvoke(prm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listByEpochResultsMethod, err) - } - return parseAuditResults(items, listByEpochResultsMethod) -} - -// ListAuditResultIDByCID returns a list of audit result IDs inside audit -// contract for specific epoch number and container ID. -func (c *Client) ListAuditResultIDByCID(epoch uint64, cnr cid.ID) ([]ResultID, error) { - binCnr := make([]byte, sha256.Size) - cnr.Encode(binCnr) - - prm := client.TestInvokePrm{} - prm.SetMethod(listByCIDResultsMethod) - prm.SetArgs(epoch, binCnr) - - items, err := c.client.TestInvoke(prm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listByCIDResultsMethod, err) - } - return parseAuditResults(items, listByCIDResultsMethod) -} - -// ListAuditResultIDByNode returns a list of audit result IDs inside audit -// contract for specific epoch number, container ID and inner ring public key. 
-func (c *Client) ListAuditResultIDByNode(epoch uint64, cnr cid.ID, nodeKey []byte) ([]ResultID, error) { - binCnr := make([]byte, sha256.Size) - cnr.Encode(binCnr) - - prm := client.TestInvokePrm{} - prm.SetMethod(listByNodeResultsMethod) - prm.SetArgs(epoch, binCnr, nodeKey) - - items, err := c.client.TestInvoke(prm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listByNodeResultsMethod, err) - } - return parseAuditResults(items, listByNodeResultsMethod) -} - -func parseAuditResults(items []stackitem.Item, method string) ([]ResultID, error) { - if ln := len(items); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", method, ln) - } - - items, err := client.ArrayFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err) - } - - res := make([]ResultID, 0, len(items)) - for i := range items { - rawRes, err := client.BytesFromStackItem(items[i]) - if err != nil { - return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", method, err) - } - - res = append(res, rawRes) - } - - return res, nil -} diff --git a/pkg/morph/client/audit/put_result.go b/pkg/morph/client/audit/put_result.go deleted file mode 100644 index f8e233b26..000000000 --- a/pkg/morph/client/audit/put_result.go +++ /dev/null @@ -1,40 +0,0 @@ -package audit - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - auditAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit" -) - -// ResultID is an identity of audit result inside audit contract. -type ResultID []byte - -// PutPrm groups parameters of PutAuditResult operation. -type PutPrm struct { - result *auditAPI.Result - - client.InvokePrmOptional -} - -// SetResult sets audit result. 
-func (p *PutPrm) SetResult(result *auditAPI.Result) { - p.result = result -} - -// PutAuditResult saves passed audit result structure in FrostFS system -// through Audit contract call. -// -// Returns encountered error that caused the saving to interrupt. -func (c *Client) PutAuditResult(p PutPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(putResultMethod) - prm.SetArgs(p.result.Marshal()) - prm.InvokePrmOptional = p.InvokePrmOptional - - err := c.client.Invoke(prm) - if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", putResultMethod, err) - } - return nil -} diff --git a/pkg/morph/client/audit/result_test.go b/pkg/morph/client/audit/result_test.go deleted file mode 100644 index 5ce1cc740..000000000 --- a/pkg/morph/client/audit/result_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package audit - -import ( - "context" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - auditAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/stretchr/testify/require" -) - -func TestAuditResults(t *testing.T) { - t.Skip() - const epoch = 11 - - endpoint := "http://morph_chain.frostfs.devenv:30333" - sAuditHash := "cdfb3dab86e6d60e8a143d9e2ecb0b188f3dc2eb" - irKeyWIF := "L3o221BojgcCPYgdbXsm6jn7ayTZ72xwREvBHXKknR8VJ3G4WmjB" - - key, err := keys.NewPrivateKeyFromWIF(irKeyWIF) - require.NoError(t, err) - - auditHash, err := util.Uint160DecodeStringLE(sAuditHash) - require.NoError(t, err) - - morphClient, err := client.New(context.Background(), key, client.WithEndpoints(client.Endpoint{Address: endpoint})) - require.NoError(t, err) - - auditClientWrapper, err := NewFromMorph(morphClient, auditHash, 0) - require.NoError(t, err) - - id := cidtest.ID() - - var auditRes auditAPI.Result - auditRes.ForEpoch(epoch) - 
auditRes.SetAuditorKey(key.PublicKey().Bytes()) - auditRes.ForContainer(id) - - prm := PutPrm{} - prm.SetResult(&auditRes) - - require.NoError(t, auditClientWrapper.PutAuditResult(prm)) - - time.Sleep(5 * time.Second) - - list, err := auditClientWrapper.ListAuditResultIDByCID(epoch, id) - require.NoError(t, err) - require.Len(t, list, 1) - - savedAuditRes, err := auditClientWrapper.GetAuditResult(list[0]) - require.NoError(t, err) - - require.Equal(t, auditRes, savedAuditRes) -} diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index b93c5f75f..c9ff14a66 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -8,6 +8,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" @@ -20,7 +21,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" "github.com/nspcc-dev/neo-go/pkg/rpcclient/rolemgmt" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" - sc "github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -57,8 +57,6 @@ type Client struct { acc *wallet.Account // neo account accAddr util.Uint160 // account's address - signer *transaction.Signer - notary *notaryInfo cfg cfg @@ -70,9 +68,6 @@ type Client struct { // on every normal call. switchLock *sync.RWMutex - notifications chan rpcclient.Notification - subsInfo // protected with switchLock - // channel for internal stop closeChan chan struct{} @@ -157,8 +152,6 @@ func (e *notHaltStateError) Error() string { ) } -var errEmptyInvocationScript = errors.New("got empty invocation script from neo node") - // implementation of error interface for FrostFS-specific errors. 
type frostfsError struct { err error @@ -188,7 +181,7 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, return fmt.Errorf("could not invoke %s: %w", method, err) } - c.logger.Debug("neo client invoke", + c.logger.Debug(logs.ClientNeoClientInvoke, zap.String("method", method), zap.Uint32("vub", vub), zap.Stringer("tx_hash", txHash.Reverse())) @@ -271,7 +264,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error return err } - c.logger.Debug("native gas transfer invoke", + c.logger.Debug(logs.ClientNativeGasTransferInvoke, zap.String("to", receiver.StringLE()), zap.Stringer("tx_hash", txHash.Reverse()), zap.Uint32("vub", vub)) @@ -305,7 +298,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8 return err } - c.logger.Debug("batch gas transfer invoke", + c.logger.Debug(logs.ClientBatchGasTransferInvoke, zap.Strings("to", receiversLog), zap.Stringer("tx_hash", txHash.Reverse()), zap.Uint32("vub", vub)) @@ -332,7 +325,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { height, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error("can't get blockchain height", + c.logger.Error(logs.ClientCantGetBlockchainHeight, zap.String("error", err.Error())) return nil } @@ -346,7 +339,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { newHeight, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error("can't get blockchain height", + c.logger.Error(logs.ClientCantGetBlockchainHeight243, zap.String("error", err.Error())) return nil } @@ -450,64 +443,6 @@ func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) { return c.rolemgmt.GetDesignatedByRole(r, height) } -// tries to resolve sc.Parameter from the arg. -// -// Wraps any error to frostfsError. 
-func toStackParameter(value any) (sc.Parameter, error) { - var res = sc.Parameter{ - Value: value, - } - - switch v := value.(type) { - case []byte: - res.Type = sc.ByteArrayType - case int: - res.Type = sc.IntegerType - res.Value = big.NewInt(int64(v)) - case int64: - res.Type = sc.IntegerType - res.Value = big.NewInt(v) - case uint64: - res.Type = sc.IntegerType - res.Value = new(big.Int).SetUint64(v) - case [][]byte: - arr := make([]sc.Parameter, 0, len(v)) - for i := range v { - elem, err := toStackParameter(v[i]) - if err != nil { - return res, err - } - - arr = append(arr, elem) - } - - res.Type = sc.ArrayType - res.Value = arr - case string: - res.Type = sc.StringType - case util.Uint160: - res.Type = sc.ByteArrayType - res.Value = v.BytesBE() - case noderoles.Role: - res.Type = sc.IntegerType - res.Value = big.NewInt(int64(v)) - case keys.PublicKeys: - arr := make([][]byte, 0, len(v)) - for i := range v { - arr = append(arr, v[i].Bytes()) - } - - return toStackParameter(arr) - case bool: - res.Type = sc.BoolType - res.Value = v - default: - return res, wrapFrostFSError(fmt.Errorf("chain/client: unsupported parameter %v", value)) - } - - return res, nil -} - // MagicNumber returns the magic number of the network // to which the underlying RPC node client is connected. func (c *Client) MagicNumber() (uint64, error) { @@ -567,26 +502,11 @@ func (c *Client) IsValidScript(script []byte, signers []transaction.Signer) (val // NotificationChannel returns channel than receives subscribed // notification from the connected RPC node. -// Channel is closed when connection to the RPC node has been -// lost without the possibility of recovery. +// Channel is closed when connection to the RPC node is lost. 
func (c *Client) NotificationChannel() <-chan rpcclient.Notification { - return c.notifications -} - -// inactiveMode switches Client to an inactive mode: -// - notification channel is closed; -// - all the new RPC request would return ErrConnectionLost; -// - inactiveModeCb is called if not nil. -func (c *Client) inactiveMode() { - c.switchLock.Lock() - defer c.switchLock.Unlock() - - close(c.notifications) - c.inactive = true - - if c.cfg.inactiveModeCb != nil { - c.cfg.inactiveModeCb() - } + c.switchLock.RLock() + defer c.switchLock.RUnlock() + return c.client.Notifications //lint:ignore SA1019 waits for neo-go v0.102.0 https://github.com/nspcc-dev/neo-go/pull/2980 } func (c *Client) setActor(act *actor.Actor) { diff --git a/pkg/morph/client/client_test.go b/pkg/morph/client/client_test.go deleted file mode 100644 index a448c2cf4..000000000 --- a/pkg/morph/client/client_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package client - -import ( - "math/big" - "testing" - - sc "github.com/nspcc-dev/neo-go/pkg/smartcontract" - "github.com/stretchr/testify/require" -) - -func TestToStackParameter(t *testing.T) { - items := []struct { - value any - expType sc.ParamType - expVal any - }{ - { - value: []byte{1, 2, 3}, - expType: sc.ByteArrayType, - }, - { - value: int64(100), - expType: sc.IntegerType, - expVal: big.NewInt(100), - }, - { - value: uint64(100), - expType: sc.IntegerType, - expVal: big.NewInt(100), - }, - { - value: "hello world", - expType: sc.StringType, - }, - { - value: false, - expType: sc.BoolType, - }, - { - value: true, - expType: sc.BoolType, - }, - } - - for _, item := range items { - t.Run(item.expType.String()+" to stack parameter", func(t *testing.T) { - res, err := toStackParameter(item.value) - require.NoError(t, err) - require.Equal(t, item.expType, res.Type) - if item.expVal != nil { - require.Equal(t, item.expVal, res.Value) - } else { - require.Equal(t, item.value, res.Value) - } - }) - } -} diff --git a/pkg/morph/client/constructor.go 
b/pkg/morph/client/constructor.go index 9ed275029..1f2a1eb8d 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -7,13 +7,11 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/util" @@ -52,6 +50,10 @@ const ( defaultWaitInterval = 500 * time.Millisecond ) +var ( + ErrNoHealthyEndpoint = errors.New("no healthy endpoint") +) + func defaultConfig() *cfg { return &cfg{ dialTimeout: defaultDialTimeout, @@ -80,6 +82,8 @@ func defaultConfig() *cfg { // If desired option satisfies the default value, it can be omitted. // If multiple options of the same config value are supplied, // the option with the highest index in the arguments will be used. +// If the list of endpoints provided - uses first alive. +// If there are no healthy endpoint - returns ErrNoHealthyEndpoint. 
func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, error) { if key == nil { panic("empty private key") @@ -101,22 +105,13 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er } cli := &Client{ - cache: newClientCache(), - logger: cfg.logger, - acc: acc, - accAddr: accAddr, - signer: cfg.signer, - cfg: *cfg, - switchLock: &sync.RWMutex{}, - notifications: make(chan rpcclient.Notification), - subsInfo: subsInfo{ - blockRcv: make(chan *block.Block), - notificationRcv: make(chan *state.ContainedNotificationEvent), - notaryReqRcv: make(chan *result.NotaryRequestEvent), - subscribedEvents: make(map[util.Uint160]string), - subscribedNotaryEvents: make(map[util.Uint160]string), - }, - closeChan: make(chan struct{}), + cache: newClientCache(), + logger: cfg.logger, + acc: acc, + accAddr: accAddr, + cfg: *cfg, + switchLock: &sync.RWMutex{}, + closeChan: make(chan struct{}), } cli.endpoints.init(cfg.endpoints) @@ -138,14 +133,25 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er return nil, fmt.Errorf("could not create RPC actor: %w", err) } } else { - cli.client, act, err = cli.newCli(ctx, cli.endpoints.list[0].Address) - if err != nil { - return nil, fmt.Errorf("could not create RPC client: %w", err) + var endpoint Endpoint + for cli.endpoints.curr, endpoint = range cli.endpoints.list { + cli.client, act, err = cli.newCli(ctx, endpoint.Address) + if err != nil { + cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint, + zap.Error(err), zap.String("endpoint", endpoint.Address)) + } else { + cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint, + zap.String("endpoint", endpoint.Address)) + break + } + } + if cli.client == nil { + return nil, ErrNoHealthyEndpoint } } cli.setActor(act) - go cli.notificationLoop(ctx) + go cli.closeWaiter(ctx) return cli, nil } diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go index 4775cd281..009b22f3c 100644 
--- a/pkg/morph/client/container/get.go +++ b/pkg/morph/client/container/get.go @@ -26,8 +26,12 @@ func AsContainerSource(w *Client) containercore.Source { return (*containerSource)(w) } +type getContainer interface { + Get(cid []byte) (*containercore.Container, error) +} + // Get marshals container ID, and passes it to Wrapper's Get method. -func Get(c *Client, cnr cid.ID) (*containercore.Container, error) { +func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) { binCnr := make([]byte, sha256.Size) cnr.Encode(binCnr) diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go index 5d736839a..e006ca69a 100644 --- a/pkg/morph/client/multi.go +++ b/pkg/morph/client/multi.go @@ -5,11 +5,7 @@ import ( "sort" "time" - "github.com/nspcc-dev/neo-go/pkg/core/block" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/neorpc" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "go.uber.org/zap" ) @@ -33,7 +29,8 @@ func (e *endpoints) init(ee []Endpoint) { e.list = ee } -func (c *Client) switchRPC(ctx context.Context) bool { +// SwitchRPC performs reconnection and returns true if it was successful. 
+func (c *Client) SwitchRPC(ctx context.Context) bool { c.switchLock.Lock() defer c.switchLock.Unlock() @@ -44,7 +41,7 @@ func (c *Client) switchRPC(ctx context.Context) bool { newEndpoint := c.endpoints.list[c.endpoints.curr].Address cli, act, err := c.newCli(ctx, newEndpoint) if err != nil { - c.logger.Warn("could not establish connection to the switched RPC node", + c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode, zap.String("endpoint", newEndpoint), zap.Error(err), ) @@ -54,23 +51,11 @@ func (c *Client) switchRPC(ctx context.Context) bool { c.cache.invalidate() - c.logger.Info("connection to the new RPC node has been established", + c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished, zap.String("endpoint", newEndpoint)) - subs, ok := c.restoreSubscriptions(ctx, cli, newEndpoint, false) - if !ok { - // new WS client does not allow - // restoring subscription, client - // could not work correctly => - // closing connection to RPC node - // to switch to another one - cli.Close() - continue - } - c.client = cli c.setActor(act) - c.subsInfo = subs if c.cfg.switchInterval != 0 && !c.switchIsActive.Load() && c.endpoints.list[c.endpoints.curr].Priority != c.endpoints.list[0].Priority { @@ -81,97 +66,21 @@ func (c *Client) switchRPC(ctx context.Context) bool { return true } + c.inactive = true + + if c.cfg.inactiveModeCb != nil { + c.cfg.inactiveModeCb() + } return false } -func (c *Client) notificationLoop(ctx context.Context) { - var e any - var ok bool - - for { - c.switchLock.RLock() - bChan := c.blockRcv - nChan := c.notificationRcv - nrChan := c.notaryReqRcv - c.switchLock.RUnlock() - - select { - case <-ctx.Done(): - _ = c.UnsubscribeAll() - c.close() - - return - case <-c.closeChan: - _ = c.UnsubscribeAll() - c.close() - - return - case e, ok = <-bChan: - case e, ok = <-nChan: - case e, ok = <-nrChan: - } - - if ok { - c.routeEvent(ctx, e) - continue - } - - if !c.reconnect(ctx) { - return - } - } -} - -func (c *Client) 
routeEvent(ctx context.Context, e any) { - typedNotification := rpcclient.Notification{Value: e} - - switch e.(type) { - case *block.Block: - typedNotification.Type = neorpc.BlockEventID - case *state.ContainedNotificationEvent: - typedNotification.Type = neorpc.NotificationEventID - case *result.NotaryRequestEvent: - typedNotification.Type = neorpc.NotaryRequestEventID - } - +func (c *Client) closeWaiter(ctx context.Context) { select { - case c.notifications <- typedNotification: case <-ctx.Done(): - _ = c.UnsubscribeAll() - c.close() case <-c.closeChan: - _ = c.UnsubscribeAll() - c.close() } -} - -func (c *Client) reconnect(ctx context.Context) bool { - if closeErr := c.client.GetError(); closeErr != nil { - c.logger.Warn("switching to the next RPC node", - zap.String("reason", closeErr.Error()), - ) - } else { - // neo-go client was closed by calling `Close` - // method, that happens only when a client has - // switched to the more prioritized RPC - return true - } - - if !c.switchRPC(ctx) { - c.logger.Error("could not establish connection to any RPC node") - - // could not connect to all endpoints => - // switch client to inactive mode - c.inactiveMode() - - return false - } - - // TODO(@carpawell): call here some callback retrieved in constructor - // of the client to allow checking chain state since during switch - // process some notification could be lost - - return true + _ = c.UnsubscribeAll() + c.close() } func (c *Client) switchToMostPrioritized(ctx context.Context) { @@ -210,43 +119,35 @@ mainLoop: cli, act, err := c.newCli(ctx, tryE) if err != nil { - c.logger.Warn("could not create client to the higher priority node", + c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode, zap.String("endpoint", tryE), zap.Error(err), ) continue } - if subs, ok := c.restoreSubscriptions(ctx, cli, tryE, true); ok { - c.switchLock.Lock() - - // higher priority node could have been - // connected in the other goroutine - if e.Priority >= 
c.endpoints.list[c.endpoints.curr].Priority { - cli.Close() - c.switchLock.Unlock() - return - } - - c.client.Close() - c.cache.invalidate() - c.client = cli - c.setActor(act) - c.subsInfo = subs - c.endpoints.curr = i + c.switchLock.Lock() + // higher priority node could have been + // connected in the other goroutine + if e.Priority >= c.endpoints.list[c.endpoints.curr].Priority { + cli.Close() c.switchLock.Unlock() - - c.logger.Info("switched to the higher priority RPC", - zap.String("endpoint", tryE)) - return } - c.logger.Warn("could not restore side chain subscriptions using node", - zap.String("endpoint", tryE), - zap.Error(err), - ) + c.client.Close() + c.cache.invalidate() + c.client = cli + c.setActor(act) + c.endpoints.curr = i + + c.switchLock.Unlock() + + c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC, + zap.String("endpoint", tryE)) + + return } } } @@ -254,6 +155,7 @@ mainLoop: // close closes notification channel and wrapped WS client. func (c *Client) close() { - close(c.notifications) + c.switchLock.RLock() + defer c.switchLock.RUnlock() c.client.Close() } diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 6b721cdfb..c837cbf0c 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -3,7 +3,6 @@ package netmap import ( "errors" "fmt" - "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/encoding/bigint" @@ -11,24 +10,22 @@ import ( ) const ( - maxObjectSizeConfig = "MaxObjectSize" - basicIncomeRateConfig = "BasicIncomeRate" - auditFeeConfig = "AuditFee" - epochDurationConfig = "EpochDuration" - containerFeeConfig = "ContainerFee" - containerAliasFeeConfig = "ContainerAliasFee" - etIterationsConfig = "EigenTrustIterations" - etAlphaConfig = "EigenTrustAlpha" - irCandidateFeeConfig = "InnerRingCandidateFee" - withdrawFeeConfig = "WithdrawFee" - homomorphicHashingDisabledKey = "HomomorphicHashingDisabled" - 
maintenanceModeAllowedConfig = "MaintenanceModeAllowed" + MaxObjectSizeConfig = "MaxObjectSize" + BasicIncomeRateConfig = "BasicIncomeRate" + AuditFeeConfig = "AuditFee" + EpochDurationConfig = "EpochDuration" + ContainerFeeConfig = "ContainerFee" + ContainerAliasFeeConfig = "ContainerAliasFee" + IrCandidateFeeConfig = "InnerRingCandidateFee" + WithdrawFeeConfig = "WithdrawFee" + HomomorphicHashingDisabledKey = "HomomorphicHashingDisabled" + MaintenanceModeAllowedConfig = "MaintenanceModeAllowed" ) // MaxObjectSize receives max object size configuration // value through the Netmap contract call. func (c *Client) MaxObjectSize() (uint64, error) { - objectSize, err := c.readUInt64Config(maxObjectSizeConfig) + objectSize, err := c.readUInt64Config(MaxObjectSizeConfig) if err != nil { return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err) } @@ -39,7 +36,7 @@ func (c *Client) MaxObjectSize() (uint64, error) { // BasicIncomeRate returns basic income rate configuration value from network // config in netmap contract. func (c *Client) BasicIncomeRate() (uint64, error) { - rate, err := c.readUInt64Config(basicIncomeRateConfig) + rate, err := c.readUInt64Config(BasicIncomeRateConfig) if err != nil { return 0, fmt.Errorf("(%T) could not get basic income rate: %w", c, err) } @@ -50,7 +47,7 @@ func (c *Client) BasicIncomeRate() (uint64, error) { // AuditFee returns audit fee configuration value from network // config in netmap contract. func (c *Client) AuditFee() (uint64, error) { - fee, err := c.readUInt64Config(auditFeeConfig) + fee, err := c.readUInt64Config(AuditFeeConfig) if err != nil { return 0, fmt.Errorf("(%T) could not get audit fee: %w", c, err) } @@ -60,7 +57,7 @@ func (c *Client) AuditFee() (uint64, error) { // EpochDuration returns number of sidechain blocks per one FrostFS epoch. 
func (c *Client) EpochDuration() (uint64, error) { - epochDuration, err := c.readUInt64Config(epochDurationConfig) + epochDuration, err := c.readUInt64Config(EpochDurationConfig) if err != nil { return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err) } @@ -71,7 +68,7 @@ func (c *Client) EpochDuration() (uint64, error) { // ContainerFee returns fee paid by container owner to each alphabet node // for container registration. func (c *Client) ContainerFee() (uint64, error) { - fee, err := c.readUInt64Config(containerFeeConfig) + fee, err := c.readUInt64Config(ContainerFeeConfig) if err != nil { return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err) } @@ -82,7 +79,7 @@ func (c *Client) ContainerFee() (uint64, error) { // ContainerAliasFee returns additional fee paid by container owner to each // alphabet node for container nice name registration. func (c *Client) ContainerAliasFee() (uint64, error) { - fee, err := c.readUInt64Config(containerAliasFeeConfig) + fee, err := c.readUInt64Config(ContainerAliasFeeConfig) if err != nil { return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err) } @@ -90,40 +87,18 @@ func (c *Client) ContainerAliasFee() (uint64, error) { return fee, nil } -// EigenTrustIterations returns global configuration value of iteration cycles -// for EigenTrust algorithm per epoch. -func (c *Client) EigenTrustIterations() (uint64, error) { - iterations, err := c.readUInt64Config(etIterationsConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get eigen trust iterations: %w", c, err) - } - - return iterations, nil -} - -// EigenTrustAlpha returns global configuration value of alpha parameter. -// It receives the alpha as a string and tries to convert it to float. 
-func (c *Client) EigenTrustAlpha() (float64, error) { - strAlpha, err := c.readStringConfig(etAlphaConfig) - if err != nil { - return 0, fmt.Errorf("(%T) could not get eigen trust alpha: %w", c, err) - } - - return strconv.ParseFloat(strAlpha, 64) -} - // HomomorphicHashDisabled returns global configuration value of homomorphic hashing // settings. // // Returns (false, nil) if config key is not found in the contract. func (c *Client) HomomorphicHashDisabled() (bool, error) { - return c.readBoolConfig(homomorphicHashingDisabledKey) + return c.readBoolConfig(HomomorphicHashingDisabledKey) } // InnerRingCandidateFee returns global configuration value of fee paid by // node to be in inner ring candidates list. func (c *Client) InnerRingCandidateFee() (uint64, error) { - fee, err := c.readUInt64Config(irCandidateFeeConfig) + fee, err := c.readUInt64Config(IrCandidateFeeConfig) if err != nil { return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err) } @@ -134,7 +109,7 @@ func (c *Client) InnerRingCandidateFee() (uint64, error) { // WithdrawFee returns global configuration value of fee paid by user to // withdraw assets from FrostFS contract. func (c *Client) WithdrawFee() (uint64, error) { - fee, err := c.readUInt64Config(withdrawFeeConfig) + fee, err := c.readUInt64Config(WithdrawFeeConfig) if err != nil { return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err) } @@ -148,7 +123,7 @@ func (c *Client) WithdrawFee() (uint64, error) { // // By default, maintenance state is disallowed. 
func (c *Client) MaintenanceModeAllowed() (bool, error) { - return c.readBoolConfig(maintenanceModeAllowedConfig) + return c.readBoolConfig(MaintenanceModeAllowedConfig) } func (c *Client) readUInt64Config(key string) (uint64, error) { @@ -161,16 +136,6 @@ func (c *Client) readUInt64Config(key string) (uint64, error) { return uint64(v.(int64)), nil } -func (c *Client) readStringConfig(key string) (string, error) { - v, err := c.config([]byte(key), StringAssert) - if err != nil { - return "", err - } - - // StringAssert is guaranteed to return string if the error is nil. - return v.(string), nil -} - // reads boolean value by the given key from the FrostFS network configuration // stored in the Sidechain. Returns false if key is not presented. func (c *Client) readBoolConfig(key string) (bool, error) { @@ -246,10 +211,6 @@ type NetworkConfiguration struct { ContainerAliasFee uint64 - EigenTrustIterations uint64 - - EigenTrustAlpha float64 - IRCandidateFee uint64 WithdrawalFee uint64 @@ -299,32 +260,25 @@ func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) { Name: name, Value: value, }) - case maxObjectSizeConfig: + case MaxObjectSizeConfig: res.MaxObjectSize = bytesToUint64(value) - case basicIncomeRateConfig: + case BasicIncomeRateConfig: res.StoragePrice = bytesToUint64(value) - case auditFeeConfig: + case AuditFeeConfig: res.AuditFee = bytesToUint64(value) - case epochDurationConfig: + case EpochDurationConfig: res.EpochDuration = bytesToUint64(value) - case containerFeeConfig: + case ContainerFeeConfig: res.ContainerFee = bytesToUint64(value) - case containerAliasFeeConfig: + case ContainerAliasFeeConfig: res.ContainerAliasFee = bytesToUint64(value) - case etIterationsConfig: - res.EigenTrustIterations = bytesToUint64(value) - case etAlphaConfig: - res.EigenTrustAlpha, err = strconv.ParseFloat(string(value), 64) - if err != nil { - return fmt.Errorf("invalid prm %s: %v", etAlphaConfig, err) - } - case irCandidateFeeConfig: + case 
IrCandidateFeeConfig: res.IRCandidateFee = bytesToUint64(value) - case withdrawFeeConfig: + case WithdrawFeeConfig: res.WithdrawalFee = bytesToUint64(value) - case homomorphicHashingDisabledKey: + case HomomorphicHashingDisabledKey: res.HomomorphicHashingDisabled = bytesToBool(value) - case maintenanceModeAllowedConfig: + case MaintenanceModeAllowedConfig: res.MaintenanceModeAllowed = bytesToBool(value) } diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go index 0b4d31b1d..7a63f14d7 100644 --- a/pkg/morph/client/netmap/new_epoch.go +++ b/pkg/morph/client/netmap/new_epoch.go @@ -8,10 +8,14 @@ import ( // NewEpoch updates FrostFS epoch number through // Netmap contract call. -func (c *Client) NewEpoch(epoch uint64) error { +// If `force` is true, this call is normally initiated by a control +// service command and uses a control notary transaction internally +// to ensure all nodes produce the same transaction with high probability. +func (c *Client) NewEpoch(epoch uint64, force bool) error { prm := client.InvokePrm{} prm.SetMethod(newEpochMethod) prm.SetArgs(epoch) + prm.SetControlTX(force) if err := c.client.Invoke(prm); err != nil { return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err) diff --git a/pkg/morph/client/netmap/add_peer.go b/pkg/morph/client/netmap/peer.go similarity index 70% rename from pkg/morph/client/netmap/add_peer.go rename to pkg/morph/client/netmap/peer.go index dc6c25540..7ceaa0250 100644 --- a/pkg/morph/client/netmap/add_peer.go +++ b/pkg/morph/client/netmap/peer.go @@ -41,3 +41,19 @@ func (c *Client) AddPeer(p AddPeerPrm) error { } return nil } + +// ForceRemovePeer marks the given peer as offline via a notary control transaction. 
+func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo) error { + if !c.client.WithNotary() { + return fmt.Errorf("peer can be forcefully removed only in notary environment") + } + + prm := UpdatePeerPrm{} + prm.SetKey(nodeInfo.PublicKey()) + prm.SetControlTX(true) + + if err := c.UpdatePeerState(prm); err != nil { + return fmt.Errorf("updating peer state: %v", err) + } + return nil +} diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go index 0a23aa47a..2f7079dfe 100644 --- a/pkg/morph/client/nns.go +++ b/pkg/morph/client/nns.go @@ -32,10 +32,6 @@ const ( NNSNetmapContractName = "netmap.frostfs" // NNSProxyContractName is a name of the proxy contract in NNS. NNSProxyContractName = "proxy.frostfs" - // NNSReputationContractName is a name of the reputation contract in NNS. - NNSReputationContractName = "reputation.frostfs" - // NNSSubnetworkContractName is a name of the subnet contract in NNS. - NNSSubnetworkContractName = "subnet.frostfs" // NNSGroupKeyName is a name for the FrostFS group key record in NNS. NNSGroupKeyName = "group.frostfs" ) @@ -208,8 +204,18 @@ func (c *Client) SetGroupSignerScope() error { return err } - c.signer.Scopes = transaction.CustomGroups - c.signer.AllowedGroups = []*keys.PublicKey{pub} + // Don't change c before everything is OK. 
+ cfg := c.cfg + cfg.signer = &transaction.Signer{ + Scopes: transaction.CustomGroups | transaction.CalledByEntry, + AllowedGroups: []*keys.PublicKey{pub}, + } + rpcActor, err := newActor(c.client, c.acc, cfg) + if err != nil { + return err + } + c.cfg = cfg + c.setActor(rpcActor) return nil } diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 96dca0319..1ed1ca912 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -1,6 +1,7 @@ package client import ( + "crypto/elliptic" "encoding/binary" "errors" "fmt" @@ -8,6 +9,7 @@ import ( "math/big" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" @@ -17,19 +19,20 @@ import ( "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/neorpc" "github.com/nspcc-dev/neo-go/pkg/neorpc/result" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/notary" sc "github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" + "github.com/nspcc-dev/neo-go/pkg/vm" + "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" "github.com/nspcc-dev/neo-go/pkg/wallet" "go.uber.org/zap" ) type ( notaryInfo struct { - txValidTime uint32 // minimum amount of blocks when mainTx will be valid - roundTime uint32 // extra amount of blocks to synchronize sidechain height diff of inner ring nodes - fallbackTime uint32 // mainTx's ValidUntilBlock - fallbackTime + 1 is when fallbackTx is sent + txValidTime uint32 // minimum amount of blocks when mainTx will be valid + roundTime uint32 // extra amount of blocks to synchronize sidechain height diff of inner ring nodes alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness @@ -40,7 +43,7 @@ type ( notaryCfg struct { proxy 
util.Uint160 - txValidTime, roundTime, fallbackTime uint32 + txValidTime, roundTime uint32 alphabetSource AlphabetKeys } @@ -50,9 +53,8 @@ type ( ) const ( - defaultNotaryValidTime = 50 - defaultNotaryRoundTime = 100 - defaultNotaryFallbackTime = 40 + defaultNotaryValidTime = 50 + defaultNotaryRoundTime = 100 notaryBalanceOfMethod = "balanceOf" notaryExpirationOfMethod = "expirationOf" @@ -68,7 +70,6 @@ func defaultNotaryConfig(c *Client) *notaryCfg { return ¬aryCfg{ txValidTime: defaultNotaryValidTime, roundTime: defaultNotaryRoundTime, - fallbackTime: defaultNotaryFallbackTime, alphabetSource: c.Committee, } } @@ -103,7 +104,6 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error { proxy: cfg.proxy, txValidTime: cfg.txValidTime, roundTime: cfg.roundTime, - fallbackTime: cfg.fallbackTime, alphabetSource: cfg.alphabetSource, notary: notary.Hash, } @@ -203,7 +203,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint2 // Transaction is already in mempool waiting to be processed. // This is an expected situation if we restart the service. 
- c.logger.Info("notary deposit has already been made", + c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), @@ -211,7 +211,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint2 return util.Uint256{}, nil } - c.logger.Info("notary deposit invoke", + c.logger.Info(logs.ClientNotaryDepositInvoke, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), @@ -407,37 +407,32 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { return fmt.Errorf("could not fetch current alphabet keys: %w", err) } - multiaddrAccount, err := c.notaryMultisigAccount(alphabetList, false, true) + cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList) if err != nil { return err } - // error appears only if client - // is in inactive mode; that has - // been already checked above - magicNumber, _ := c.MagicNumber() + nAct, err := notary.NewActor(c.client, cosigners, c.acc) + if err != nil { + return err + } - // mainTX is expected to be pre-validated: second witness must exist and be empty - mainTx.Scripts[1].VerificationScript = multiaddrAccount.GetVerificationScript() - mainTx.Scripts[1].InvocationScript = append( - []byte{byte(opcode.PUSHDATA1), 64}, - multiaddrAccount.PrivateKey().SignHashable(uint32(magicNumber), mainTx)..., - ) + // Sign exactly the same transaction we've got from the received Notary request. 
+ err = nAct.Sign(mainTx) + if err != nil { + return fmt.Errorf("faield to sign notary request: %w", err) + } + + mainH, fbH, untilActual, err := nAct.Notarize(mainTx, nil) - //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202 - resp, err := c.client.SignAndPushP2PNotaryRequest(mainTx, - []byte{byte(opcode.RET)}, - -1, - 0, - c.notary.fallbackTime, - c.acc) if err != nil && !alreadyOnChainError(err) { return err } - c.logger.Debug("notary request with prepared main TX invoked", - zap.Uint32("fallback_valid_for", c.notary.fallbackTime), - zap.Stringer("tx_hash", resp.Hash().Reverse())) + c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked, + zap.String("tx_hash", mainH.StringLE()), + zap.Uint32("valid_until_block", untilActual), + zap.String("fallback_hash", fbH.StringLE())) return nil } @@ -453,70 +448,147 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint return err } - cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee) - if err != nil { - return err - } - - params, err := invocationParams(args...) 
- if err != nil { - return err - } - - test, err := c.makeTestInvocation(contract, method, params, cosigners) - if err != nil { - return err - } - - multiaddrAccount, err := c.notaryMultisigAccount(alphabetList, committee, invokedByAlpha) - if err != nil { - return err - } - until, err := c.getUntilValue(vub) if err != nil { return err } - mainTx, err := c.buildMainTx(invokedByAlpha, nonce, alphabetList, test, cosigners, multiaddrAccount, until) + cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee) if err != nil { return err } - //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202 - resp, err := c.client.SignAndPushP2PNotaryRequest(mainTx, - []byte{byte(opcode.RET)}, - -1, - 0, - c.notary.fallbackTime, - c.acc) + nAct, err := notary.NewActor(c.client, cosigners, c.acc) + if err != nil { + return err + } + + mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error { + if r.State != vmstate.Halt.String() { + return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) + } + + t.ValidUntilBlock = until + t.Nonce = nonce + + return nil + }, args...)) + if err != nil && !alreadyOnChainError(err) { return err } - c.logger.Debug("notary request invoked", + c.logger.Debug(logs.ClientNotaryRequestInvoked, zap.String("method", method), - zap.Uint32("valid_until_block", until), - zap.Uint32("fallback_valid_for", c.notary.fallbackTime), - zap.Stringer("tx_hash", resp.Hash().Reverse())) + zap.Uint32("valid_until_block", untilActual), + zap.String("tx_hash", mainH.StringLE()), + zap.String("fallback_hash", fbH.StringLE())) return nil } -func (c *Client) makeTestInvocation(contract util.Uint160, method string, params []sc.Parameter, cosigners []transaction.Signer) (*result.Invoke, error) { - test, err := c.client.InvokeFunction(contract, method, params, cosigners) +func (c *Client) notaryCosignersFromTx(mainTx 
*transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) { + multiaddrAccount, err := c.notaryMultisigAccount(alphabetList, false, true) if err != nil { return nil, err } - if test.State != HaltState { - return nil, wrapFrostFSError(¬HaltStateError{state: test.State, exception: test.FaultException}) + // Here we need to add a committee signature (second witness) to the pre-validated + // main transaction without creating a new one. However, Notary actor demands the + // proper set of signers for constructor, thus, fill it from the main transaction's signers list. + s := make([]actor.SignerAccount, 2, 3) + s[0] = actor.SignerAccount{ + // Proxy contract that will pay for the execution. + Signer: mainTx.Signers[0], + Account: notary.FakeContractAccount(mainTx.Signers[0].Account), + } + s[1] = actor.SignerAccount{ + // Inner ring multisignature. + Signer: mainTx.Signers[1], + Account: multiaddrAccount, + } + if len(mainTx.Signers) > 3 { + // Invoker signature (simple signature account of storage node is expected). 
+ var acc *wallet.Account + script := mainTx.Scripts[2].VerificationScript + if len(script) == 0 { + acc = notary.FakeContractAccount(mainTx.Signers[2].Account) + } else { + pubBytes, ok := vm.ParseSignatureContract(script) + if ok { + pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256()) + if err != nil { + return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key: %w", err) + } + acc = notary.FakeSimpleAccount(pub) + } else { + m, pubsBytes, ok := vm.ParseMultiSigContract(script) + if !ok { + return nil, errors.New("failed to parse verification script of signer #2: unknown witness type") + } + pubs := make(keys.PublicKeys, len(pubsBytes)) + for i := range pubs { + pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256()) + if err != nil { + return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key #%d: %w", i, err) + } + } + acc, err = notary.FakeMultisigAccount(m, pubs) + if err != nil { + return nil, fmt.Errorf("failed to create fake account for signer #2: %w", err) + } + } + } + s = append(s, actor.SignerAccount{ + Signer: mainTx.Signers[2], + Account: acc, + }) } - if len(test.Script) == 0 { - return nil, wrapFrostFSError(errEmptyInvocationScript) + return s, nil +} + +func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, committee bool) ([]actor.SignerAccount, error) { + multiaddrAccount, err := c.notaryMultisigAccount(ir, committee, invokedByAlpha) + if err != nil { + return nil, err } - return test, nil + s := make([]actor.SignerAccount, 2, 3) + // Proxy contract that will pay for the execution. + s[0] = actor.SignerAccount{ + Signer: transaction.Signer{ + Account: c.notary.proxy, + Scopes: transaction.None, + }, + Account: notary.FakeContractAccount(c.notary.proxy), + } + // Inner ring multisignature. 
+ s[1] = actor.SignerAccount{ + Signer: transaction.Signer{ + Account: multiaddrAccount.ScriptHash(), + Scopes: c.cfg.signer.Scopes, + AllowedContracts: c.cfg.signer.AllowedContracts, + AllowedGroups: c.cfg.signer.AllowedGroups, + }, + Account: multiaddrAccount, + } + + if !invokedByAlpha { + // Invoker signature. + s = append(s, actor.SignerAccount{ + Signer: transaction.Signer{ + Account: hash.Hash160(c.acc.GetVerificationScript()), + Scopes: c.cfg.signer.Scopes, + AllowedContracts: c.cfg.signer.AllowedContracts, + AllowedGroups: c.cfg.signer.AllowedGroups, + }, + Account: c.acc, + }) + } + + // The last one is Notary contract that will be added to the signers list + // by Notary actor automatically. + return s, nil } func (c *Client) getUntilValue(vub *uint32) (uint32, error) { @@ -526,195 +598,6 @@ func (c *Client) getUntilValue(vub *uint32) (uint32, error) { return c.notaryTxValidationLimit() } -func (c *Client) buildMainTx(invokedByAlpha bool, nonce uint32, alphabetList keys.PublicKeys, test *result.Invoke, - cosigners []transaction.Signer, multiaddrAccount *wallet.Account, until uint32) (*transaction.Transaction, error) { - // after test invocation we build main multisig transaction - - u8n := uint8(len(alphabetList)) - - if !invokedByAlpha { - u8n++ - } - - // prepare main tx - mainTx := &transaction.Transaction{ - Nonce: nonce, - SystemFee: test.GasConsumed, - ValidUntilBlock: until, - Script: test.Script, - Attributes: []transaction.Attribute{ - { - Type: transaction.NotaryAssistedT, - Value: &transaction.NotaryAssisted{NKeys: u8n}, - }, - }, - Signers: cosigners, - } - - // calculate notary fee - //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202 - notaryFee, err := c.client.CalculateNotaryFee(u8n) - if err != nil { - return nil, err - } - - // add network fee for cosigners - //nolint:staticcheck // waits for neo-go v0.99.3 with notary actors - //lint:ignore SA1019 
https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202 - err = c.client.AddNetworkFee( - mainTx, - notaryFee, - c.notaryAccounts(invokedByAlpha, multiaddrAccount)..., - ) - if err != nil { - return nil, err - } - - // define witnesses - mainTx.Scripts = c.notaryWitnesses(invokedByAlpha, multiaddrAccount, mainTx) - - return mainTx, nil -} - -func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, committee bool) ([]transaction.Signer, error) { - s := make([]transaction.Signer, 0, 4) - - // first we have proxy contract signature, as it will pay for the execution - s = append(s, transaction.Signer{ - Account: c.notary.proxy, - Scopes: transaction.None, - }) - - // then we have inner ring multiaddress signature - m := sigCount(ir, committee) - - multisigScript, err := sc.CreateMultiSigRedeemScript(m, ir) - if err != nil { - // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("can't create ir multisig redeem script: %w", err)) - } - - s = append(s, transaction.Signer{ - Account: hash.Hash160(multisigScript), - Scopes: c.signer.Scopes, - AllowedContracts: c.signer.AllowedContracts, - AllowedGroups: c.signer.AllowedGroups, - }) - - if !invokedByAlpha { - // then we have invoker signature - s = append(s, transaction.Signer{ - Account: hash.Hash160(c.acc.GetVerificationScript()), - Scopes: c.signer.Scopes, - AllowedContracts: c.signer.AllowedContracts, - AllowedGroups: c.signer.AllowedGroups, - }) - } - - // last one is a placeholder for notary contract signature - s = append(s, transaction.Signer{ - Account: c.notary.notary, - Scopes: transaction.None, - }) - - return s, nil -} - -func (c *Client) notaryAccounts(invokedByAlpha bool, multiaddr *wallet.Account) []*wallet.Account { - if multiaddr == nil { - return nil - } - - a := make([]*wallet.Account, 0, 4) - - // first we have proxy account, as it will pay for the execution - a = append(a, 
notary.FakeContractAccount(c.notary.proxy)) - - // then we have inner ring multiaddress account - a = append(a, multiaddr) - - if !invokedByAlpha { - // then we have invoker account - a = append(a, c.acc) - } - - // last one is a placeholder for notary contract account - a = append(a, &wallet.Account{ - Contract: &wallet.Contract{}, - }) - - return a -} - -func (c *Client) notaryWitnesses(invokedByAlpha bool, multiaddr *wallet.Account, tx *transaction.Transaction) []transaction.Witness { - if multiaddr == nil || tx == nil { - return nil - } - - w := make([]transaction.Witness, 0, 4) - - // first we have empty proxy witness, because notary will execute `Verify` - // method on the proxy contract to check witness - w = append(w, transaction.Witness{ - InvocationScript: []byte{}, - VerificationScript: []byte{}, - }) - - // then we have inner ring multiaddress witness - - // invocation script should be of the form: - // { PUSHDATA1, 64, signatureBytes... } - // to pass Notary module verification - var invokeScript []byte - - magicNumber, _ := c.MagicNumber() - - if invokedByAlpha { - invokeScript = append( - []byte{byte(opcode.PUSHDATA1), 64}, - multiaddr.PrivateKey().SignHashable(uint32(magicNumber), tx)..., - ) - } else { - // we can't provide alphabet node signature - // because Storage Node doesn't own alphabet's - // private key. 
Thus, add dummy witness with - // empty bytes instead of signature - invokeScript = append( - []byte{byte(opcode.PUSHDATA1), 64}, - make([]byte, 64)..., - ) - } - - w = append(w, transaction.Witness{ - InvocationScript: invokeScript, - VerificationScript: multiaddr.GetVerificationScript(), - }) - - if !invokedByAlpha { - // then we have invoker witness - invokeScript = append( - []byte{byte(opcode.PUSHDATA1), 64}, - c.acc.PrivateKey().SignHashable(uint32(magicNumber), tx)..., - ) - - w = append(w, transaction.Witness{ - InvocationScript: invokeScript, - VerificationScript: c.acc.GetVerificationScript(), - }) - } - - // last one is a placeholder for notary contract witness - w = append(w, transaction.Witness{ - InvocationScript: append( - []byte{byte(opcode.PUSHDATA1), 64}, - make([]byte, 64)..., - ), - VerificationScript: []byte{}, - }) - - return w -} - func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedByAlpha bool) (*wallet.Account, error) { m := sigCount(ir, committee) @@ -771,21 +654,6 @@ func (c *Client) depositExpirationOf() (int64, error) { return currentTillBig.Int64(), nil } -func invocationParams(args ...any) ([]sc.Parameter, error) { - params := make([]sc.Parameter, 0, len(args)) - - for i := range args { - param, err := toStackParameter(args[i]) - if err != nil { - return nil, err - } - - params = append(params, param) - } - - return params, nil -} - // sigCount returns the number of required signature. // For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT). // If committee is true, returns M as N/2+1. @@ -813,15 +681,6 @@ func WithRoundTime(t uint32) NotaryOption { } } -// WithFallbackTime returns a notary support option for client -// that specifies amount of blocks before fallbackTx will be sent. -// Should be less than TxValidTime. 
-func WithFallbackTime(t uint32) NotaryOption { - return func(c *notaryCfg) { - c.fallbackTime = t - } -} - // WithAlphabetSource returns a notary support option for client // that specifies function to return list of alphabet node keys. // By default notary subsystem uses committee as a source. This is @@ -890,6 +749,16 @@ func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed // CalculateNonceAndVUB calculates nonce and ValidUntilBlock values // based on transaction hash. func (c *Client) CalculateNonceAndVUB(hash util.Uint256) (nonce uint32, vub uint32, err error) { + return c.calculateNonceAndVUB(hash, false) +} + +// CalculateNonceAndVUBControl calculates nonce and rounded ValidUntilBlock values +// based on transaction hash for use in control transactions. +func (c *Client) CalculateNonceAndVUBControl(hash util.Uint256) (nonce uint32, vub uint32, err error) { + return c.calculateNonceAndVUB(hash, true) +} + +func (c *Client) calculateNonceAndVUB(hash util.Uint256, roundBlockHeight bool) (nonce uint32, vub uint32, err error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -908,6 +777,14 @@ func (c *Client) CalculateNonceAndVUB(hash util.Uint256) (nonce uint32, vub uint return 0, 0, fmt.Errorf("could not get transaction height: %w", err) } + // For control transactions, we round down the block height to control the + // probability of all nodes producing the same transaction, since it depends + // on this value. 
+ if roundBlockHeight { + inc := c.rpcActor.GetVersion().Protocol.MaxValidUntilBlockIncrement + height = height / inc * inc + } + return nonce, height + c.notary.txValidTime, nil } diff --git a/pkg/morph/client/notifications.go b/pkg/morph/client/notifications.go index 300bab825..dbca00d7c 100644 --- a/pkg/morph/client/notifications.go +++ b/pkg/morph/client/notifications.go @@ -1,15 +1,11 @@ package client import ( - "context" - "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/neorpc" "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/util" - "go.uber.org/zap" ) // Close closes connection to the remote side making @@ -23,71 +19,46 @@ func (c *Client) Close() { close(c.closeChan) } -// SubscribeForExecutionNotifications adds subscription for notifications -// generated during contract transaction execution to this instance of client. +// ReceiveExecutionNotifications performs subscription for notifications +// generated during contract execution. Events are sent to the specified channel. // // Returns ErrConnectionLost if client has not been able to establish // connection to any of passed RPC endpoints. 
-func (c *Client) SubscribeForExecutionNotifications(contract util.Uint160) error { +func (c *Client) ReceiveExecutionNotifications(contract util.Uint160, ch chan<- *state.ContainedNotificationEvent) (string, error) { c.switchLock.Lock() defer c.switchLock.Unlock() if c.inactive { - return ErrConnectionLost + return "", ErrConnectionLost } - _, subscribed := c.subscribedEvents[contract] - if subscribed { - // no need to subscribe one more time - return nil - } - - id, err := c.client.ReceiveExecutionNotifications(&neorpc.NotificationFilter{Contract: &contract}, c.notificationRcv) - if err != nil { - return err - } - - c.subscribedEvents[contract] = id - - return nil + return c.client.ReceiveExecutionNotifications(&neorpc.NotificationFilter{Contract: &contract}, ch) } -// SubscribeForNewBlocks adds subscription for new block events to this -// instance of client. +// ReceiveBlocks performs subscription for new block events. Events are sent +// to the specified channel. // // Returns ErrConnectionLost if client has not been able to establish // connection to any of passed RPC endpoints. -func (c *Client) SubscribeForNewBlocks() error { +func (c *Client) ReceiveBlocks(ch chan<- *block.Block) (string, error) { c.switchLock.Lock() defer c.switchLock.Unlock() if c.inactive { - return ErrConnectionLost + return "", ErrConnectionLost } - if c.subscribedToBlocks { - // no need to subscribe one more time - return nil - } - - _, err := c.client.ReceiveBlocks(nil, c.blockRcv) - if err != nil { - return err - } - - c.subscribedToBlocks = true - - return nil + return c.client.ReceiveBlocks(nil, ch) } -// SubscribeForNotaryRequests adds subscription for notary request payloads +// ReceiveNotaryRequests performsn subscription for notary request payloads // addition or removal events to this instance of client. Passed txSigner is // used as filter: subscription is only for the notary requests that must be -// signed by txSigner. +// signed by txSigner. 
Events are sent to the specified channel. // // Returns ErrConnectionLost if client has not been able to establish // connection to any of passed RPC endpoints. -func (c *Client) SubscribeForNotaryRequests(txSigner util.Uint160) error { +func (c *Client) ReceiveNotaryRequests(txSigner util.Uint160, ch chan<- *result.NotaryRequestEvent) (string, error) { if c.notary == nil { panic(notaryNotEnabledPanicMsg) } @@ -96,30 +67,17 @@ func (c *Client) SubscribeForNotaryRequests(txSigner util.Uint160) error { defer c.switchLock.Unlock() if c.inactive { - return ErrConnectionLost + return "", ErrConnectionLost } - _, subscribed := c.subscribedNotaryEvents[txSigner] - if subscribed { - // no need to subscribe one more time - return nil - } - - id, err := c.client.ReceiveNotaryRequests(&neorpc.TxFilter{Signer: &txSigner}, c.notaryReqRcv) - if err != nil { - return err - } - - c.subscribedNotaryEvents[txSigner] = id - - return nil + return c.client.ReceiveNotaryRequests(&neorpc.TxFilter{Signer: &txSigner}, ch) } -// UnsubscribeContract removes subscription for given contract event stream. +// Unsubscribe performs unsubscription for the given subscription ID. // // Returns ErrConnectionLost if client has not been able to establish // connection to any of passed RPC endpoints. -func (c *Client) UnsubscribeContract(contract util.Uint160) error { +func (c *Client) Unsubscribe(subID string) error { c.switchLock.Lock() defer c.switchLock.Unlock() @@ -127,55 +85,7 @@ func (c *Client) UnsubscribeContract(contract util.Uint160) error { return ErrConnectionLost } - _, subscribed := c.subscribedEvents[contract] - if !subscribed { - // no need to unsubscribe contract - // without subscription - return nil - } - - err := c.client.Unsubscribe(c.subscribedEvents[contract]) - if err != nil { - return err - } - - delete(c.subscribedEvents, contract) - - return nil -} - -// UnsubscribeNotaryRequest removes subscription for given notary requests -// signer. 
-// -// Returns ErrConnectionLost if client has not been able to establish -// connection to any of passed RPC endpoints. -func (c *Client) UnsubscribeNotaryRequest(signer util.Uint160) error { - if c.notary == nil { - panic(notaryNotEnabledPanicMsg) - } - - c.switchLock.Lock() - defer c.switchLock.Unlock() - - if c.inactive { - return ErrConnectionLost - } - - _, subscribed := c.subscribedNotaryEvents[signer] - if !subscribed { - // no need to unsubscribe signer's - // requests without subscription - return nil - } - - err := c.client.Unsubscribe(c.subscribedNotaryEvents[signer]) - if err != nil { - return err - } - - delete(c.subscribedNotaryEvents, signer) - - return nil + return c.client.Unsubscribe(subID) } // UnsubscribeAll removes all active subscriptions of current client. @@ -190,163 +100,10 @@ func (c *Client) UnsubscribeAll() error { return ErrConnectionLost } - // no need to unsubscribe if there are - // no active subscriptions - if len(c.subscribedEvents) == 0 && len(c.subscribedNotaryEvents) == 0 && - !c.subscribedToBlocks { - return nil - } - err := c.client.UnsubscribeAll() if err != nil { return err } - c.subscribedEvents = make(map[util.Uint160]string) - c.subscribedNotaryEvents = make(map[util.Uint160]string) - c.subscribedToBlocks = false - return nil } - -// subsInfo includes channels for ws notifications; -// cached subscription information. -type subsInfo struct { - blockRcv chan *block.Block - notificationRcv chan *state.ContainedNotificationEvent - notaryReqRcv chan *result.NotaryRequestEvent - - subscribedToBlocks bool - subscribedEvents map[util.Uint160]string - subscribedNotaryEvents map[util.Uint160]string -} - -// restoreSubscriptions restores subscriptions according to cached -// information about them. -// -// If it is NOT a background operation switchLock MUST be held. 
-// Returns a pair: the second is a restoration status and the first -// one contains subscription information applied to the passed cli -// and receivers for the updated subscriptions. -// Does not change Client instance. -func (c *Client) restoreSubscriptions(ctx context.Context, cli *rpcclient.WSClient, endpoint string, background bool) (si subsInfo, ok bool) { - var ( - err error - id string - ) - - stopCh := make(chan struct{}) - defer close(stopCh) - - blockRcv := make(chan *block.Block) - notificationRcv := make(chan *state.ContainedNotificationEvent) - notaryReqRcv := make(chan *result.NotaryRequestEvent) - - c.startListen(ctx, stopCh, blockRcv, notificationRcv, notaryReqRcv, background) - - if background { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - } - - si.subscribedToBlocks = c.subscribedToBlocks - si.subscribedEvents = copySubsMap(c.subscribedEvents) - si.subscribedNotaryEvents = copySubsMap(c.subscribedNotaryEvents) - si.blockRcv = blockRcv - si.notificationRcv = notificationRcv - si.notaryReqRcv = notaryReqRcv - - // new block events restoration - if si.subscribedToBlocks { - _, err = cli.ReceiveBlocks(nil, blockRcv) - if err != nil { - c.logger.Error("could not restore block subscription after RPC switch", - zap.String("endpoint", endpoint), - zap.Error(err), - ) - - return - } - } - - // notification events restoration - for contract := range si.subscribedEvents { - contract := contract // See https://github.com/nspcc-dev/neo-go/issues/2890 - id, err = cli.ReceiveExecutionNotifications(&neorpc.NotificationFilter{Contract: &contract}, notificationRcv) - if err != nil { - c.logger.Error("could not restore notification subscription after RPC switch", - zap.String("endpoint", endpoint), - zap.Error(err), - ) - - return - } - - si.subscribedEvents[contract] = id - } - - // notary notification events restoration - if c.notary != nil { - for signer := range si.subscribedNotaryEvents { - signer := signer // See 
https://github.com/nspcc-dev/neo-go/issues/2890 - id, err = cli.ReceiveNotaryRequests(&neorpc.TxFilter{Signer: &signer}, notaryReqRcv) - if err != nil { - c.logger.Error("could not restore notary notification subscription after RPC switch", - zap.String("endpoint", endpoint), - zap.Error(err), - ) - - return - } - - si.subscribedNotaryEvents[signer] = id - } - } - - return si, true -} - -func (c *Client) startListen(ctx context.Context, stopCh <-chan struct{}, blockRcv <-chan *block.Block, - notificationRcv <-chan *state.ContainedNotificationEvent, notaryReqRcv <-chan *result.NotaryRequestEvent, background bool) { - // neo-go WS client says to _always_ read notifications - // from its channel. Subscribing to any notification - // while not reading them in another goroutine may - // lead to a dead-lock, thus that async side notification - // listening while restoring subscriptions - - go func() { - var e any - var ok bool - - for { - select { - case <-stopCh: - return - case e, ok = <-blockRcv: - case e, ok = <-notificationRcv: - case e, ok = <-notaryReqRcv: - } - - if !ok { - return - } - - if background { - // background client (test) switch, no need to send - // any notification, just preventing dead-lock - continue - } - - c.routeEvent(ctx, e) - } - }() -} - -func copySubsMap(m map[util.Uint160]string) map[util.Uint160]string { - newM := make(map[util.Uint160]string, len(m)) - for k, v := range m { - newM[k] = v - } - - return newM -} diff --git a/pkg/morph/client/reputation/client.go b/pkg/morph/client/reputation/client.go deleted file mode 100644 index cdaf191ad..000000000 --- a/pkg/morph/client/reputation/client.go +++ /dev/null @@ -1,83 +0,0 @@ -package reputation - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Client is a wrapper over StaticClient -// which makes calls with the names and arguments -// of the FrostFS 
reputation contract. -// -// Working client must be created via constructor New. -// Using the Client that has been created with new(Client) -// expression (or just declaring a Client variable) is unsafe -// and can lead to panic. -type Client struct { - client *client.StaticClient // static reputation contract client -} - -const ( - putMethod = "put" - getMethod = "get" - getByIDMethod = "getByID" - listByEpochMethod = "listByEpoch" -) - -// NewFromMorph returns the wrapper instance from the raw morph client. -func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...Option) (*Client, error) { - o := defaultOpts() - - for i := range opts { - opts[i](o) - } - - sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) - if err != nil { - return nil, fmt.Errorf("could not create static client of reputation contract: %w", err) - } - - return &Client{client: sc}, nil -} - -// Morph returns raw morph client. -func (c Client) Morph() *client.Client { - return c.client.Morph() -} - -// ContractAddress returns the address of the associated contract. -func (c Client) ContractAddress() util.Uint160 { - return c.client.ContractAddress() -} - -// Option allows to set an optional -// parameter of ClientWrapper. -type Option func(*opts) - -type opts []client.StaticClientOption - -func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() Option { - return func(o *opts) { - *o = append(*o, client.TryNotary()) - } -} - -// AsAlphabet returns option to sign main TX -// of notary requests with client's private -// key. -// -// Considered to be used by IR nodes only. 
-func AsAlphabet() Option { - return func(o *opts) { - *o = append(*o, client.AsAlphabet()) - } -} diff --git a/pkg/morph/client/reputation/get.go b/pkg/morph/client/reputation/get.go deleted file mode 100644 index 8f1d24176..000000000 --- a/pkg/morph/client/reputation/get.go +++ /dev/null @@ -1,108 +0,0 @@ -package reputation - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -type ( - // GetPrm groups the arguments of "get reputation value" test invocation. - GetPrm struct { - epoch uint64 - peerID reputation.PeerID - } - - // GetByIDPrm groups the arguments of "get reputation value by - // reputation id" test invocation. - GetByIDPrm struct { - id ID - } -) - -// SetEpoch sets epoch of expected reputation value. -func (g *GetPrm) SetEpoch(v uint64) { - g.epoch = v -} - -// SetPeerID sets peer id of expected reputation value. -func (g *GetPrm) SetPeerID(v reputation.PeerID) { - g.peerID = v -} - -// SetID sets id of expected reputation value in reputation contract. -func (g *GetByIDPrm) SetID(v ID) { - g.id = v -} - -// Get invokes the call of "get reputation value" method of reputation contract. -func (c *Client) Get(p GetPrm) ([]reputation.GlobalTrust, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(getMethod) - invokePrm.SetArgs(p.epoch, p.peerID.PublicKey()) - - res, err := c.client.TestInvoke(invokePrm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err) - } - - return parseReputations(res, getMethod) -} - -// GetByID invokes the call of "get reputation value by reputation id" method -// of reputation contract. 
-func (c *Client) GetByID(p GetByIDPrm) ([]reputation.GlobalTrust, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(getByIDMethod) - invokePrm.SetArgs([]byte(p.id)) - - prms, err := c.client.TestInvoke(invokePrm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", getByIDMethod, err) - } - - return parseReputations(prms, getByIDMethod) -} - -func parseGetResult(rawReputations [][]byte, method string) ([]reputation.GlobalTrust, error) { - reputations := make([]reputation.GlobalTrust, 0, len(rawReputations)) - - for i := range rawReputations { - r := reputation.GlobalTrust{} - - err := r.Unmarshal(rawReputations[i]) - if err != nil { - return nil, fmt.Errorf("can't unmarshal global trust value (%s): %w", method, err) - } - - reputations = append(reputations, r) - } - - return reputations, nil -} - -func parseReputations(items []stackitem.Item, method string) ([]reputation.GlobalTrust, error) { - if ln := len(items); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", method, ln) - } - - items, err := client.ArrayFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err) - } - - res := make([][]byte, 0, len(items)) - - for i := range items { - rawReputation, err := client.BytesFromStackItem(items[i]) - if err != nil { - return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", method, err) - } - - res = append(res, rawReputation) - } - - return parseGetResult(res, method) -} diff --git a/pkg/morph/client/reputation/list.go b/pkg/morph/client/reputation/list.go deleted file mode 100644 index 0090efb10..000000000 --- a/pkg/morph/client/reputation/list.go +++ /dev/null @@ -1,55 +0,0 @@ -package reputation - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -type ( - // ID is an ID of the reputation record in reputation contract. 
- ID []byte - - // ListByEpochArgs groups the arguments of - // "list reputation ids by epoch" test invoke call. - ListByEpochArgs struct { - epoch uint64 - } -) - -// SetEpoch sets epoch of expected reputation ids. -func (l *ListByEpochArgs) SetEpoch(v uint64) { - l.epoch = v -} - -// ListByEpoch invokes the call of "list reputation ids by epoch" method of -// reputation contract. -func (c *Client) ListByEpoch(p ListByEpochArgs) ([]ID, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(listByEpochMethod) - invokePrm.SetArgs(p.epoch) - - prms, err := c.client.TestInvoke(invokePrm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listByEpochMethod, err) - } else if ln := len(prms); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", listByEpochMethod, ln) - } - - items, err := client.ArrayFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listByEpochMethod, err) - } - - result := make([]ID, 0, len(items)) - for i := range items { - rawReputation, err := client.BytesFromStackItem(items[i]) - if err != nil { - return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listByEpochMethod, err) - } - - result = append(result, rawReputation) - } - - return result, nil -} diff --git a/pkg/morph/client/reputation/put.go b/pkg/morph/client/reputation/put.go deleted file mode 100644 index 02b47defe..000000000 --- a/pkg/morph/client/reputation/put.go +++ /dev/null @@ -1,47 +0,0 @@ -package reputation - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -type ( - // PutPrm groups the arguments of "put reputation value" invocation call. - PutPrm struct { - epoch uint64 - peerID reputation.PeerID - value reputation.GlobalTrust - } -) - -// SetEpoch sets epoch of reputation value. 
-func (p *PutPrm) SetEpoch(v uint64) { - p.epoch = v -} - -// SetPeerID sets peer id of reputation value. -func (p *PutPrm) SetPeerID(v reputation.PeerID) { - p.peerID = v -} - -// SetValue sets reputation value. -func (p *PutPrm) SetValue(v reputation.GlobalTrust) { - p.value = v -} - -// Put invokes direct call of "put reputation value" method of reputation contract. -// -// If TryNotary is provided, calls notary contract. -func (c *Client) Put(p PutPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(putMethod) - prm.SetArgs(p.epoch, p.peerID.PublicKey(), p.value.Marshal()) - - err := c.client.Invoke(prm) - if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", putMethod, err) - } - return nil -} diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go index afaf49f3d..910f78537 100644 --- a/pkg/morph/client/static.go +++ b/pkg/morph/client/static.go @@ -94,6 +94,12 @@ type InvokePrmOptional struct { // `validUntilBlock` values by all notification // receivers. hash *util.Uint256 + // controlTX controls whether the invoke method will use a rounded + // block height value, which is useful for control transactions which + // are required to be produced by all nodes with very high probability. + // It's only used by notary transactions and it affects only the + // computation of `validUntilBlock` values. + controlTX bool } // SetHash sets optional hash of the transaction. @@ -104,6 +110,11 @@ func (i *InvokePrmOptional) SetHash(hash util.Uint256) { i.hash = &hash } +// SetControlTX sets whether a control transaction will be used. +func (i *InvokePrmOptional) SetControlTX(b bool) { + i.controlTX = b +} + // Invoke calls Invoke method of Client with static internal script hash and fee. // Supported args types are the same as in Client. 
// @@ -126,7 +137,11 @@ func (s StaticClient) Invoke(prm InvokePrm) error { ) if prm.hash != nil { - nonce, vub, err = s.client.CalculateNonceAndVUB(*prm.hash) + if prm.controlTX { + nonce, vub, err = s.client.CalculateNonceAndVUBControl(*prm.hash) + } else { + nonce, vub, err = s.client.CalculateNonceAndVUB(*prm.hash) + } if err != nil { return fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err) } diff --git a/pkg/morph/client/subnet/admin.go b/pkg/morph/client/subnet/admin.go deleted file mode 100644 index 387da656d..000000000 --- a/pkg/morph/client/subnet/admin.go +++ /dev/null @@ -1,87 +0,0 @@ -package morphsubnet - -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - -// ManageAdminsPrm groups parameters of administer methods of Subnet contract. -// -// Zero value adds node admin. Subnet, key and group must be specified via setters. -type ManageAdminsPrm struct { - // remove or add admin - rm bool - - // client or node admin - client bool - - subnet []byte - - admin []byte - - group []byte -} - -// SetRemove marks admin to be removed. By default, admin is added. -func (x *ManageAdminsPrm) SetRemove() { - x.rm = true -} - -// SetClient switches to client admin. By default, node admin is modified. -func (x *ManageAdminsPrm) SetClient() { - x.client = true -} - -// SetSubnet sets identifier of the subnet in a binary FrostFS API protocol format. -func (x *ManageAdminsPrm) SetSubnet(id []byte) { - x.subnet = id -} - -// SetAdmin sets admin's public key in a binary format. -func (x *ManageAdminsPrm) SetAdmin(key []byte) { - x.admin = key -} - -// SetGroup sets identifier of the client group in a binary FrostFS API protocol format. -// Makes sense only for client admins (see ManageAdminsPrm.SetClient). -func (x *ManageAdminsPrm) SetGroup(id []byte) { - x.group = id -} - -// ManageAdminsRes groups the resulting values of node administer methods of Subnet contract. 
-type ManageAdminsRes struct{} - -// ManageAdmins manages admin list of the FrostFS subnet through Subnet contract calls. -func (x Client) ManageAdmins(prm ManageAdminsPrm) (*ManageAdminsPrm, error) { - var method string - - args := make([]any, 1, 3) - args[0] = prm.subnet - - if prm.client { - args = append(args, prm.group, prm.admin) - - if prm.rm { - method = removeClientAdminMethod - } else { - method = addClientAdminMethod - } - } else { - args = append(args, prm.admin) - - if prm.rm { - method = removeNodeAdminMethod - } else { - method = addNodeAdminMethod - } - } - - var prmInvoke client.InvokePrm - - prmInvoke.SetMethod(method) - prmInvoke.SetArgs(args...) - - err := x.client.Invoke(prmInvoke) - if err != nil { - return nil, err - } - - return new(ManageAdminsPrm), nil -} diff --git a/pkg/morph/client/subnet/client.go b/pkg/morph/client/subnet/client.go deleted file mode 100644 index 8cbae8f95..000000000 --- a/pkg/morph/client/subnet/client.go +++ /dev/null @@ -1,108 +0,0 @@ -package morphsubnet - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Client represents Subnet contract client. -// -// Client should be preliminary initialized (see Init method). -type Client struct { - client *client.StaticClient -} - -// InitPrm groups parameters of Client's initialization. -type InitPrm struct { - base *client.Client - - addr util.Uint160 - - modeSet bool - mode Mode -} - -const ( - deleteMethod = "delete" - getMethod = "get" - putMethod = "put" - - removeClientAdminMethod = "removeClientAdmin" - addClientAdminMethod = "addClientAdmin" - - userAllowedMethod = "userAllowed" - removeUserMethod = "removeUser" - addUserMethod = "addUser" - - removeNodeAdminMethod = "removeNodeAdmin" - addNodeAdminMethod = "addNodeAdmin" - nodeAllowedMethod = "nodeAllowed" - removeNodeMethod = "removeNode" - addNodeMethod = "addNode" -) - -// SetBaseClient sets basic morph client. 
-func (x *InitPrm) SetBaseClient(base *client.Client) { - x.base = base -} - -// SetContractAddress sets address of Subnet contract in FrostFS sidechain. -func (x *InitPrm) SetContractAddress(addr util.Uint160) { - x.addr = addr -} - -// Mode regulates client work mode. -type Mode uint8 - -const ( - _ Mode = iota - - // NonNotary makes client to work in non-notary environment. - NonNotary - - // NotaryAlphabet makes client to use its internal key for signing the notary requests. - NotaryAlphabet - - // NotaryNonAlphabet makes client to not use its internal key for signing the notary requests. - NotaryNonAlphabet -) - -// SetMode makes client to work with non-notary sidechain. -// By default, NonNotary is used. -func (x *InitPrm) SetMode(mode Mode) { - x.modeSet = true - x.mode = mode -} - -// Init initializes client with specified parameters. -// -// Base client must be set. -func (x *Client) Init(prm InitPrm) error { - if prm.base == nil { - panic("missing base morph client") - } - - if !prm.modeSet { - prm.mode = NonNotary - } - - var opts []client.StaticClientOption - - switch prm.mode { - default: - panic(fmt.Sprintf("invalid work mode %d", prm.mode)) - case NonNotary: - case NotaryNonAlphabet: - opts = []client.StaticClientOption{client.TryNotary()} - case NotaryAlphabet: - opts = []client.StaticClientOption{client.TryNotary(), client.AsAlphabet()} - } - - var err error - - x.client, err = client.NewStatic(prm.base, prm.addr, 0, opts...) - - return err -} diff --git a/pkg/morph/client/subnet/clients.go b/pkg/morph/client/subnet/clients.go deleted file mode 100644 index 1c855496e..000000000 --- a/pkg/morph/client/subnet/clients.go +++ /dev/null @@ -1,114 +0,0 @@ -package morphsubnet - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -// UserAllowedPrm groups parameters of UserAllowed method of Subnet contract. 
-type UserAllowedPrm struct { - args [2]any -} - -// SetID sets identifier of the subnet in a binary FrostFS API protocol format. -func (x *UserAllowedPrm) SetID(id []byte) { - x.args[0] = id -} - -// SetClient sets owner ID of the client that is being checked in a binary FrostFS API protocol format. -func (x *UserAllowedPrm) SetClient(id []byte) { - x.args[1] = id -} - -// UserAllowedRes groups the resulting values of UserAllowed method of Subnet contract. -type UserAllowedRes struct { - result bool -} - -// Allowed returns true iff the client is allowed to create containers in the subnet. -func (x UserAllowedRes) Allowed() bool { - return x.result -} - -// UserAllowed checks if the user has access to the subnetwork. -func (x *Client) UserAllowed(prm UserAllowedPrm) (*UserAllowedRes, error) { - args := client.TestInvokePrm{} - - args.SetMethod(userAllowedMethod) - args.SetArgs(prm.args[:]...) - - res, err := x.client.TestInvoke(args) - if err != nil { - return nil, fmt.Errorf("could not make test invoke: %w", err) - } - - if len(res) == 0 { - return nil, errEmptyResponse - } - - result, err := client.BoolFromStackItem(res[0]) - if err != nil { - return nil, err - } - - return &UserAllowedRes{ - result: result, - }, nil -} - -// ManageClientsPrm groups parameters of client management in Subnet contract. -// -// Zero value adds subnet client. Subnet, group and client ID must be specified via setters. -type ManageClientsPrm struct { - // remove or add client - rm bool - - args [3]any -} - -// SetRemove marks client to be removed. By default, client is added. -func (x *ManageClientsPrm) SetRemove() { - x.rm = true -} - -// SetSubnet sets identifier of the subnet in a binary FrostFS API protocol format. -func (x *ManageClientsPrm) SetSubnet(id []byte) { - x.args[0] = id -} - -// SetGroup sets identifier of the client group in a binary FrostFS API protocol format. 
-func (x *ManageClientsPrm) SetGroup(id []byte) { - x.args[1] = id -} - -// SetClient sets client's user ID in a binary FrostFS API protocol format. -func (x *ManageClientsPrm) SetClient(id []byte) { - x.args[2] = id -} - -// ManageClientsRes groups the resulting values of client management methods of Subnet contract. -type ManageClientsRes struct{} - -// ManageClients manages client list of the FrostFS subnet through Subnet contract calls. -func (x Client) ManageClients(prm ManageClientsPrm) (*ManageClientsRes, error) { - var method string - - if prm.rm { - method = removeUserMethod - } else { - method = addUserMethod - } - - var prmInvoke client.InvokePrm - - prmInvoke.SetMethod(method) - prmInvoke.SetArgs(prm.args[:]...) - - err := x.client.Invoke(prmInvoke) - if err != nil { - return nil, err - } - - return new(ManageClientsRes), nil -} diff --git a/pkg/morph/client/subnet/delete.go b/pkg/morph/client/subnet/delete.go deleted file mode 100644 index f7f8bb2c9..000000000 --- a/pkg/morph/client/subnet/delete.go +++ /dev/null @@ -1,40 +0,0 @@ -package morphsubnet - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// DeletePrm groups parameters of Delete method of Subnet contract. -type DeletePrm struct { - cliPrm client.InvokePrm - - args [1]any -} - -// SetTxHash sets hash of the transaction which spawned the notification. -// Ignore this parameter for new requests. -func (x *DeletePrm) SetTxHash(hash util.Uint256) { - x.cliPrm.SetHash(hash) -} - -// SetID sets identifier of the subnet to be removed in a binary FrostFS API protocol format. -func (x *DeletePrm) SetID(id []byte) { - x.args[0] = id -} - -// DeleteRes groups the resulting values of Delete method of Subnet contract. -type DeleteRes struct{} - -// Delete removes subnet though the call of the corresponding method of the Subnet contract. 
-func (x Client) Delete(prm DeletePrm) (*DeleteRes, error) { - prm.cliPrm.SetMethod(deleteMethod) - prm.cliPrm.SetArgs(prm.args[:]...) - - err := x.client.Invoke(prm.cliPrm) - if err != nil { - return nil, err - } - - return new(DeleteRes), nil -} diff --git a/pkg/morph/client/subnet/get.go b/pkg/morph/client/subnet/get.go deleted file mode 100644 index 5cd7c39a0..000000000 --- a/pkg/morph/client/subnet/get.go +++ /dev/null @@ -1,55 +0,0 @@ -package morphsubnet - -import ( - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -// GetPrm groups parameters of Get method of Subnet contract. -type GetPrm struct { - args [1]any -} - -// SetID sets identifier of the subnet to be read in a binary FrostFS API protocol format. -func (x *GetPrm) SetID(id []byte) { - x.args[0] = id -} - -// GetRes groups the resulting values of Get method of Subnet contract. -type GetRes struct { - info []byte -} - -// Info returns information about the subnet in a binary format of FrostFS API protocol. -func (x GetRes) Info() []byte { - return x.info -} - -var errEmptyResponse = errors.New("empty response") - -// Get reads the subnet through the call of the corresponding method of the Subnet contract. -func (x *Client) Get(prm GetPrm) (*GetRes, error) { - var prmGet client.TestInvokePrm - - prmGet.SetMethod(getMethod) - prmGet.SetArgs(prm.args[:]...) 
- - res, err := x.client.TestInvoke(prmGet) - if err != nil { - return nil, err - } - - if len(res) == 0 { - return nil, errEmptyResponse - } - - data, err := client.BytesFromStackItem(res[0]) - if err != nil { - return nil, err - } - - return &GetRes{ - info: data, - }, nil -} diff --git a/pkg/morph/client/subnet/node.go b/pkg/morph/client/subnet/node.go deleted file mode 100644 index 134b92943..000000000 --- a/pkg/morph/client/subnet/node.go +++ /dev/null @@ -1,58 +0,0 @@ -package morphsubnet - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -// NodeAllowedPrm groups parameters of NodeAllowed method of Subnet contract. -type NodeAllowedPrm struct { - cliPrm client.TestInvokePrm - - args [2]any -} - -// SetID sets identifier of the subnet of the node in a binary FrostFS API protocol format. -func (x *NodeAllowedPrm) SetID(id []byte) { - x.args[0] = id -} - -// SetNode sets public key of the node that is being checked. -func (x *NodeAllowedPrm) SetNode(id []byte) { - x.args[1] = id -} - -// NodeAllowedRes groups the resulting values of NodeAllowed method of Subnet contract. -type NodeAllowedRes struct { - result bool -} - -// Allowed returns true iff the node is allowed to enter the subnet. -func (x NodeAllowedRes) Allowed() bool { - return x.result -} - -// NodeAllowed checks if the node is included in the subnetwork. -func (x *Client) NodeAllowed(prm NodeAllowedPrm) (*NodeAllowedRes, error) { - prm.cliPrm.SetMethod(nodeAllowedMethod) - prm.cliPrm.SetArgs(prm.args[:]...) 
- - res, err := x.client.TestInvoke(prm.cliPrm) - if err != nil { - return nil, fmt.Errorf("could not make test invoke: %w", err) - } - - if len(res) == 0 { - return nil, errEmptyResponse - } - - result, err := client.BoolFromStackItem(res[0]) - if err != nil { - return nil, err - } - - return &NodeAllowedRes{ - result: result, - }, nil -} diff --git a/pkg/morph/client/subnet/nodes.go b/pkg/morph/client/subnet/nodes.go deleted file mode 100644 index 68725a016..000000000 --- a/pkg/morph/client/subnet/nodes.go +++ /dev/null @@ -1,54 +0,0 @@ -package morphsubnet - -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - -// ManageNodesPrm groups parameters of node management in Subnet contract. -// -// Zero value adds node to subnet. Subnet and node IDs must be specified via setters. -type ManageNodesPrm struct { - // remove or add node - rm bool - - args [2]any -} - -// SetRemove marks node to be removed. By default, node is added. -func (x *ManageNodesPrm) SetRemove() { - x.rm = true -} - -// SetSubnet sets identifier of the subnet in a binary NeoFS API protocol format. -func (x *ManageNodesPrm) SetSubnet(id []byte) { - x.args[0] = id -} - -// SetNode sets node's public key in a binary format. -func (x *ManageNodesPrm) SetNode(id []byte) { - x.args[1] = id -} - -// ManageNodesRes groups the resulting values of node management methods of Subnet contract. -type ManageNodesRes struct{} - -// ManageNodes manages node list of the NeoFS subnet through Subnet contract calls. -func (x Client) ManageNodes(prm ManageNodesPrm) (*ManageNodesRes, error) { - var method string - - if prm.rm { - method = removeNodeMethod - } else { - method = addNodeMethod - } - - var prmInvoke client.InvokePrm - - prmInvoke.SetMethod(method) - prmInvoke.SetArgs(prm.args[:]...) 
- - err := x.client.Invoke(prmInvoke) - if err != nil { - return nil, err - } - - return new(ManageNodesRes), nil -} diff --git a/pkg/morph/client/subnet/put.go b/pkg/morph/client/subnet/put.go deleted file mode 100644 index 2046e79c2..000000000 --- a/pkg/morph/client/subnet/put.go +++ /dev/null @@ -1,50 +0,0 @@ -package morphsubnet - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// PutPrm groups parameters of Put method of Subnet contract. -type PutPrm struct { - cliPrm client.InvokePrm - - args [3]any -} - -// SetTxHash sets hash of the transaction which spawned the notification. -// Ignore this parameter for new requests. -func (x *PutPrm) SetTxHash(hash util.Uint256) { - x.cliPrm.SetHash(hash) -} - -// SetID sets identifier of the created subnet in a binary FrostFS API protocol format. -func (x *PutPrm) SetID(id []byte) { - x.args[0] = id -} - -// SetOwner sets identifier of the subnet owner in a binary FrostFS API protocol format. -func (x *PutPrm) SetOwner(id []byte) { - x.args[1] = id -} - -// SetInfo sets information about the created subnet in a binary FrostFS API protocol format. -func (x *PutPrm) SetInfo(id []byte) { - x.args[2] = id -} - -// PutRes groups the resulting values of Put method of Subnet contract. -type PutRes struct{} - -// Put creates subnet though the call of the corresponding method of the Subnet contract. -func (x Client) Put(prm PutPrm) (*PutRes, error) { - prm.cliPrm.SetMethod(putMethod) - prm.cliPrm.SetArgs(prm.args[:]...) - - err := x.client.Invoke(prm.cliPrm) - if err != nil { - return nil, err - } - - return new(PutRes), nil -} diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go index 398466f51..4926af27d 100644 --- a/pkg/morph/event/container/delete.go +++ b/pkg/morph/event/container/delete.go @@ -12,77 +12,38 @@ import ( // Delete structure of container.Delete notification from morph chain. 
type Delete struct { - containerID []byte - signature []byte - token []byte + ContainerIDValue []byte + SignatureValue []byte + TokenValue []byte // For notary notifications only. // Contains raw transactions of notary request. - notaryRequest *payload.P2PNotaryRequest + NotaryRequestValue *payload.P2PNotaryRequest } // MorphEvent implements Neo:Morph Event interface. func (Delete) MorphEvent() {} // ContainerID is a marshalled container structure, defined in API. -func (d Delete) ContainerID() []byte { return d.containerID } +func (d Delete) ContainerID() []byte { return d.ContainerIDValue } // Signature of marshalled container by container owner. -func (d Delete) Signature() []byte { return d.signature } +func (d Delete) Signature() []byte { return d.SignatureValue } // SessionToken returns binary token of the session // within which the eACL was set. func (d Delete) SessionToken() []byte { - return d.token + return d.TokenValue } // NotaryRequest returns raw notary request if notification // was received via notary service. Otherwise, returns nil. func (d Delete) NotaryRequest() *payload.P2PNotaryRequest { - return d.notaryRequest + return d.NotaryRequestValue } const expectedItemNumDelete = 3 -// ParseDelete from notification into container event structure. -// -// Expects 3 stack items. 
-func ParseDelete(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Delete - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != expectedItemNumDelete { - return nil, event.WrongNumberOfParameters(expectedItemNumDelete, ln) - } - - // parse container - ev.containerID, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get container: %w", err) - } - - // parse signature - ev.signature, err = client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get signature: %w", err) - } - - // parse session token - ev.token, err = client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get session token: %w", err) - } - - return ev, nil -} - // DeleteSuccess structures notification event of successful container removal // thrown by Container contract. 
type DeleteSuccess struct { diff --git a/pkg/morph/event/container/delete_notary.go b/pkg/morph/event/container/delete_notary.go index 371f18733..23f13acbb 100644 --- a/pkg/morph/event/container/delete_notary.go +++ b/pkg/morph/event/container/delete_notary.go @@ -7,19 +7,19 @@ import ( func (d *Delete) setContainerID(v []byte) { if v != nil { - d.containerID = v + d.ContainerIDValue = v } } func (d *Delete) setSignature(v []byte) { if v != nil { - d.signature = v + d.SignatureValue = v } } func (d *Delete) setToken(v []byte) { if v != nil { - d.token = v + d.TokenValue = v } } @@ -62,7 +62,7 @@ func ParseDeleteNotary(ne event.NotaryEvent) (event.Event, error) { } } - ev.notaryRequest = ne.Raw() + ev.NotaryRequestValue = ne.Raw() return ev, nil } diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go index 8bf894791..627c5fcf5 100644 --- a/pkg/morph/event/container/delete_test.go +++ b/pkg/morph/event/container/delete_test.go @@ -10,66 +10,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestParseDelete(t *testing.T) { - var ( - containerID = []byte("containreID") - signature = []byte("signature") - token = []byte("token") - ) - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - } - - _, err := ParseDelete(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error()) - }) - - t.Run("wrong container parameter", func(t *testing.T) { - _, err := ParseDelete(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong signature parameter", func(t *testing.T) { - _, err := ParseDelete(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(containerID), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong session token parameter", func(t *testing.T) { - _, err := 
ParseDelete(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(containerID), - stackitem.NewByteArray(signature), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseDelete(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(containerID), - stackitem.NewByteArray(signature), - stackitem.NewByteArray(token), - })) - - require.NoError(t, err) - - require.Equal(t, Delete{ - containerID: containerID, - signature: signature, - token: token, - }, ev) - }) -} - func TestParseDeleteSuccess(t *testing.T) { t.Run("wrong number of parameters", func(t *testing.T) { prms := []stackitem.Item{ diff --git a/pkg/morph/event/container/eacl.go b/pkg/morph/event/container/eacl.go index 8ef6a71af..4168d8842 100644 --- a/pkg/morph/event/container/eacl.go +++ b/pkg/morph/event/container/eacl.go @@ -1,25 +1,20 @@ package container import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/network/payload" ) // SetEACL represents structure of notification about // modified eACL table coming from FrostFS Container contract. type SetEACL struct { - table []byte - signature []byte - publicKey []byte - token []byte + TableValue []byte + SignatureValue []byte + PublicKeyValue []byte + TokenValue []byte // For notary notifications only. // Contains raw transactions of notary request. - notaryRequest *payload.P2PNotaryRequest + NotaryRequestValue *payload.P2PNotaryRequest } // MorphEvent implements Neo:Morph Event interface. @@ -27,75 +22,30 @@ func (SetEACL) MorphEvent() {} // Table returns returns eACL table in a binary FrostFS API format. func (x SetEACL) Table() []byte { - return x.table + return x.TableValue } // Signature returns signature of the binary table. 
func (x SetEACL) Signature() []byte { - return x.signature + return x.SignatureValue } // PublicKey returns public keys of container // owner in a binary format. func (x SetEACL) PublicKey() []byte { - return x.publicKey + return x.PublicKeyValue } // SessionToken returns binary token of the session // within which the eACL was set. func (x SetEACL) SessionToken() []byte { - return x.token + return x.TokenValue } // NotaryRequest returns raw notary request if notification // was received via notary service. Otherwise, returns nil. func (x SetEACL) NotaryRequest() *payload.P2PNotaryRequest { - return x.notaryRequest + return x.NotaryRequestValue } const expectedItemNumEACL = 4 - -// ParseSetEACL parses SetEACL notification event from list of stack items. -// -// Expects 4 stack items. -func ParseSetEACL(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev SetEACL - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != expectedItemNumEACL { - return nil, event.WrongNumberOfParameters(expectedItemNumEACL, ln) - } - - // parse table - ev.table, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not parse binary table: %w", err) - } - - // parse signature - ev.signature, err = client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not parse table signature: %w", err) - } - - // parse public key - ev.publicKey, err = client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not parse binary public key: %w", err) - } - - // parse session token - ev.token, err = client.BytesFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get session token: %w", err) - } - - return ev, nil -} diff --git a/pkg/morph/event/container/eacl_notary.go b/pkg/morph/event/container/eacl_notary.go index 
112065b42..a4fe7c966 100644 --- a/pkg/morph/event/container/eacl_notary.go +++ b/pkg/morph/event/container/eacl_notary.go @@ -7,25 +7,25 @@ import ( func (x *SetEACL) setTable(v []byte) { if v != nil { - x.table = v + x.TableValue = v } } func (x *SetEACL) setSignature(v []byte) { if v != nil { - x.signature = v + x.SignatureValue = v } } func (x *SetEACL) setPublicKey(v []byte) { if v != nil { - x.publicKey = v + x.PublicKeyValue = v } } func (x *SetEACL) setToken(v []byte) { if v != nil { - x.token = v + x.TokenValue = v } } @@ -69,7 +69,7 @@ func ParseSetEACLNotary(ne event.NotaryEvent) (event.Event, error) { } } - ev.notaryRequest = ne.Raw() + ev.NotaryRequestValue = ne.Raw() return ev, nil } diff --git a/pkg/morph/event/container/eacl_test.go b/pkg/morph/event/container/eacl_test.go index 2f0598597..159f6cd9f 100644 --- a/pkg/morph/event/container/eacl_test.go +++ b/pkg/morph/event/container/eacl_test.go @@ -1,90 +1,10 @@ package container import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" ) -func TestParseEACL(t *testing.T) { - var ( - binaryTable = []byte("table") - signature = []byte("signature") - publicKey = []byte("pubkey") - token = []byte("token") - ) - - t.Run("wrong number of parameters", func(t *testing.T) { - items := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseSetEACL(createNotifyEventFromItems(items)) - require.EqualError(t, err, event.WrongNumberOfParameters(4, len(items)).Error()) - }) - - t.Run("wrong container parameter", func(t *testing.T) { - _, err := ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong signature parameter", func(t *testing.T) { - _, err := 
ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binaryTable), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong key parameter", func(t *testing.T) { - _, err := ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binaryTable), - stackitem.NewByteArray(signature), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong session token parameter", func(t *testing.T) { - _, err := ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binaryTable), - stackitem.NewByteArray(signature), - stackitem.NewByteArray(publicKey), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseSetEACL(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binaryTable), - stackitem.NewByteArray(signature), - stackitem.NewByteArray(publicKey), - stackitem.NewByteArray(token), - })) - require.NoError(t, err) - - e := ev.(SetEACL) - - require.Equal(t, binaryTable, e.Table()) - require.Equal(t, signature, e.Signature()) - require.Equal(t, publicKey, e.PublicKey()) - require.Equal(t, token, e.SessionToken()) - }) -} - func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent { return &state.ContainedNotificationEvent{ NotificationEvent: state.NotificationEvent{ diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go index d163c6836..335034bf3 100644 --- a/pkg/morph/event/container/put.go +++ b/pkg/morph/event/container/put.go @@ -65,49 +65,6 @@ func (x PutNamed) Zone() string { return x.zone } -// ParsePut from notification into container event structure. 
-func ParsePut(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Put - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != expectedItemNumPut { - return nil, event.WrongNumberOfParameters(expectedItemNumPut, ln) - } - - // parse container - ev.rawContainer, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get container: %w", err) - } - - // parse signature - ev.signature, err = client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get signature: %w", err) - } - - // parse public key - ev.publicKey, err = client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get public key: %w", err) - } - - // parse session token - ev.token, err = client.BytesFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get sesison token: %w", err) - } - - return ev, nil -} - // PutSuccess structures notification event of successful container creation // thrown by Container contract. 
type PutSuccess struct { diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go index 2ccea296f..3622f9943 100644 --- a/pkg/morph/event/container/put_test.go +++ b/pkg/morph/event/container/put_test.go @@ -10,80 +10,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestParsePut(t *testing.T) { - var ( - containerData = []byte("containerData") - signature = []byte("signature") - publicKey = []byte("pubkey") - token = []byte("token") - ) - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParsePut(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(expectedItemNumPut, len(prms)).Error()) - }) - - t.Run("wrong container parameter", func(t *testing.T) { - _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong signature parameter", func(t *testing.T) { - _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(containerData), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong key parameter", func(t *testing.T) { - _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(containerData), - stackitem.NewByteArray(signature), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong session token parameter", func(t *testing.T) { - _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(containerData), - stackitem.NewByteArray(signature), - stackitem.NewByteArray(publicKey), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(containerData), - stackitem.NewByteArray(signature), - stackitem.NewByteArray(publicKey), - 
stackitem.NewByteArray(token), - })) - require.NoError(t, err) - - require.Equal(t, Put{ - rawContainer: containerData, - signature: signature, - publicKey: publicKey, - token: token, - }, ev) - }) -} - func TestParsePutSuccess(t *testing.T) { t.Run("wrong number of parameters", func(t *testing.T) { prms := []stackitem.Item{ diff --git a/pkg/morph/event/frostfs/bind.go b/pkg/morph/event/frostfs/bind.go index 49d10d3c3..8655b1222 100644 --- a/pkg/morph/event/frostfs/bind.go +++ b/pkg/morph/event/frostfs/bind.go @@ -11,31 +11,31 @@ import ( ) type Bind struct { - bindCommon + BindCommon } -type bindCommon struct { - user []byte - keys [][]byte +type BindCommon struct { + UserValue []byte + KeysValue [][]byte - // txHash is used in notary environmental + // TxHashValue is used in notary environmental // for calculating unique but same for // all notification receivers values. - txHash util.Uint256 + TxHashValue util.Uint256 } // TxHash returns hash of the TX with new epoch // notification. -func (b bindCommon) TxHash() util.Uint256 { - return b.txHash +func (b BindCommon) TxHash() util.Uint256 { + return b.TxHashValue } // MorphEvent implements Neo:Morph Event interface. 
-func (bindCommon) MorphEvent() {} +func (BindCommon) MorphEvent() {} -func (b bindCommon) Keys() [][]byte { return b.keys } +func (b BindCommon) Keys() [][]byte { return b.KeysValue } -func (b bindCommon) User() []byte { return b.user } +func (b BindCommon) User() []byte { return b.UserValue } func ParseBind(e *state.ContainedNotificationEvent) (event.Event, error) { var ( @@ -48,17 +48,17 @@ func ParseBind(e *state.ContainedNotificationEvent) (event.Event, error) { return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) } - err = parseBind(&ev.bindCommon, params) + err = parseBind(&ev.BindCommon, params) if err != nil { return nil, err } - ev.txHash = e.Container + ev.TxHashValue = e.Container return ev, nil } -func parseBind(dst *bindCommon, params []stackitem.Item) error { +func parseBind(dst *BindCommon, params []stackitem.Item) error { if ln := len(params); ln != 2 { return event.WrongNumberOfParameters(2, ln) } @@ -66,7 +66,7 @@ func parseBind(dst *bindCommon, params []stackitem.Item) error { var err error // parse user - dst.user, err = client.BytesFromStackItem(params[0]) + dst.UserValue, err = client.BytesFromStackItem(params[0]) if err != nil { return fmt.Errorf("could not get bind user: %w", err) } @@ -77,7 +77,7 @@ func parseBind(dst *bindCommon, params []stackitem.Item) error { return fmt.Errorf("could not get bind keys: %w", err) } - dst.keys = make([][]byte, 0, len(bindKeys)) + dst.KeysValue = make([][]byte, 0, len(bindKeys)) for i := range bindKeys { rawKey, err := client.BytesFromStackItem(bindKeys[i]) @@ -85,7 +85,7 @@ func parseBind(dst *bindCommon, params []stackitem.Item) error { return fmt.Errorf("could not get bind public key: %w", err) } - dst.keys = append(dst.keys, rawKey) + dst.KeysValue = append(dst.KeysValue, rawKey) } return nil diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go index 239ddb1a4..eae2a23f5 100644 --- a/pkg/morph/event/frostfs/cheque.go +++ 
b/pkg/morph/event/frostfs/cheque.go @@ -11,26 +11,26 @@ import ( // Cheque structure of frostfs.Cheque notification from mainnet chain. type Cheque struct { - id []byte - amount int64 // Fixed8 - user util.Uint160 - lock util.Uint160 + IDValue []byte + AmountValue int64 // Fixed8 + UserValue util.Uint160 + LockValue util.Uint160 } // MorphEvent implements Neo:Morph Event interface. func (Cheque) MorphEvent() {} // ID is a withdraw transaction hash. -func (c Cheque) ID() []byte { return c.id } +func (c Cheque) ID() []byte { return c.IDValue } // User returns withdraw receiver script hash from main net. -func (c Cheque) User() util.Uint160 { return c.user } +func (c Cheque) User() util.Uint160 { return c.UserValue } // Amount of the sent assets. -func (c Cheque) Amount() int64 { return c.amount } +func (c Cheque) Amount() int64 { return c.AmountValue } // LockAccount return script hash for balance contract wallet. -func (c Cheque) LockAccount() util.Uint160 { return c.lock } +func (c Cheque) LockAccount() util.Uint160 { return c.LockValue } // ParseCheque from notification into cheque structure. 
func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) { @@ -49,7 +49,7 @@ func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) { } // parse id - ev.id, err = client.BytesFromStackItem(params[0]) + ev.IDValue, err = client.BytesFromStackItem(params[0]) if err != nil { return nil, fmt.Errorf("could not get cheque id: %w", err) } @@ -60,13 +60,13 @@ func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) { return nil, fmt.Errorf("could not get cheque user: %w", err) } - ev.user, err = util.Uint160DecodeBytesBE(user) + ev.UserValue, err = util.Uint160DecodeBytesBE(user) if err != nil { return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err) } // parse amount - ev.amount, err = client.IntFromStackItem(params[2]) + ev.AmountValue, err = client.IntFromStackItem(params[2]) if err != nil { return nil, fmt.Errorf("could not get cheque amount: %w", err) } @@ -77,7 +77,7 @@ func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) { return nil, fmt.Errorf("could not get cheque lock account: %w", err) } - ev.lock, err = util.Uint160DecodeBytesBE(lock) + ev.LockValue, err = util.Uint160DecodeBytesBE(lock) if err != nil { return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err) } diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go index 861f05a68..be53592ca 100644 --- a/pkg/morph/event/frostfs/cheque_test.go +++ b/pkg/morph/event/frostfs/cheque_test.go @@ -77,10 +77,10 @@ func TestParseCheque(t *testing.T) { require.NoError(t, err) require.Equal(t, Cheque{ - id: id, - amount: amount, - user: user, - lock: lock, + IDValue: id, + AmountValue: amount, + UserValue: user, + LockValue: lock, }, ev) }) } diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go index 1b9824b39..4c87634c2 100644 --- a/pkg/morph/event/frostfs/config.go +++ b/pkg/morph/event/frostfs/config.go @@ -10,30 +10,30 @@ 
import ( ) type Config struct { - key []byte - value []byte - id []byte + KeyValue []byte + ValueValue []byte + IDValue []byte - // txHash is used in notary environmental + // TxHashValue is used in notary environmental // for calculating unique but same for // all notification receivers values. - txHash util.Uint256 + TxHashValue util.Uint256 } // TxHash returns hash of the TX with new epoch // notification. func (u Config) TxHash() util.Uint256 { - return u.txHash + return u.TxHashValue } // MorphEvent implements Neo:Morph Event interface. func (Config) MorphEvent() {} -func (u Config) ID() []byte { return u.id } +func (u Config) ID() []byte { return u.IDValue } -func (u Config) Key() []byte { return u.key } +func (u Config) Key() []byte { return u.KeyValue } -func (u Config) Value() []byte { return u.value } +func (u Config) Value() []byte { return u.ValueValue } func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) { var ( @@ -51,24 +51,24 @@ func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) { } // parse id - ev.id, err = client.BytesFromStackItem(params[0]) + ev.IDValue, err = client.BytesFromStackItem(params[0]) if err != nil { return nil, fmt.Errorf("could not get config update id: %w", err) } // parse key - ev.key, err = client.BytesFromStackItem(params[1]) + ev.KeyValue, err = client.BytesFromStackItem(params[1]) if err != nil { return nil, fmt.Errorf("could not get config key: %w", err) } // parse value - ev.value, err = client.BytesFromStackItem(params[2]) + ev.ValueValue, err = client.BytesFromStackItem(params[2]) if err != nil { return nil, fmt.Errorf("could not get config value: %w", err) } - ev.txHash = e.Container + ev.TxHashValue = e.Container return ev, nil } diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go index b56c8ecb2..dcd4201e4 100644 --- a/pkg/morph/event/frostfs/config_test.go +++ b/pkg/morph/event/frostfs/config_test.go @@ -60,9 +60,9 @@ func 
TestParseConfig(t *testing.T) { require.NoError(t, err) require.Equal(t, Config{ - id: id, - key: key, - value: value, + IDValue: id, + KeyValue: key, + ValueValue: value, }, ev) }) } diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go index b9467d112..d8a3b82f0 100644 --- a/pkg/morph/event/frostfs/deposit.go +++ b/pkg/morph/event/frostfs/deposit.go @@ -11,26 +11,26 @@ import ( // Deposit structure of frostfs.Deposit notification from mainnet chain. type Deposit struct { - id []byte - amount int64 // Fixed8 - from util.Uint160 - to util.Uint160 + IDValue []byte + AmountValue int64 // Fixed8 + FromValue util.Uint160 + ToValue util.Uint160 } // MorphEvent implements Neo:Morph Event interface. func (Deposit) MorphEvent() {} // ID is a deposit transaction hash. -func (d Deposit) ID() []byte { return d.id } +func (d Deposit) ID() []byte { return d.IDValue } // From is a script hash of asset sender in main net. -func (d Deposit) From() util.Uint160 { return d.from } +func (d Deposit) From() util.Uint160 { return d.FromValue } // To is a script hash of asset receiver in balance contract. -func (d Deposit) To() util.Uint160 { return d.to } +func (d Deposit) To() util.Uint160 { return d.ToValue } // Amount of transferred assets. -func (d Deposit) Amount() int64 { return d.amount } +func (d Deposit) Amount() int64 { return d.AmountValue } // ParseDeposit notification into deposit structure. 
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) { @@ -51,13 +51,13 @@ func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) { return nil, fmt.Errorf("could not get deposit sender: %w", err) } - ev.from, err = util.Uint160DecodeBytesBE(from) + ev.FromValue, err = util.Uint160DecodeBytesBE(from) if err != nil { return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err) } // parse amount - ev.amount, err = client.IntFromStackItem(params[1]) + ev.AmountValue, err = client.IntFromStackItem(params[1]) if err != nil { return nil, fmt.Errorf("could not get deposit amount: %w", err) } @@ -68,13 +68,13 @@ func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) { return nil, fmt.Errorf("could not get deposit receiver: %w", err) } - ev.to, err = util.Uint160DecodeBytesBE(to) + ev.ToValue, err = util.Uint160DecodeBytesBE(to) if err != nil { return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err) } // parse id - ev.id, err = client.BytesFromStackItem(params[3]) + ev.IDValue, err = client.BytesFromStackItem(params[3]) if err != nil { return nil, fmt.Errorf("could not get deposit id: %w", err) } diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go index 0f52e2119..f279a7f9c 100644 --- a/pkg/morph/event/frostfs/deposit_test.go +++ b/pkg/morph/event/frostfs/deposit_test.go @@ -77,10 +77,10 @@ func TestParseDeposit(t *testing.T) { require.NoError(t, err) require.Equal(t, Deposit{ - id: id, - amount: amount, - from: from, - to: to, + IDValue: id, + AmountValue: amount, + FromValue: from, + ToValue: to, }, ev) }) } diff --git a/pkg/morph/event/frostfs/unbind.go b/pkg/morph/event/frostfs/unbind.go index f88d67994..5a6a8dad9 100644 --- a/pkg/morph/event/frostfs/unbind.go +++ b/pkg/morph/event/frostfs/unbind.go @@ -8,7 +8,7 @@ import ( ) type Unbind struct { - bindCommon + BindCommon } func ParseUnbind(e 
*state.ContainedNotificationEvent) (event.Event, error) { @@ -22,12 +22,12 @@ func ParseUnbind(e *state.ContainedNotificationEvent) (event.Event, error) { return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) } - err = parseBind(&ev.bindCommon, params) + err = parseBind(&ev.BindCommon, params) if err != nil { return nil, err } - ev.txHash = e.Container + ev.TxHashValue = e.Container return ev, nil } diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go index 3bbf76c2c..f48067f86 100644 --- a/pkg/morph/event/frostfs/withdraw.go +++ b/pkg/morph/event/frostfs/withdraw.go @@ -11,22 +11,22 @@ import ( // Withdraw structure of frostfs.Withdraw notification from mainnet chain. type Withdraw struct { - id []byte - amount int64 // Fixed8 - user util.Uint160 + IDValue []byte + AmountValue int64 // Fixed8 + UserValue util.Uint160 } // MorphEvent implements Neo:Morph Event interface. func (Withdraw) MorphEvent() {} // ID is a withdraw transaction hash. -func (w Withdraw) ID() []byte { return w.id } +func (w Withdraw) ID() []byte { return w.IDValue } // User returns withdraw receiver script hash from main net. -func (w Withdraw) User() util.Uint160 { return w.user } +func (w Withdraw) User() util.Uint160 { return w.UserValue } // Amount of the withdraw assets. -func (w Withdraw) Amount() int64 { return w.amount } +func (w Withdraw) Amount() int64 { return w.AmountValue } // ParseWithdraw notification into withdraw structure. 
func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) { @@ -47,19 +47,19 @@ func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) { return nil, fmt.Errorf("could not get withdraw user: %w", err) } - ev.user, err = util.Uint160DecodeBytesBE(user) + ev.UserValue, err = util.Uint160DecodeBytesBE(user) if err != nil { return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err) } // parse amount - ev.amount, err = client.IntFromStackItem(params[1]) + ev.AmountValue, err = client.IntFromStackItem(params[1]) if err != nil { return nil, fmt.Errorf("could not get withdraw amount: %w", err) } // parse id - ev.id, err = client.BytesFromStackItem(params[2]) + ev.IDValue, err = client.BytesFromStackItem(params[2]) if err != nil { return nil, fmt.Errorf("could not get withdraw id: %w", err) } diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go index 5544283ef..33435d19a 100644 --- a/pkg/morph/event/frostfs/withdraw_test.go +++ b/pkg/morph/event/frostfs/withdraw_test.go @@ -64,9 +64,9 @@ func TestParseWithdraw(t *testing.T) { require.NoError(t, err) require.Equal(t, Withdraw{ - id: id, - amount: amount, - user: user, + IDValue: id, + AmountValue: amount, + UserValue: user, }, ev) }) } diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 3de199328..405165702 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -6,6 +6,7 @@ import ( "fmt" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -133,7 +134,7 @@ var ( func (l *listener) Listen(ctx context.Context) { l.startOnce.Do(func() { if err := l.listen(ctx, nil); err != nil { - l.log.Error("could not start listen to events", + 
l.log.Error(logs.EventCouldNotStartListenToEvents, zap.String("error", err.Error()), ) } @@ -149,7 +150,7 @@ func (l *listener) Listen(ctx context.Context) { func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { l.startOnce.Do(func() { if err := l.listen(ctx, intError); err != nil { - l.log.Error("could not start listen to events", + l.log.Error(logs.EventCouldNotStartListenToEvents, zap.String("error", err.Error()), ) intError <- err @@ -221,53 +222,53 @@ loop: if intErr != nil { intErr <- err } else { - l.log.Error("stop event listener by error", zap.Error(err)) + l.log.Error(logs.EventStopEventListenerByError, zap.Error(err)) } break loop case <-ctx.Done(): - l.log.Info("stop event listener by context", + l.log.Info(logs.EventStopEventListenerByContext, zap.String("reason", ctx.Err().Error()), ) break loop case notifyEvent, ok := <-chs.NotificationsCh: if !ok { - l.log.Warn("stop event listener by notification channel") + l.log.Warn(logs.EventStopEventListenerByNotificationChannel) if intErr != nil { intErr <- errors.New("event subscriber connection has been terminated") } break loop } else if notifyEvent == nil { - l.log.Warn("nil notification event was caught") + l.log.Warn(logs.EventNilNotificationEventWasCaught) continue loop } l.handleNotifyEvent(notifyEvent) case notaryEvent, ok := <-chs.NotaryRequestsCh: if !ok { - l.log.Warn("stop event listener by notary channel") + l.log.Warn(logs.EventStopEventListenerByNotaryChannel) if intErr != nil { intErr <- errors.New("notary event subscriber connection has been terminated") } break loop } else if notaryEvent == nil { - l.log.Warn("nil notary event was caught") + l.log.Warn(logs.EventNilNotaryEventWasCaught) continue loop } l.handleNotaryEvent(notaryEvent) case b, ok := <-chs.BlockCh: if !ok { - l.log.Warn("stop event listener by block channel") + l.log.Warn(logs.EventStopEventListenerByBlockChannel) if intErr != nil { intErr <- errors.New("new block notification channel is closed") } 
break loop } else if b == nil { - l.log.Warn("nil block was caught") + l.log.Warn(logs.EventNilBlockWasCaught) continue loop } @@ -282,7 +283,7 @@ func (l *listener) handleBlockEvent(b *block.Block) { l.blockHandlers[i](b) } }); err != nil { - l.log.Warn("listener worker pool drained", + l.log.Warn(logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -291,7 +292,7 @@ func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) { if err := l.pool.Submit(func() { l.parseAndHandleNotary(notaryEvent) }); err != nil { - l.log.Warn("listener worker pool drained", + l.log.Warn(logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -300,7 +301,7 @@ func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEve if err := l.pool.Submit(func() { l.parseAndHandleNotification(notifyEvent) }); err != nil { - l.log.Warn("listener worker pool drained", + l.log.Warn(logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -327,7 +328,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi l.mtx.RUnlock() if !ok { - log.Debug("event parser not set") + log.Debug(logs.EventEventParserNotSet) return } @@ -335,7 +336,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi // parse the notification event event, err := parser(notifyEvent) if err != nil { - log.Warn("could not parse notification event", + log.Warn(logs.EventCouldNotParseNotificationEvent, zap.String("error", err.Error()), ) @@ -348,7 +349,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi l.mtx.RUnlock() if len(handlers) == 0 { - log.Info("notification handlers for parsed notification event were not registered", + log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) @@ -367,11 +368,11 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { switch { 
case errors.Is(err, ErrTXAlreadyHandled): case errors.Is(err, ErrMainTXExpired): - l.log.Warn("skip expired main TX notary event", + l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent, zap.String("error", err.Error()), ) default: - l.log.Warn("could not prepare and validate notary event", + l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent, zap.String("error", err.Error()), ) } @@ -395,7 +396,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Debug("notary parser not set") + log.Debug(logs.EventNotaryParserNotSet) return } @@ -403,7 +404,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { // parse the notary event event, err := parser(notaryEvent) if err != nil { - log.Warn("could not parse notary event", + log.Warn(logs.EventCouldNotParseNotaryEvent, zap.String("error", err.Error()), ) @@ -416,7 +417,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Info("notary handlers for parsed notification event were not registered", + log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) @@ -438,7 +439,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { parser := pi.parser() if parser == nil { - log.Info("ignore nil event parser") + log.Info(logs.EventIgnoreNilEventParser) return } @@ -447,7 +448,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { // check if the listener was started if l.started { - log.Warn("listener has been already started, ignore parser") + log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser) return } @@ -456,7 +457,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { l.notificationParsers[pi.scriptHashWithType] = pi.parser() } - log.Debug("registered new event parser") + log.Debug(logs.EventRegisteredNewEventParser) } // RegisterNotificationHandler registers the handler for 
particular notification event of contract. @@ -471,7 +472,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { handler := hi.Handler() if handler == nil { - log.Warn("ignore nil event handler") + log.Warn(logs.EventIgnoreNilEventHandler) return } @@ -481,7 +482,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { l.mtx.RUnlock() if !ok { - log.Warn("ignore handler of event w/o parser") + log.Warn(logs.EventIgnoreHandlerOfEventWoParser) return } @@ -493,7 +494,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { ) l.mtx.Unlock() - log.Debug("registered new event handler") + log.Debug(logs.EventRegisteredNewEventHandler) } // EnableNotarySupport enables notary request listening. Passed hash is @@ -534,7 +535,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { parser := pi.parser() if parser == nil { - log.Info("ignore nil notary event parser") + log.Info(logs.EventIgnoreNilNotaryEventParser) return } @@ -543,7 +544,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { // check if the listener was started if l.started { - log.Warn("listener has been already started, ignore notary parser") + log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser) return } @@ -552,7 +553,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { l.notaryParsers[pi.notaryRequestTypes] = pi.parser() } - log.Info("registered new event parser") + log.Info(logs.EventRegisteredNewEventParser) } // RegisterNotaryHandler registers the handler for particular notification notary request event. 
@@ -572,7 +573,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { handler := hi.Handler() if handler == nil { - log.Warn("ignore nil notary event handler") + log.Warn(logs.EventIgnoreNilNotaryEventHandler) return } @@ -582,7 +583,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { l.mtx.RUnlock() if !ok { - log.Warn("ignore handler of notary event w/o parser") + log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser) return } @@ -591,7 +592,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler() l.mtx.Unlock() - log.Info("registered new event handler") + log.Info(logs.EventRegisteredNewEventHandler) } // Stop closes subscription channel with remote neo node. @@ -603,7 +604,7 @@ func (l *listener) Stop() { func (l *listener) RegisterBlockHandler(handler BlockHandler) { if handler == nil { - l.log.Warn("ignore nil block handler") + l.log.Warn(logs.EventIgnoreNilBlockHandler) return } diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go new file mode 100644 index 000000000..dc7ac3b81 --- /dev/null +++ b/pkg/morph/event/listener_test.go @@ -0,0 +1,187 @@ +package event + +import ( + "context" + "fmt" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "github.com/nspcc-dev/neo-go/pkg/core/block" + "github.com/nspcc-dev/neo-go/pkg/core/state" + "github.com/nspcc-dev/neo-go/pkg/neorpc/result" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/stretchr/testify/require" +) + +func TestEventHandling(t *testing.T) { + blockCh := make(chan *block.Block) + notificationCh := make(chan *state.ContainedNotificationEvent) + notaryRequestsCh := make(chan *result.NotaryRequestEvent) + + l, err := NewListener(ListenerParams{ + Logger: test.NewLogger(t, true), + Subscriber: &testSubscriber{ + blockCh: blockCh, + notificationCh: notificationCh, + 
notaryRequestsCh: notaryRequestsCh, + }, + WorkerPoolCapacity: 10, + }) + require.NoError(t, err, "failed to create listener") + + list := l.(*listener) + + blockHandled := make(chan bool) + handledBlocks := make([]*block.Block, 0) + l.RegisterBlockHandler(func(b *block.Block) { + handledBlocks = append(handledBlocks, b) + blockHandled <- true + }) + + key := scriptHashWithType{ + scriptHashValue: scriptHashValue{ + hash: util.Uint160{100}, + }, + typeValue: typeValue{ + typ: TypeFromString("notification type"), + }, + } + + l.SetNotificationParser(NotificationParserInfo{ + scriptHashWithType: key, + p: func(cne *state.ContainedNotificationEvent) (Event, error) { + return testNotificationEvent{source: cne}, nil + }, + }) + + notificationHandled := make(chan bool) + handledNotifications := make([]Event, 0) + l.RegisterNotificationHandler(NotificationHandlerInfo{ + scriptHashWithType: key, + h: func(e Event) { + handledNotifications = append(handledNotifications, e) + notificationHandled <- true + }, + }) + + go list.Listen(context.Background()) + + t.Run("handles block events", func(t *testing.T) { + block := &block.Block{} + + blockCh <- block + + <-blockHandled + + require.Equal(t, 1, len(handledBlocks), "invalid handled blocks length") + require.Equal(t, block, handledBlocks[0], "invalid handled block") + }) + + t.Run("handles notifications", func(t *testing.T) { + notification := &state.ContainedNotificationEvent{ + Container: util.Uint256{49}, + NotificationEvent: state.NotificationEvent{ + ScriptHash: util.Uint160{100}, + Name: "notification type", + }, + } + + notificationCh <- notification + + <-notificationHandled + require.EqualValues(t, []Event{testNotificationEvent{source: notification}}, handledNotifications, "invalid handled notifications") + }) +} + +func TestErrorPassing(t *testing.T) { + blockCh := make(chan *block.Block) + notificationCh := make(chan *state.ContainedNotificationEvent) + notaryRequestsCh := make(chan *result.NotaryRequestEvent) + + 
t.Run("notification error", func(t *testing.T) { + nErr := fmt.Errorf("notification error") + l, err := NewListener(ListenerParams{ + Logger: test.NewLogger(t, true), + Subscriber: &testSubscriber{ + blockCh: blockCh, + notificationCh: notificationCh, + notaryRequestsCh: notaryRequestsCh, + + notificationErr: nErr, + }, + WorkerPoolCapacity: 10, + }) + require.NoError(t, err, "failed to create listener") + + errCh := make(chan error) + + go l.ListenWithError(context.Background(), errCh) + + err = <-errCh + + require.ErrorIs(t, err, nErr, "invalid notification error") + }) + + t.Run("block error", func(t *testing.T) { + bErr := fmt.Errorf("notification error") + l, err := NewListener(ListenerParams{ + Logger: test.NewLogger(t, true), + Subscriber: &testSubscriber{ + blockCh: blockCh, + notificationCh: notificationCh, + notaryRequestsCh: notaryRequestsCh, + + blockErr: bErr, + }, + WorkerPoolCapacity: 10, + }) + require.NoError(t, err, "failed to create listener") + l.RegisterBlockHandler(func(b *block.Block) {}) + + errCh := make(chan error) + + go l.ListenWithError(context.Background(), errCh) + + err = <-errCh + + require.ErrorIs(t, err, bErr, "invalid block error") + }) + +} + +type testSubscriber struct { + blockCh chan *block.Block + notificationCh chan *state.ContainedNotificationEvent + notaryRequestsCh chan *result.NotaryRequestEvent + + blockErr error + notificationErr error +} + +func (s *testSubscriber) SubscribeForNotification(...util.Uint160) error { + return s.notificationErr +} +func (s *testSubscriber) UnsubscribeForNotification() {} +func (s *testSubscriber) BlockNotifications() error { + return s.blockErr +} +func (s *testSubscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) error { + return nil +} + +func (s *testSubscriber) NotificationChannels() subscriber.NotificationChannels { + return subscriber.NotificationChannels{ + BlockCh: s.blockCh, + NotificationsCh: s.notificationCh, + NotaryRequestsCh: s.notaryRequestsCh, + } +} + +func 
(s *testSubscriber) Close() {} + +type testNotificationEvent struct { + source *state.ContainedNotificationEvent +} + +func (e testNotificationEvent) MorphEvent() {} diff --git a/pkg/morph/event/netmap/add_peer.go b/pkg/morph/event/netmap/add_peer.go index 87cf94082..80c5559fc 100644 --- a/pkg/morph/event/netmap/add_peer.go +++ b/pkg/morph/event/netmap/add_peer.go @@ -1,56 +1,28 @@ package netmap import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/network/payload" ) type AddPeer struct { - node []byte + NodeBytes []byte // For notary notifications only. // Contains raw transactions of notary request. - notaryRequest *payload.P2PNotaryRequest + Request *payload.P2PNotaryRequest } // MorphEvent implements Neo:Morph Event interface. func (AddPeer) MorphEvent() {} func (s AddPeer) Node() []byte { - return s.node + return s.NodeBytes } // NotaryRequest returns raw notary request if notification // was received via notary service. Otherwise, returns nil. 
func (s AddPeer) NotaryRequest() *payload.P2PNotaryRequest { - return s.notaryRequest + return s.Request } const expectedItemNumAddPeer = 1 - -func ParseAddPeer(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev AddPeer - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != expectedItemNumAddPeer { - return nil, event.WrongNumberOfParameters(expectedItemNumAddPeer, ln) - } - - ev.node, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get raw nodeinfo: %w", err) - } - - return ev, nil -} diff --git a/pkg/morph/event/netmap/add_peer_notary.go b/pkg/morph/event/netmap/add_peer_notary.go index a506b052d..a24722a97 100644 --- a/pkg/morph/event/netmap/add_peer_notary.go +++ b/pkg/morph/event/netmap/add_peer_notary.go @@ -7,7 +7,7 @@ import ( func (s *AddPeer) setNode(v []byte) { if v != nil { - s.node = v + s.NodeBytes = v } } @@ -43,7 +43,7 @@ func ParseAddPeerNotary(ne event.NotaryEvent) (event.Event, error) { } } - ev.notaryRequest = ne.Raw() + ev.Request = ne.Raw() return ev, nil } diff --git a/pkg/morph/event/netmap/add_peer_test.go b/pkg/morph/event/netmap/add_peer_test.go index 1b8bcf40a..4118bb8c8 100644 --- a/pkg/morph/event/netmap/add_peer_test.go +++ b/pkg/morph/event/netmap/add_peer_test.go @@ -1,47 +1,10 @@ package netmap import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" ) -func TestParseAddPeer(t *testing.T) { - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseAddPeer(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) - 
}) - - t.Run("wrong first parameter type", func(t *testing.T) { - _, err := ParseAddPeer(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - info := []byte{1, 2, 3} - - ev, err := ParseAddPeer(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(info), - })) - - require.NoError(t, err) - require.Equal(t, AddPeer{ - node: info, - }, ev) - }) -} - func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent { return &state.ContainedNotificationEvent{ NotificationEvent: state.NotificationEvent{ diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go index 0eaa9f285..e454e2a6a 100644 --- a/pkg/morph/event/netmap/epoch.go +++ b/pkg/morph/event/netmap/epoch.go @@ -11,12 +11,12 @@ import ( // NewEpoch is a new epoch Neo:Morph event. type NewEpoch struct { - num uint64 + Num uint64 - // txHash is used in notary environmental + // Hash is used in notary environmental // for calculating unique but same for // all notification receivers values. - txHash util.Uint256 + Hash util.Uint256 } // MorphEvent implements Neo:Morph Event interface. @@ -24,13 +24,13 @@ func (NewEpoch) MorphEvent() {} // EpochNumber returns new epoch number. func (s NewEpoch) EpochNumber() uint64 { - return s.num + return s.Num } // TxHash returns hash of the TX with new epoch // notification. func (s NewEpoch) TxHash() util.Uint256 { - return s.txHash + return s.Hash } // ParseNewEpoch is a parser of new epoch notification event. 
@@ -52,7 +52,7 @@ func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) { } return NewEpoch{ - num: uint64(prmEpochNum), - txHash: e.Container, + Num: uint64(prmEpochNum), + Hash: e.Container, }, nil } diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go index b175b5275..bc267ecb6 100644 --- a/pkg/morph/event/netmap/epoch_test.go +++ b/pkg/morph/event/netmap/epoch_test.go @@ -37,7 +37,7 @@ func TestParseNewEpoch(t *testing.T) { require.NoError(t, err) require.Equal(t, NewEpoch{ - num: epochNum, + Num: epochNum, }, ev) }) } diff --git a/pkg/morph/event/netmap/update_peer.go b/pkg/morph/event/netmap/update_peer.go index 535d57e4d..e29671131 100644 --- a/pkg/morph/event/netmap/update_peer.go +++ b/pkg/morph/event/netmap/update_peer.go @@ -1,25 +1,21 @@ package netmap import ( - "crypto/elliptic" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/network/payload" ) type UpdatePeer struct { - publicKey *keys.PublicKey + PubKey *keys.PublicKey - state netmap.NodeState + State netmap.NodeState // For notary notifications only. // Contains raw transactions of notary request. - notaryRequest *payload.P2PNotaryRequest + Request *payload.P2PNotaryRequest } // MorphEvent implements Neo:Morph Event interface. @@ -28,27 +24,27 @@ func (UpdatePeer) MorphEvent() {} // Online returns true if node's state is requested to be switched // to "online". func (s UpdatePeer) Online() bool { - return s.state == netmap.NodeStateOnline + return s.State == netmap.NodeStateOnline } // Maintenance returns true if node's state is requested to be switched // to "maintenance". 
func (s UpdatePeer) Maintenance() bool { - return s.state == netmap.NodeStateMaintenance + return s.State == netmap.NodeStateMaintenance } func (s UpdatePeer) PublicKey() *keys.PublicKey { - return s.publicKey + return s.PubKey } // NotaryRequest returns raw notary request if notification // was received via notary service. Otherwise, returns nil. func (s UpdatePeer) NotaryRequest() *payload.P2PNotaryRequest { - return s.notaryRequest + return s.Request } func (s *UpdatePeer) decodeState(state int64) error { - switch s.state = netmap.NodeState(state); s.state { + switch s.State = netmap.NodeState(state); s.State { default: return fmt.Errorf("unsupported node state %d", state) case @@ -60,43 +56,3 @@ func (s *UpdatePeer) decodeState(state int64) error { } const expectedItemNumUpdatePeer = 2 - -func ParseUpdatePeer(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev UpdatePeer - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != expectedItemNumUpdatePeer { - return nil, event.WrongNumberOfParameters(expectedItemNumUpdatePeer, ln) - } - - // parse public key - key, err := client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get public key: %w", err) - } - - ev.publicKey, err = keys.NewPublicKeyFromBytes(key, elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("could not parse public key: %w", err) - } - - // parse node status - st, err := client.IntFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get node status: %w", err) - } - - err = ev.decodeState(st) - if err != nil { - return nil, err - } - - return ev, nil -} diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go index b7a251f98..0260810b8 100644 --- a/pkg/morph/event/netmap/update_peer_notary.go +++ 
b/pkg/morph/event/netmap/update_peer_notary.go @@ -17,7 +17,7 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) { return errNilPubKey } - s.publicKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256()) + s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256()) if err != nil { return fmt.Errorf("could not parse public key: %w", err) } @@ -73,7 +73,7 @@ func ParseUpdatePeerNotary(ne event.NotaryEvent) (event.Event, error) { } } - ev.notaryRequest = ne.Raw() + ev.Request = ne.Raw() return ev, nil } diff --git a/pkg/morph/event/netmap/update_peer_test.go b/pkg/morph/event/netmap/update_peer_test.go deleted file mode 100644 index 1772c88a7..000000000 --- a/pkg/morph/event/netmap/update_peer_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package netmap - -import ( - "math/big" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseUpdatePeer(t *testing.T) { - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - - publicKey := priv.PublicKey() - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - } - - _, err := ParseUpdatePeer(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error()) - }) - - t.Run("wrong first parameter type", func(t *testing.T) { - _, err := ParseUpdatePeer(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong second parameter type", func(t *testing.T) { - _, err := ParseUpdatePeer(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(publicKey.Bytes()), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - const state = netmap.NodeStateMaintenance 
- ev, err := ParseUpdatePeer(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewBigInteger(big.NewInt(int64(state))), - stackitem.NewByteArray(publicKey.Bytes()), - })) - require.NoError(t, err) - - require.Equal(t, UpdatePeer{ - publicKey: publicKey, - state: state, - }, ev) - }) -} diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go index 3d499fec5..a8b7376fa 100644 --- a/pkg/morph/event/notary_preparator.go +++ b/pkg/morph/event/notary_preparator.go @@ -185,13 +185,15 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error { } invokerWitness := ln == 4 - // alphabet node should handle only notary requests - // that have been sent unsigned(by storage nodes) => - // such main TXs should have dummy scripts as an - // invocation script + // alphabet node should handle only notary requests that do not yet have inner + // ring multisignature filled => such main TXs either have empty invocation script + // of the inner ring witness (in case if Notary Actor is used to create request) + // or have it filled with dummy bytes (if request was created manually with the old + // neo-go API) // // this check prevents notary flow recursion - if !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { + if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 || + bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version return ErrTXAlreadyHandled } @@ -218,12 +220,7 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error { } // validate main TX expiration - err = p.validateExpiration(nr.FallbackTransaction) - if err != nil { - return err - } - - return nil + return p.validateExpiration(nr.FallbackTransaction) } func (p Preparator) validateParameterOpcodes(ops []Op) error { @@ -361,7 +358,9 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu // the last 
one must be a placeholder for notary contract witness last := len(w) - 1 - if !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript) || len(w[last].VerificationScript) != 0 { + if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981 + bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version + len(w[last].VerificationScript) != 0 { return errIncorrectNotaryPlaceholder } diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go index d0463348d..b2e46890b 100644 --- a/pkg/morph/event/notary_preparator_test.go +++ b/pkg/morph/event/notary_preparator_test.go @@ -1,6 +1,7 @@ package event import ( + "fmt" "testing" "github.com/nspcc-dev/neo-go/pkg/vm" @@ -24,8 +25,9 @@ var ( alphaKeys keys.PublicKeys wrongAlphaKeys keys.PublicKeys - dummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) - wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...) + dummyAlphabetInvocationScript = []byte{} // expected to be empty if generated by Notary Actor, as requester can't fill it in + dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually + wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...) 
scriptHash util.Uint160 ) @@ -61,35 +63,37 @@ func TestPrepare_IncorrectScript(t *testing.T) { }, ) - t.Run("not contract call", func(t *testing.T) { - bw := io.NewBufBinWriter() + for _, dummyMultisig := range []bool{true, false} { // try both empty and dummy multisig/Notary invocation witness script + t.Run(fmt.Sprintf("not contract call, compat: %t", dummyMultisig), func(t *testing.T) { + bw := io.NewBufBinWriter() - emit.Int(bw.BinWriter, 4) - emit.String(bw.BinWriter, "test") - emit.Bytes(bw.BinWriter, scriptHash.BytesBE()) - emit.Syscall(bw.BinWriter, interopnames.SystemContractCallNative) // any != interopnames.SystemContractCall + emit.Int(bw.BinWriter, 4) + emit.String(bw.BinWriter, "test") + emit.Bytes(bw.BinWriter, scriptHash.BytesBE()) + emit.Syscall(bw.BinWriter, interopnames.SystemContractCallNative) // any != interopnames.SystemContractCall - nr := correctNR(bw.Bytes(), false) + nr := correctNR(bw.Bytes(), dummyMultisig, false) - _, err := preparator.Prepare(nr) + _, err := preparator.Prepare(nr) - require.EqualError(t, err, errNotContractCall.Error()) - }) + require.EqualError(t, err, errNotContractCall.Error()) + }) - t.Run("incorrect ", func(t *testing.T) { - bw := io.NewBufBinWriter() + t.Run(fmt.Sprintf("incorrect, compat: %t", dummyMultisig), func(t *testing.T) { + bw := io.NewBufBinWriter() - emit.Int(bw.BinWriter, -1) - emit.String(bw.BinWriter, "test") - emit.Bytes(bw.BinWriter, scriptHash.BytesBE()) - emit.Syscall(bw.BinWriter, interopnames.SystemContractCall) + emit.Int(bw.BinWriter, -1) + emit.String(bw.BinWriter, "test") + emit.Bytes(bw.BinWriter, scriptHash.BytesBE()) + emit.Syscall(bw.BinWriter, interopnames.SystemContractCall) - nr := correctNR(bw.Bytes(), false) + nr := correctNR(bw.Bytes(), dummyMultisig, false) - _, err := preparator.Prepare(nr) + _, err := preparator.Prepare(nr) - require.EqualError(t, err, errIncorrectCallFlag.Error()) - }) + require.EqualError(t, err, errIncorrectCallFlag.Error()) + }) + } } func 
TestPrepare_IncorrectNR(t *testing.T) { @@ -209,7 +213,23 @@ func TestPrepare_IncorrectNR(t *testing.T) { InvocationScript: make([]byte, 1), }, { - InvocationScript: dummyInvocationScript, + InvocationScript: dummyAlphabetInvocationScript, + }, + {}, + }, + }, + expErr: errIncorrectProxyWitnesses, + }, + { + name: "incorrect main TX proxy witness compat", + addW: false, + mTX: mTX{ + scripts: []transaction.Witness{ + { + InvocationScript: make([]byte, 1), + }, + { + InvocationScript: dummyAlphabetInvocationScriptOld, }, {}, }, @@ -224,7 +244,22 @@ func TestPrepare_IncorrectNR(t *testing.T) { {}, { VerificationScript: wrongAlphaVerificationScript, - InvocationScript: dummyInvocationScript, + InvocationScript: dummyAlphabetInvocationScript, + }, + {}, + }, + }, + expErr: errIncorrectAlphabet, + }, + { + name: "incorrect main TX Alphabet witness compat", + addW: false, + mTX: mTX{ + scripts: []transaction.Witness{ + {}, + { + VerificationScript: wrongAlphaVerificationScript, + InvocationScript: dummyAlphabetInvocationScriptOld, }, {}, }, @@ -239,7 +274,24 @@ func TestPrepare_IncorrectNR(t *testing.T) { {}, { VerificationScript: alphaVerificationScript, - InvocationScript: dummyInvocationScript, + InvocationScript: dummyAlphabetInvocationScript, + }, + { + InvocationScript: wrongDummyInvocationScript, + }, + }, + }, + expErr: errIncorrectNotaryPlaceholder, + }, + { + name: "incorrect main TX Notary witness compat", + addW: false, + mTX: mTX{ + scripts: []transaction.Witness{ + {}, + { + VerificationScript: alphaVerificationScript, + InvocationScript: dummyAlphabetInvocationScriptOld, }, { InvocationScript: wrongDummyInvocationScript, @@ -289,7 +341,23 @@ func TestPrepare_IncorrectNR(t *testing.T) { {}, { VerificationScript: alphaVerificationScript, - InvocationScript: dummyInvocationScript, + InvocationScript: dummyAlphabetInvocationScript, + }, + {}, + {}, + }, + }, + expErr: errIncorrectInvokerWitnesses, + }, + { + name: "incorrect invoker TX Alphabet witness 
compat", + addW: true, + mTX: mTX{ + scripts: []transaction.Witness{ + {}, + { + VerificationScript: alphaVerificationScript, + InvocationScript: dummyAlphabetInvocationScriptOld, }, {}, {}, @@ -327,7 +395,7 @@ func TestPrepare_IncorrectNR(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - correctNR := correctNR(nil, test.addW) + correctNR := correctNR(nil, false, test.addW) incorrectNR = setIncorrectFields(*correctNR, test.mTX, test.fbTX) _, err = preparator.Prepare(&incorrectNR) @@ -372,40 +440,42 @@ func TestPrepare_CorrectNR(t *testing.T) { for _, test := range tests { for i := 0; i < 1; i++ { // run tests against 3 and 4 witness NR - additionalWitness := i == 0 - nr := correctNR(script(test.hash, test.method, test.args...), additionalWitness) + for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness + additionalWitness := i == 0 + nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness) - event, err := preparator.Prepare(nr) + event, err := preparator.Prepare(nr) - require.NoError(t, err) - require.Equal(t, test.method, event.Type().String()) - require.Equal(t, test.hash.StringLE(), event.ScriptHash().StringLE()) - - // check args parsing - bw := io.NewBufBinWriter() - emit.Array(bw.BinWriter, test.args...) - - ctx := vm.NewContext(bw.Bytes()) - - opCode, param, err := ctx.Next() - require.NoError(t, err) - - for _, opGot := range event.Params() { - require.Equal(t, opCode, opGot.code) - require.Equal(t, param, opGot.param) - - opCode, param, err = ctx.Next() require.NoError(t, err) + require.Equal(t, test.method, event.Type().String()) + require.Equal(t, test.hash.StringLE(), event.ScriptHash().StringLE()) + + // check args parsing + bw := io.NewBufBinWriter() + emit.Array(bw.BinWriter, test.args...) 
+ + ctx := vm.NewContext(bw.Bytes()) + + opCode, param, err := ctx.Next() + require.NoError(t, err) + + for _, opGot := range event.Params() { + require.Equal(t, opCode, opGot.code) + require.Equal(t, param, opGot.param) + + opCode, param, err = ctx.Next() + require.NoError(t, err) + } + + _, _, err = ctx.Next() // PACK opcode + require.NoError(t, err) + _, _, err = ctx.Next() // packing len opcode + require.NoError(t, err) + + opCode, _, err = ctx.Next() + require.NoError(t, err) + require.Equal(t, opcode.RET, opCode) } - - _, _, err = ctx.Next() // PACK opcode - require.NoError(t, err) - _, _, err = ctx.Next() // packing len opcode - require.NoError(t, err) - - opCode, _, err = ctx.Next() - require.NoError(t, err) - require.Equal(t, opcode.RET, opCode) } } } @@ -428,7 +498,7 @@ func script(hash util.Uint160, method string, args ...any) []byte { return bw.Bytes() } -func correctNR(script []byte, additionalWitness bool) *payload.P2PNotaryRequest { +func correctNR(script []byte, dummyMultisig, additionalWitness bool) *payload.P2PNotaryRequest { alphaVerificationScript, _ := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) signers := []transaction.Signer{ @@ -443,20 +513,24 @@ func correctNR(script []byte, additionalWitness bool) *payload.P2PNotaryRequest signers[2] = transaction.Signer{Account: hash.Hash160(alphaVerificationScript)} } + multisigInv := dummyAlphabetInvocationScript + if dummyMultisig { + multisigInv = dummyAlphabetInvocationScriptOld + } scripts := []transaction.Witness{ {}, { - InvocationScript: dummyInvocationScript, + InvocationScript: multisigInv, VerificationScript: alphaVerificationScript, }, { - InvocationScript: dummyInvocationScript, + InvocationScript: multisigInv, }, } if additionalWitness { // insert on element with index 2 scripts = append(scripts[:2+1], scripts[2:]...) 
scripts[2] = transaction.Witness{ - InvocationScript: dummyInvocationScript, + InvocationScript: multisigInv, VerificationScript: alphaVerificationScript, } } diff --git a/pkg/morph/event/reputation/put.go b/pkg/morph/event/reputation/put.go deleted file mode 100644 index a182bf26c..000000000 --- a/pkg/morph/event/reputation/put.go +++ /dev/null @@ -1,99 +0,0 @@ -package reputation - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/network/payload" -) - -// Put structure of reputation.reputationPut notification from -// morph chain. -type Put struct { - epoch uint64 - peerID reputation.PeerID - value reputation.GlobalTrust - - // For notary notifications only. - // Contains raw transactions of notary request. - notaryRequest *payload.P2PNotaryRequest -} - -const peerIDLength = 33 // compressed public key - -// MorphEvent implements Neo:Morph Event interface. -func (Put) MorphEvent() {} - -// Epoch returns epoch value of reputation data. -func (p Put) Epoch() uint64 { - return p.epoch -} - -// PeerID returns peer id of reputation data. -func (p Put) PeerID() reputation.PeerID { - return p.peerID -} - -// Value returns reputation structure. -func (p Put) Value() reputation.GlobalTrust { - return p.value -} - -// NotaryRequest returns raw notary request if notification -// was received via notary service. Otherwise, returns nil. -func (p Put) NotaryRequest() *payload.P2PNotaryRequest { - return p.notaryRequest -} - -// ParsePut from notification into reputation event structure. 
-func ParsePut(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Put - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != 3 { - return nil, event.WrongNumberOfParameters(3, ln) - } - - // parse epoch number - epoch, err := client.IntFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get integer epoch number: %w", err) - } - - ev.epoch = uint64(epoch) - - // parse peer ID value - peerID, err := client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get peer ID value: %w", err) - } - - if ln := len(peerID); ln != peerIDLength { - return nil, fmt.Errorf("peer ID is %d byte long, expected %d", ln, peerIDLength) - } - - ev.peerID.SetPublicKey(peerID) - - // parse global trust value - rawValue, err := client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get global trust value: %w", err) - } - - err = ev.value.Unmarshal(rawValue) - if err != nil { - return nil, fmt.Errorf("could not parse global trust value: %w", err) - } - - return ev, nil -} diff --git a/pkg/morph/event/reputation/put_notary.go b/pkg/morph/event/reputation/put_notary.go deleted file mode 100644 index f3cd749fc..000000000 --- a/pkg/morph/event/reputation/put_notary.go +++ /dev/null @@ -1,74 +0,0 @@ -package reputation - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" -) - -func (p *Put) setEpoch(v uint64) { - p.epoch = v -} - -func (p *Put) setPeerID(v []byte) error { - if ln := len(v); ln != peerIDLength { - return fmt.Errorf("peer ID is %d byte long, expected %d", ln, peerIDLength) - } - - p.peerID.SetPublicKey(v) - - return nil -} - -func (p *Put) setValue(v []byte) error { - return p.value.Unmarshal(v) -} - -var fieldSetters = []func(*Put, []byte) error{ - // order on stack is reversed - 
(*Put).setValue, - (*Put).setPeerID, -} - -const ( - // PutNotaryEvent is method name for reputation put operations - // in `Reputation` contract. Is used as identifier for notary - // put reputation requests. - PutNotaryEvent = "put" -) - -// ParsePutNotary from NotaryEvent into reputation event structure. -func ParsePutNotary(ne event.NotaryEvent) (event.Event, error) { - var ev Put - - fieldNum := 0 - - for _, op := range ne.Params() { - switch fieldNum { - case 0, 1: - data, err := event.BytesFromOpcode(op) - if err != nil { - return nil, err - } - - err = fieldSetters[fieldNum](&ev, data) - if err != nil { - return nil, fmt.Errorf("can't parse field num %d: %w", fieldNum, err) - } - case 2: - n, err := event.IntFromOpcode(op) - if err != nil { - return nil, err - } - - ev.setEpoch(uint64(n)) - default: - return nil, event.UnexpectedArgNumErr(PutNotaryEvent) - } - fieldNum++ - } - - ev.notaryRequest = ne.Raw() - - return ev, nil -} diff --git a/pkg/morph/event/reputation/put_test.go b/pkg/morph/event/reputation/put_test.go deleted file mode 100644 index 46356b317..000000000 --- a/pkg/morph/event/reputation/put_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package reputation - -import ( - "math/big" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - reputationtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation/test" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParsePut(t *testing.T) { - var ( - peerID = reputationtest.PeerID() - - value reputation.GlobalTrust - trust reputation.Trust - trustValue float64 = 0.64 - - epoch uint64 = 42 - ) - - trust.SetValue(trustValue) - trust.SetPeer(peerID) - - value.SetTrust(trust) - - rawValue := value.Marshal() - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - 
stackitem.NewMap(), - } - - _, err := ParsePut(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error()) - }) - - t.Run("wrong epoch parameter", func(t *testing.T) { - _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong peerID parameter", func(t *testing.T) { - _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewBigInteger(new(big.Int).SetUint64(epoch)), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong value parameter", func(t *testing.T) { - _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewBigInteger(new(big.Int).SetUint64(epoch)), - stackitem.NewByteArray(peerID.PublicKey()), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewBigInteger(new(big.Int).SetUint64(epoch)), - stackitem.NewByteArray(peerID.PublicKey()), - stackitem.NewByteArray(rawValue), - })) - require.NoError(t, err) - - require.Equal(t, Put{ - epoch: epoch, - peerID: peerID, - value: value, - }, ev) - }) -} - -func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent { - return &state.ContainedNotificationEvent{ - NotificationEvent: state.NotificationEvent{ - Item: stackitem.NewArray(items), - }, - } -} diff --git a/pkg/morph/event/subnet/delete.go b/pkg/morph/event/subnet/delete.go deleted file mode 100644 index f46658b58..000000000 --- a/pkg/morph/event/subnet/delete.go +++ /dev/null @@ -1,63 +0,0 @@ -package subnetevents - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Delete structures information about the 
notification generated by Delete method of Subnet contract. -type Delete struct { - txHash util.Uint256 - - id []byte -} - -// MorphEvent implements Neo:Morph Event interface. -func (Delete) MorphEvent() {} - -// ID returns identifier of the removed subnet in a binary format of NeoFS API protocol. -func (x Delete) ID() []byte { - return x.id -} - -// TxHash returns hash of the transaction which thrown the notification event. -// Makes sense only in notary environments. -func (x Delete) TxHash() util.Uint256 { - return x.txHash -} - -// ParseDelete parses the notification about the removal of a subnet which has been thrown -// by the appropriate method of the Subnet contract. -// -// Resulting event is of Delete type. -func ParseDelete(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Delete - err error - ) - - items, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("parse stack array: %w", err) - } - - const itemNumDelete = 1 - - if ln := len(items); ln != itemNumDelete { - return nil, event.WrongNumberOfParameters(itemNumDelete, ln) - } - - // parse ID - ev.id, err = client.BytesFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("id item: %w", err) - } - - ev.txHash = e.Container - - return ev, nil -} diff --git a/pkg/morph/event/subnet/delete_test.go b/pkg/morph/event/subnet/delete_test.go deleted file mode 100644 index fc68bb227..000000000 --- a/pkg/morph/event/subnet/delete_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package subnetevents_test - -import ( - "testing" - - subnetevents "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseDelete(t *testing.T) { - id := []byte("id") - - t.Run("wrong number of items", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewByteArray(nil), - stackitem.NewByteArray(nil), - } - - _, err := 
subnetevents.ParseDelete(createNotifyEventFromItems(prms)) - require.Error(t, err) - }) - - t.Run("wrong id item", func(t *testing.T) { - _, err := subnetevents.ParseDelete(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := subnetevents.ParseDelete(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(id), - })) - require.NoError(t, err) - - v := ev.(subnetevents.Delete) - - require.Equal(t, id, v.ID()) - }) -} diff --git a/pkg/morph/event/subnet/put.go b/pkg/morph/event/subnet/put.go deleted file mode 100644 index 3b1f5297d..000000000 --- a/pkg/morph/event/subnet/put.go +++ /dev/null @@ -1,147 +0,0 @@ -package subnetevents - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/network/payload" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// Put structures information about the notification generated by Put method of Subnet contract. -type Put struct { - notaryRequest *payload.P2PNotaryRequest - - txHash util.Uint256 - - id []byte - - owner []byte - - info []byte -} - -// MorphEvent implements Neo:Morph Event interface. -func (Put) MorphEvent() {} - -// ID returns identifier of the creating subnet in a binary format of FrostFS API protocol. -func (x Put) ID() []byte { - return x.id -} - -// Owner returns subnet owner's public key in a binary format. -func (x Put) Owner() []byte { - return x.owner -} - -// Info returns information about the subnet in a binary format of FrostFS API protocol. -func (x Put) Info() []byte { - return x.info -} - -// TxHash returns hash of the transaction which thrown the notification event. -// Makes sense only in notary environments. 
-func (x Put) TxHash() util.Uint256 { - return x.txHash -} - -// NotaryMainTx returns main transaction of the request in the Notary service. -// Returns nil in non-notary environments. -func (x Put) NotaryMainTx() *transaction.Transaction { - if x.notaryRequest != nil { - return x.notaryRequest.MainTransaction - } - - return nil -} - -// number of items in notification about subnet creation. -const itemNumPut = 3 - -// ParsePut parses the notification about the creation of a subnet which has been thrown -// by the appropriate method of the subnet contract. -// -// Resulting event is of Put type. -func ParsePut(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - put Put - err error - ) - - items, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("parse stack array: %w", err) - } - - if ln := len(items); ln != itemNumPut { - return nil, event.WrongNumberOfParameters(itemNumPut, ln) - } - - // parse ID - put.id, err = client.BytesFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("id item: %w", err) - } - - // parse owner - put.owner, err = client.BytesFromStackItem(items[1]) - if err != nil { - return nil, fmt.Errorf("owner item: %w", err) - } - - // parse info about subnet - put.info, err = client.BytesFromStackItem(items[2]) - if err != nil { - return nil, fmt.Errorf("info item: %w", err) - } - - put.txHash = e.Container - - return put, nil -} - -// ParseNotaryPut parses the notary notification about the creation of a subnet which has been -// thrown by the appropriate method of the subnet contract. -// -// Resulting event is of Put type. 
-func ParseNotaryPut(e event.NotaryEvent) (event.Event, error) { - var put Put - - put.notaryRequest = e.Raw() - if put.notaryRequest == nil { - panic(fmt.Sprintf("nil %T in notary environment", put.notaryRequest)) - } - - var ( - err error - - prms = e.Params() - ) - - if ln := len(prms); ln != itemNumPut { - return nil, event.WrongNumberOfParameters(itemNumPut, ln) - } - - // parse info about subnet - put.info, err = event.BytesFromOpcode(prms[0]) - if err != nil { - return nil, fmt.Errorf("info param: %w", err) - } - - // parse owner - put.owner, err = event.BytesFromOpcode(prms[1]) - if err != nil { - return nil, fmt.Errorf("creator param: %w", err) - } - - // parse ID - put.id, err = event.BytesFromOpcode(prms[2]) - if err != nil { - return nil, fmt.Errorf("id param: %w", err) - } - - return put, nil -} diff --git a/pkg/morph/event/subnet/put_test.go b/pkg/morph/event/subnet/put_test.go deleted file mode 100644 index 8a75b62c8..000000000 --- a/pkg/morph/event/subnet/put_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package subnetevents_test - -import ( - "testing" - - subnetevents "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParsePut(t *testing.T) { - var ( - id = []byte("id") - owner = []byte("owner") - info = []byte("info") - ) - - t.Run("wrong number of items", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewByteArray(nil), - stackitem.NewByteArray(nil), - } - - _, err := subnetevents.ParsePut(createNotifyEventFromItems(prms)) - require.Error(t, err) - }) - - t.Run("wrong id item", func(t *testing.T) { - _, err := subnetevents.ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("wrong owner item", func(t *testing.T) { - _, err := subnetevents.ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(id), - stackitem.NewMap(), - })) 
- - require.Error(t, err) - }) - - t.Run("wrong info item", func(t *testing.T) { - _, err := subnetevents.ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(id), - stackitem.NewByteArray(owner), - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := subnetevents.ParsePut(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(id), - stackitem.NewByteArray(owner), - stackitem.NewByteArray(info), - })) - require.NoError(t, err) - - v := ev.(subnetevents.Put) - - require.Equal(t, id, v.ID()) - require.Equal(t, owner, v.Owner()) - require.Equal(t, info, v.Info()) - }) -} diff --git a/pkg/morph/event/subnet/remove_node.go b/pkg/morph/event/subnet/remove_node.go deleted file mode 100644 index 67bfb8918..000000000 --- a/pkg/morph/event/subnet/remove_node.go +++ /dev/null @@ -1,69 +0,0 @@ -package subnetevents - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// RemoveNode structure of subnet.RemoveNode notification from morph chain. -type RemoveNode struct { - subnetID []byte - nodeKey []byte - - // txHash is used in notary environmental - // for calculating unique but same for - // all notification receivers values. - txHash util.Uint256 -} - -// MorphEvent implements Neo:Morph Event interface. -func (RemoveNode) MorphEvent() {} - -// SubnetworkID returns a marshalled subnetID structure, defined in API. -func (rn RemoveNode) SubnetworkID() []byte { return rn.subnetID } - -// Node is public key of the nodeKey that is being deleted. -func (rn RemoveNode) Node() []byte { return rn.nodeKey } - -// TxHash returns hash of the TX with RemoveNode -// notification. 
-func (rn RemoveNode) TxHash() util.Uint256 { return rn.txHash } - -const expectedItemNumRemoveNode = 2 - -// ParseRemoveNode parses notification into subnet event structure. -// -// Expects 2 stack items. -func ParseRemoveNode(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev RemoveNode - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != expectedItemNumRemoveNode { - return nil, event.WrongNumberOfParameters(expectedItemNumRemoveNode, ln) - } - - ev.subnetID, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get raw subnetID: %w", err) - } - - ev.nodeKey, err = client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get raw public key of the node: %w", err) - } - - ev.txHash = e.Container - - return ev, nil -} diff --git a/pkg/morph/event/subnet/remove_node_test.go b/pkg/morph/event/subnet/remove_node_test.go deleted file mode 100644 index 70fff4dc8..000000000 --- a/pkg/morph/event/subnet/remove_node_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package subnetevents_test - -import ( - "testing" - - . 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet" - subnetid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/subnet/id" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestParseRemoveNode(t *testing.T) { - t.Run("wrong number of arguments", func(t *testing.T) { - _, err := ParseRemoveNode(createNotifyEventFromItems([]stackitem.Item{})) - require.Error(t, err) - }) - - t.Run("invalid item type", func(t *testing.T) { - args := []stackitem.Item{stackitem.NewMap(), stackitem.Make(123)} - _, err := ParseRemoveNode(createNotifyEventFromItems(args)) - require.Error(t, err) - }) - - subnetID := subnetid.ID{} - subnetID.SetNumeric(123) - - rawSubnetID := subnetID.Marshal() - - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - - pub := priv.PublicKey() - - t.Run("good", func(t *testing.T) { - args := []stackitem.Item{stackitem.NewByteArray(rawSubnetID), stackitem.Make(pub.Bytes())} - - e, err := ParseRemoveNode(createNotifyEventFromItems(args)) - require.NoError(t, err) - - gotRaw := e.(RemoveNode).SubnetworkID() - require.NoError(t, err) - - require.Equal(t, rawSubnetID, gotRaw) - require.Equal(t, pub.Bytes(), e.(RemoveNode).Node()) - }) -} - -func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent { - return &state.ContainedNotificationEvent{ - NotificationEvent: state.NotificationEvent{ - Item: stackitem.NewArray(items), - }, - } -} diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go index 355fd5b4d..2a7c6250d 100644 --- a/pkg/morph/event/utils.go +++ b/pkg/morph/event/utils.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" util2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -89,7 +90,7 @@ func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handle }) if err != nil { - log.Warn("could not Submit handler to worker pool", + log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool, zap.String("error", err.Error()), ) } diff --git a/pkg/morph/event/utils_test.go b/pkg/morph/event/utils_test.go new file mode 100644 index 000000000..83facc653 --- /dev/null +++ b/pkg/morph/event/utils_test.go @@ -0,0 +1,48 @@ +package event + +import ( + "math/big" + "testing" + + "github.com/nspcc-dev/neo-go/pkg/core/state" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" + "github.com/stretchr/testify/require" +) + +func TestParseStackArray(t *testing.T) { + t.Run("success", func(t *testing.T) { + arr := &stackitem.Array{} + arr.Append(stackitem.NewBigInteger(big.NewInt(1))) + arr.Append(stackitem.NewBigInteger(big.NewInt(2))) + ev := &state.ContainedNotificationEvent{ + Container: util.Uint256{67}, + NotificationEvent: state.NotificationEvent{ + ScriptHash: util.Uint160{69}, + Name: "name", + Item: arr, + }, + } + + items, err := ParseStackArray(ev) + require.NoError(t, err, "failed to parse event items") + require.Equal(t, 2, len(items), "invalid length") + require.Equal(t, stackitem.NewBigInteger(big.NewInt(1)), items[0], "invalid item 0") + require.Equal(t, stackitem.NewBigInteger(big.NewInt(2)), items[1], "invalid item 0") + }) + t.Run("empty stack error", func(t *testing.T) { + arr := &stackitem.Array{} + ev := &state.ContainedNotificationEvent{ + Container: util.Uint256{67}, + NotificationEvent: state.NotificationEvent{ + ScriptHash: util.Uint160{69}, + Name: "name", + Item: arr, + }, + } + + items, err := ParseStackArray(ev) + require.ErrorIs(t, err, errEmptyStackArray, "invalid empty array error") + require.Equal(t, 0, len(items), "items was returned") + }) +} diff --git a/pkg/morph/subscriber/subscriber.go 
b/pkg/morph/subscriber/subscriber.go index 17bed5b2d..383d58407 100644 --- a/pkg/morph/subscriber/subscriber.go +++ b/pkg/morph/subscriber/subscriber.go @@ -6,12 +6,13 @@ import ( "fmt" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/neorpc" "github.com/nspcc-dev/neo-go/pkg/neorpc/result" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/util" "go.uber.org/zap" ) @@ -26,7 +27,6 @@ type ( // Subscriber is an interface of the NotificationEvent listener. Subscriber interface { SubscribeForNotification(...util.Uint160) error - UnsubscribeForNotification() BlockNotifications() error SubscribeForNotaryRequests(mainTXSigner util.Uint160) error @@ -35,16 +35,27 @@ type ( Close() } + subChannels struct { + NotifyChan chan *state.ContainedNotificationEvent + BlockChan chan *block.Block + NotaryChan chan *result.NotaryRequestEvent + } + subscriber struct { *sync.RWMutex log *logger.Logger client *client.Client notifyChan chan *state.ContainedNotificationEvent - - blockChan chan *block.Block - + blockChan chan *block.Block notaryChan chan *result.NotaryRequestEvent + + current subChannels + + // cached subscription information + subscribedEvents map[util.Uint160]bool + subscribedNotaryEvents map[util.Uint160]bool + subscribedToNewBlocks bool } // Params is a group of Subscriber constructor parameters. 
@@ -75,116 +86,66 @@ func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) error { s.Lock() defer s.Unlock() - notifyIDs := make(map[util.Uint160]struct{}, len(contracts)) + notifyIDs := make([]string, 0, len(contracts)) for i := range contracts { + if s.subscribedEvents[contracts[i]] { + continue + } // subscribe to contract notifications - err := s.client.SubscribeForExecutionNotifications(contracts[i]) + id, err := s.client.ReceiveExecutionNotifications(contracts[i], s.current.NotifyChan) if err != nil { // if there is some error, undo all subscriptions and return error - for hash := range notifyIDs { - _ = s.client.UnsubscribeContract(hash) + for _, id := range notifyIDs { + _ = s.client.Unsubscribe(id) } return err } // save notification id - notifyIDs[contracts[i]] = struct{}{} + notifyIDs = append(notifyIDs, id) + } + for i := range contracts { + s.subscribedEvents[contracts[i]] = true } return nil } -func (s *subscriber) UnsubscribeForNotification() { - err := s.client.UnsubscribeAll() - if err != nil { - s.log.Error("unsubscribe for notification", - zap.Error(err)) - } -} - func (s *subscriber) Close() { s.client.Close() } func (s *subscriber) BlockNotifications() error { - if err := s.client.SubscribeForNewBlocks(); err != nil { + s.Lock() + defer s.Unlock() + if s.subscribedToNewBlocks { + return nil + } + if _, err := s.client.ReceiveBlocks(s.current.BlockChan); err != nil { return fmt.Errorf("could not subscribe for new block events: %w", err) } + s.subscribedToNewBlocks = true + return nil } func (s *subscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) error { - if err := s.client.SubscribeForNotaryRequests(mainTXSigner); err != nil { + s.Lock() + defer s.Unlock() + if s.subscribedNotaryEvents[mainTXSigner] { + return nil + } + if _, err := s.client.ReceiveNotaryRequests(mainTXSigner, s.current.NotaryChan); err != nil { return fmt.Errorf("could not subscribe for notary request events: %w", err) } + 
s.subscribedNotaryEvents[mainTXSigner] = true return nil } -func (s *subscriber) routeNotifications(ctx context.Context) { - notificationChan := s.client.NotificationChannel() - - for { - select { - case <-ctx.Done(): - return - case notification, ok := <-notificationChan: - if !ok { - s.log.Warn("remote notification channel has been closed") - close(s.notifyChan) - close(s.blockChan) - close(s.notaryChan) - - return - } - - switch notification.Type { - case neorpc.NotificationEventID: - notifyEvent, ok := notification.Value.(*state.ContainedNotificationEvent) - if !ok { - s.log.Error("can't cast notify event value to the notify struct", - zap.String("received type", fmt.Sprintf("%T", notification.Value)), - ) - continue - } - - s.log.Debug("new notification event from sidechain", - zap.String("name", notifyEvent.Name), - ) - - s.notifyChan <- notifyEvent - case neorpc.BlockEventID: - b, ok := notification.Value.(*block.Block) - if !ok { - s.log.Error("can't cast block event value to block", - zap.String("received type", fmt.Sprintf("%T", notification.Value)), - ) - continue - } - - s.blockChan <- b - case neorpc.NotaryRequestEventID: - notaryRequest, ok := notification.Value.(*result.NotaryRequestEvent) - if !ok { - s.log.Error("can't cast notify event value to the notary request struct", - zap.String("received type", fmt.Sprintf("%T", notification.Value)), - ) - continue - } - - s.notaryChan <- notaryRequest - default: - s.log.Debug("unsupported notification from the chain", - zap.Uint8("type", uint8(notification.Type)), - ) - } - } - } -} - // New is a constructs Neo:Morph event listener and returns Subscriber interface. 
func New(ctx context.Context, p *Params) (Subscriber, error) { switch { @@ -208,16 +169,170 @@ func New(ctx context.Context, p *Params) (Subscriber, error) { notifyChan: make(chan *state.ContainedNotificationEvent), blockChan: make(chan *block.Block), notaryChan: make(chan *result.NotaryRequestEvent), - } - // Worker listens all events from neo-go websocket and puts them - // into corresponding channel. It may be notifications, transactions, - // new blocks. For now only notifications. + current: newSubChannels(), + + subscribedEvents: make(map[util.Uint160]bool), + subscribedNotaryEvents: make(map[util.Uint160]bool), + } + // Worker listens all events from temporary NeoGo channel and puts them + // into corresponding permanent channels. go sub.routeNotifications(ctx) return sub, nil } +func (s *subscriber) routeNotifications(ctx context.Context) { + var ( + // TODO: not needed after nspcc-dev/neo-go#2980. + cliCh = s.client.NotificationChannel() + restoreCh = make(chan bool) + restoreInProgress bool + ) + +routeloop: + for { + var connLost bool + s.RLock() + curr := s.current + s.RUnlock() + select { + case <-ctx.Done(): + break routeloop + case ev, ok := <-curr.NotifyChan: + if ok { + s.notifyChan <- ev + } else { + connLost = true + } + case ev, ok := <-curr.BlockChan: + if ok { + s.blockChan <- ev + } else { + connLost = true + } + case ev, ok := <-curr.NotaryChan: + if ok { + s.notaryChan <- ev + } else { + connLost = true + } + case _, ok := <-cliCh: + connLost = !ok + case ok := <-restoreCh: + restoreInProgress = false + if !ok { + connLost = true + } + } + if connLost { + if !restoreInProgress { + restoreInProgress, cliCh = s.switchEndpoint(ctx, restoreCh) + if !restoreInProgress { + break routeloop + } + curr.drain() + } else { // Avoid getting additional !ok events. 
+ s.Lock() + s.current.NotifyChan = nil + s.current.BlockChan = nil + s.current.NotaryChan = nil + s.Unlock() + } + } + } + close(s.notifyChan) + close(s.blockChan) + close(s.notaryChan) +} + +func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) (bool, <-chan rpcclient.Notification) { + s.log.Info("RPC connection lost, attempting reconnect") + if !s.client.SwitchRPC(ctx) { + s.log.Error("can't switch RPC node") + return false, nil + } + + cliCh := s.client.NotificationChannel() + + s.Lock() + chs := newSubChannels() + go func() { + finishCh <- s.restoreSubscriptions(chs.NotifyChan, chs.BlockChan, chs.NotaryChan) + }() + s.current = chs + s.Unlock() + + return true, cliCh +} + +func newSubChannels() subChannels { + return subChannels{ + NotifyChan: make(chan *state.ContainedNotificationEvent), + BlockChan: make(chan *block.Block), + NotaryChan: make(chan *result.NotaryRequestEvent), + } +} + +func (s *subChannels) drain() { +drainloop: + for { + select { + case _, ok := <-s.NotifyChan: + if !ok { + s.NotifyChan = nil + } + case _, ok := <-s.BlockChan: + if !ok { + s.BlockChan = nil + } + case _, ok := <-s.NotaryChan: + if !ok { + s.NotaryChan = nil + } + default: + break drainloop + } + } +} + +// restoreSubscriptions restores subscriptions according to +// cached information about them. 
+func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotificationEvent, + blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent) bool { + var err error + + // new block events restoration + if s.subscribedToNewBlocks { + _, err = s.client.ReceiveBlocks(blCh) + if err != nil { + s.log.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err)) + return false + } + } + + // notification events restoration + for contract := range s.subscribedEvents { + contract := contract // See https://github.com/nspcc-dev/neo-go/issues/2890 + _, err = s.client.ReceiveExecutionNotifications(contract, notifCh) + if err != nil { + s.log.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) + return false + } + } + + // notary notification events restoration + for signer := range s.subscribedNotaryEvents { + signer := signer // See https://github.com/nspcc-dev/neo-go/issues/2890 + _, err = s.client.ReceiveNotaryRequests(signer, notaryCh) + if err != nil { + s.log.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) + return false + } + } + return true +} + // awaitHeight checks if remote client has least expected block height and // returns error if it is not reached that height after timeout duration. // This function is required to avoid connections to unsynced RPC nodes, because diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go index 549e98b65..371d3c76f 100644 --- a/pkg/network/cache/client.go +++ b/pkg/network/cache/client.go @@ -38,7 +38,7 @@ func NewSDKClientCache(opts ClientCacheOpts) *ClientCache { } // Get function returns existing client or creates a new one. 
-func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.Client, error) { +func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.MultiAddressClient, error) { netAddr := info.AddressGroup() if c.opts.AllowExternal { netAddr = append(netAddr, info.ExternalAddressGroup()...) diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 39c191b78..b8a0aa4bc 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -47,7 +47,7 @@ func newMultiClient(addr network.AddressGroup, opts ClientCacheOpts) *multiClien } } -func (x *multiClient) createForAddress(addr network.Address) (clientcore.Client, error) { +func (x *multiClient) createForAddress(ctx context.Context, addr network.Address) (clientcore.Client, error) { var ( c client.Client prmInit client.PrmInit @@ -73,7 +73,7 @@ func (x *multiClient) createForAddress(addr network.Address) (clientcore.Client, } c.Init(prmInit) - err := c.Dial(prmDial) + err := c.Dial(ctx, prmDial) if err != nil { return nil, fmt.Errorf("can't init SDK client: %w", err) } @@ -144,7 +144,7 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie var err error - c, err := x.client(addr) + c, err := x.client(ctx, addr) if err == nil { err = f(c) } @@ -277,25 +277,7 @@ func (x *multiClient) ObjectSearchInit(ctx context.Context, p client.PrmObjectSe return } -func (x *multiClient) AnnounceLocalTrust(ctx context.Context, prm client.PrmAnnounceLocalTrust) (res *client.ResAnnounceLocalTrust, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.AnnounceLocalTrust(ctx, prm) - return err - }) - - return -} - -func (x *multiClient) AnnounceIntermediateTrust(ctx context.Context, prm client.PrmAnnounceIntermediateTrust) (res *client.ResAnnounceIntermediateTrust, err error) { - err = x.iterateClients(ctx, func(c clientcore.Client) error { - res, err = c.AnnounceIntermediateTrust(ctx, prm) - return err - }) - - return -} - -func (x *multiClient) 
ExecRaw(f func(client *rawclient.Client) error) error { +func (x *multiClient) ExecRaw(func(client *rawclient.Client) error) error { panic("multiClient.ExecRaw() must not be called") } @@ -315,8 +297,8 @@ func (x *multiClient) Close() error { return nil } -func (x *multiClient) RawForAddress(addr network.Address, f func(client *rawclient.Client) error) error { - c, err := x.client(addr) +func (x *multiClient) RawForAddress(ctx context.Context, addr network.Address, f func(client *rawclient.Client) error) error { + c, err := x.client(ctx, addr) if err != nil { return err } @@ -328,7 +310,7 @@ func (x *multiClient) RawForAddress(addr network.Address, f func(client *rawclie return err } -func (x *multiClient) client(addr network.Address) (clientcore.Client, error) { +func (x *multiClient) client(ctx context.Context, addr network.Address) (clientcore.Client, error) { strAddr := addr.String() x.mtx.RLock() @@ -369,7 +351,7 @@ func (x *multiClient) client(addr network.Address) (clientcore.Client, error) { return nil, errRecentlyFailed } - cl, err := x.createForAddress(addr) + cl, err := x.createForAddress(ctx, addr) if err != nil { c.lastAttempt = time.Now() return nil, err diff --git a/pkg/network/transport/reputation/grpc/service.go b/pkg/network/transport/reputation/grpc/service.go deleted file mode 100644 index bb9074324..000000000 --- a/pkg/network/transport/reputation/grpc/service.go +++ /dev/null @@ -1,50 +0,0 @@ -package grpcreputation - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation" - reputation2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation/grpc" - reputationrpc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/rpc" -) - -// Server wraps FrostFS API v2 Reputation service server -// and provides gRPC Reputation service server interface. -type Server struct { - srv reputationrpc.Server -} - -// New creates, initializes and returns Server instance. 
-func New(srv reputationrpc.Server) *Server { - return &Server{ - srv: srv, - } -} - -func (s *Server) AnnounceLocalTrust(ctx context.Context, r *reputation2.AnnounceLocalTrustRequest) (*reputation2.AnnounceLocalTrustResponse, error) { - req := new(reputation.AnnounceLocalTrustRequest) - if err := req.FromGRPCMessage(r); err != nil { - return nil, err - } - - resp, err := s.srv.AnnounceLocalTrust(ctx, req) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*reputation2.AnnounceLocalTrustResponse), nil -} - -func (s *Server) AnnounceIntermediateResult(ctx context.Context, r *reputation2.AnnounceIntermediateResultRequest) (*reputation2.AnnounceIntermediateResultResponse, error) { - req := new(reputation.AnnounceIntermediateResultRequest) - if err := req.FromGRPCMessage(r); err != nil { - return nil, err - } - - resp, err := s.srv.AnnounceIntermediateResult(ctx, req) - if err != nil { - return nil, err - } - - return resp.ToGRPCMessage().(*reputation2.AnnounceIntermediateResultResponse), nil -} diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go index 434c89508..ac836b71d 100644 --- a/pkg/services/accounting/morph/executor.go +++ b/pkg/services/accounting/morph/executor.go @@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor { } } -func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { +func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errors.New("missing account") diff --git a/pkg/services/audit/auditor/context.go b/pkg/services/audit/auditor/context.go deleted file mode 100644 index bf720c330..000000000 --- a/pkg/services/audit/auditor/context.go +++ /dev/null @@ -1,297 +0,0 @@ -package auditor - -import ( - "context" - "sync" - "time" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/atomic" - "go.uber.org/zap" -) - -// Context represents container data audit execution context. -type Context struct { - ContextPrm - - task *audit.Task - - report *audit.Report - - sgMembersMtx sync.RWMutex - sgMembersCache map[oid.ID][]oid.ID - - placementMtx sync.Mutex - placementCache map[string][][]netmap.NodeInfo - - porRequests, porRetries atomic.Uint32 - - pairs []gamePair - - pairedMtx sync.Mutex - pairedNodes map[uint64]*pairMemberInfo - - counters struct { - hit, miss, fail uint32 - } - - cnrNodesNum int - - headMtx sync.RWMutex - headResponses map[string]shortHeader -} - -type pairMemberInfo struct { - failedPDP, passedPDP bool // at least one - - node netmap.NodeInfo -} - -type gamePair struct { - n1, n2 netmap.NodeInfo - - id oid.ID - - rn1, rn2 []*object.Range - - hh1, hh2 [][]byte -} - -type shortHeader struct { - tzhash []byte - - objectSize uint64 -} - -// ContextPrm groups components required to conduct data audit checks. -type ContextPrm struct { - maxPDPSleep uint64 - - log *logger.Logger - - cnrCom ContainerCommunicator - - pdpWorkerPool, porWorkerPool util.WorkerPool -} - -type commonCommunicatorPrm struct { - Node netmap.NodeInfo - - OID oid.ID - CID cid.ID -} - -// GetHeaderPrm groups parameter of GetHeader operation. -type GetHeaderPrm struct { - commonCommunicatorPrm - - NodeIsRelay bool -} - -// GetRangeHashPrm groups parameter of GetRangeHash operation. 
-type GetRangeHashPrm struct { - commonCommunicatorPrm - - Range *object.Range -} - -// ContainerCommunicator is an interface of -// component of communication with container nodes. -type ContainerCommunicator interface { - // GetHeader must return object header from the container node. - GetHeader(context.Context, GetHeaderPrm) (*object.Object, error) - - // GetRangeHash must return homomorphic Tillich-Zemor hash of payload range of the - // object stored in container node. - GetRangeHash(context.Context, GetRangeHashPrm) ([]byte, error) -} - -// NewContext creates, initializes and returns Context. -func NewContext(prm ContextPrm) *Context { - return &Context{ - ContextPrm: prm, - } -} - -// SetLogger sets logging component. -func (p *ContextPrm) SetLogger(l *logger.Logger) { - if p != nil { - p.log = l - } -} - -// SetContainerCommunicator sets component of communication with container nodes. -func (p *ContextPrm) SetContainerCommunicator(cnrCom ContainerCommunicator) { - if p != nil { - p.cnrCom = cnrCom - } -} - -// SetMaxPDPSleep sets maximum sleep interval between range hash requests. -// as part of PDP check. -func (p *ContextPrm) SetMaxPDPSleep(dur time.Duration) { - if p != nil { - p.maxPDPSleep = uint64(dur) - } -} - -// WithTask sets container audit parameters. -func (c *Context) WithTask(t *audit.Task) *Context { - if c != nil { - c.task = t - } - - return c -} - -// WithPDPWorkerPool sets worker pool for PDP pairs processing. -func (c *Context) WithPDPWorkerPool(pool util.WorkerPool) *Context { - if c != nil { - c.pdpWorkerPool = pool - } - - return c -} - -// WithPoRWorkerPool sets worker pool for PoR SG processing. 
-func (c *Context) WithPoRWorkerPool(pool util.WorkerPool) *Context { - if c != nil { - c.porWorkerPool = pool - } - - return c -} - -func (c *Context) containerID() cid.ID { - return c.task.ContainerID() -} - -func (c *Context) init() { - c.report = audit.NewReport(c.containerID()) - - c.sgMembersCache = make(map[oid.ID][]oid.ID) - - c.placementCache = make(map[string][][]netmap.NodeInfo) - - cnrVectors := c.task.ContainerNodes() - for i := range cnrVectors { - c.cnrNodesNum += len(cnrVectors[i]) - } - - c.pairedNodes = make(map[uint64]*pairMemberInfo) - - c.headResponses = make(map[string]shortHeader) - - c.log = &logger.Logger{Logger: c.log.With( - zap.Stringer("container ID", c.task.ContainerID()), - )} -} - -func (c *Context) expired(ctx context.Context) bool { - select { - case <-ctx.Done(): - c.log.Debug("audit context is done", - zap.String("error", ctx.Err().Error()), - ) - - return true - default: - return false - } -} - -func (c *Context) complete() { - c.report.Complete() -} - -func (c *Context) writeReport() { - c.log.Debug("writing audit report...") - - if err := c.task.Reporter().WriteReport(c.report); err != nil { - c.log.Error("could not write audit report") - } -} - -func (c *Context) buildPlacement(id oid.ID) ([][]netmap.NodeInfo, error) { - c.placementMtx.Lock() - defer c.placementMtx.Unlock() - - strID := id.EncodeToString() - - if nn, ok := c.placementCache[strID]; ok { - return nn, nil - } - - nn, err := placement.BuildObjectPlacement( - c.task.NetworkMap(), - c.task.ContainerNodes(), - &id, - ) - if err != nil { - return nil, err - } - - c.placementCache[strID] = nn - - return nn, nil -} - -func (c *Context) objectSize(id oid.ID) uint64 { - c.headMtx.RLock() - defer c.headMtx.RUnlock() - - strID := id.EncodeToString() - - if hdr, ok := c.headResponses[strID]; ok { - return hdr.objectSize - } - - return 0 -} - -func (c *Context) objectHomoHash(id oid.ID) []byte { - c.headMtx.RLock() - defer c.headMtx.RUnlock() - - strID := id.EncodeToString() 
- - if hdr, ok := c.headResponses[strID]; ok { - return hdr.tzhash - } - - return nil -} - -func (c *Context) updateHeadResponses(hdr *object.Object) { - id, ok := hdr.ID() - if !ok { - return - } - - strID := id.EncodeToString() - cs, _ := hdr.PayloadHomomorphicHash() - - c.headMtx.Lock() - defer c.headMtx.Unlock() - - if _, ok := c.headResponses[strID]; !ok { - c.headResponses[strID] = shortHeader{ - tzhash: cs.Value(), - objectSize: hdr.PayloadSize(), - } - } -} - -func (c *Context) updateSGInfo(id oid.ID, members []oid.ID) { - c.sgMembersMtx.Lock() - defer c.sgMembersMtx.Unlock() - - c.sgMembersCache[id] = members -} diff --git a/pkg/services/audit/auditor/exec.go b/pkg/services/audit/auditor/exec.go deleted file mode 100644 index e603818b8..000000000 --- a/pkg/services/audit/auditor/exec.go +++ /dev/null @@ -1,37 +0,0 @@ -package auditor - -import ( - "context" - "fmt" -) - -// Execute audits container data. -func (c *Context) Execute(ctx context.Context, onCompleted func()) { - defer onCompleted() - c.init() - - checks := []struct { - name string - exec func(context.Context) - }{ - {name: "PoR", exec: c.executePoR}, - {name: "PoP", exec: c.executePoP}, - {name: "PDP", exec: c.executePDP}, - } - - for i := range checks { - c.log.Debug(fmt.Sprintf("executing %s check...", checks[i].name)) - - if c.expired(ctx) { - break - } - - checks[i].exec(ctx) - - if i == len(checks)-1 { - c.complete() - } - } - - c.writeReport() -} diff --git a/pkg/services/audit/auditor/pdp.go b/pkg/services/audit/auditor/pdp.go deleted file mode 100644 index 8a184eb7e..000000000 --- a/pkg/services/audit/auditor/pdp.go +++ /dev/null @@ -1,239 +0,0 @@ -package auditor - -import ( - "bytes" - "context" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - 
"git.frostfs.info/TrueCloudLab/tzhash/tz" - "go.uber.org/zap" -) - -func (c *Context) executePDP(ctx context.Context) { - c.processPairs(ctx) - c.writePairsResult() -} - -func (c *Context) processPairs(ctx context.Context) { - wg := new(sync.WaitGroup) - - for i := range c.pairs { - p := &c.pairs[i] - wg.Add(1) - - if err := c.pdpWorkerPool.Submit(func() { - c.processPair(ctx, p) - wg.Done() - }); err != nil { - wg.Done() - } - } - - wg.Wait() - c.pdpWorkerPool.Release() -} - -func (c *Context) processPair(ctx context.Context, p *gamePair) { - c.distributeRanges(p) - c.collectHashes(ctx, p) - c.analyzeHashes(p) -} - -func (c *Context) distributeRanges(p *gamePair) { - p.rn1 = make([]*object.Range, hashRangeNumber-1) - p.rn2 = make([]*object.Range, hashRangeNumber-1) - - for i := 0; i < hashRangeNumber-1; i++ { - p.rn1[i] = object.NewRange() - p.rn2[i] = object.NewRange() - } - - notches := c.splitPayload(p.id) - - { // node 1 - // [0:n2] - p.rn1[0].SetLength(notches[1]) - - // [n2:n3] - p.rn1[1].SetOffset(notches[1]) - p.rn1[1].SetLength(notches[2] - notches[1]) - - // [n3:full] - p.rn1[2].SetOffset(notches[2]) - p.rn1[2].SetLength(notches[3] - notches[2]) - } - - { // node 2 - // [0:n1] - p.rn2[0].SetLength(notches[0]) - - // [n1:n2] - p.rn2[1].SetOffset(notches[0]) - p.rn2[1].SetLength(notches[1] - notches[0]) - - // [n2:full] - p.rn2[2].SetOffset(notches[1]) - p.rn2[2].SetLength(notches[3] - notches[1]) - } -} - -func (c *Context) splitPayload(id oid.ID) []uint64 { - var ( - prev uint64 - size = c.objectSize(id) - notches = make([]uint64, 0, hashRangeNumber) - ) - - for i := uint64(0); i < hashRangeNumber; i++ { - if i < hashRangeNumber-1 { - max := size - prev - (hashRangeNumber - i) - if max == 0 { - prev++ - } else { - prev += rand.Uint64()%max + 1 - } - } else { - prev = size - } - - notches = append(notches, prev) - } - - return notches -} - -func (c *Context) collectHashes(ctx context.Context, p *gamePair) { - fn := func(n netmap.NodeInfo, rngs 
[]*object.Range) [][]byte { - // Here we randomize the order a bit: the hypothesis is that this - // makes it harder for an unscrupulous node to come up with a - // reliable cheating strategy. - order := make([]int, len(rngs)) - for i := range order { - order[i] = i - } - rand.Shuffle(len(order), func(i, j int) { order[i], order[j] = order[j], order[i] }) - - var getRangeHashPrm GetRangeHashPrm - getRangeHashPrm.CID = c.task.ContainerID() - getRangeHashPrm.OID = p.id - getRangeHashPrm.Node = n - - res := make([][]byte, len(rngs)) - for _, i := range order { - var sleepDur time.Duration - if c.maxPDPSleep > 0 { - sleepDur = time.Duration(rand.Uint64() % c.maxPDPSleep) - } - - c.log.Debug("sleep before get range hash", - zap.Stringer("interval", sleepDur), - ) - - time.Sleep(sleepDur) - - getRangeHashPrm.Range = rngs[i] - - h, err := c.cnrCom.GetRangeHash(ctx, getRangeHashPrm) - if err != nil { - c.log.Debug("could not get payload range hash", - zap.Stringer("id", p.id), - zap.String("node", netmap.StringifyPublicKey(n)), - zap.String("error", err.Error()), - ) - return res - } - res[i] = h - } - return res - } - - p.hh1 = fn(p.n1, p.rn1) - p.hh2 = fn(p.n2, p.rn2) -} - -func (c *Context) analyzeHashes(p *gamePair) { - if len(p.hh1) != hashRangeNumber-1 || len(p.hh2) != hashRangeNumber-1 { - c.failNodesPDP(p.n1, p.n2) - return - } - - h1, err := tz.Concat([][]byte{p.hh2[0], p.hh2[1]}) - if err != nil || !bytes.Equal(p.hh1[0], h1) { - c.failNodesPDP(p.n1, p.n2) - return - } - - h2, err := tz.Concat([][]byte{p.hh1[1], p.hh1[2]}) - if err != nil || !bytes.Equal(p.hh2[2], h2) { - c.failNodesPDP(p.n1, p.n2) - return - } - - fh, err := tz.Concat([][]byte{h1, h2}) - if err != nil || !bytes.Equal(fh, c.objectHomoHash(p.id)) { - c.failNodesPDP(p.n1, p.n2) - return - } - - c.passNodesPDP(p.n1, p.n2) -} - -func (c *Context) failNodesPDP(ns ...netmap.NodeInfo) { - c.pairedMtx.Lock() - - for i := range ns { - c.pairedNodes[ns[i].Hash()].failedPDP = true - } - - 
c.pairedMtx.Unlock() -} - -func (c *Context) passNodesPDP(ns ...netmap.NodeInfo) { - c.pairedMtx.Lock() - - for i := range ns { - c.pairedNodes[ns[i].Hash()].passedPDP = true - } - - c.pairedMtx.Unlock() -} - -func (c *Context) writePairsResult() { - var failCount, okCount int - - c.iteratePairedNodes( - func(netmap.NodeInfo) { failCount++ }, - func(netmap.NodeInfo) { okCount++ }, - ) - - failedNodes := make([][]byte, 0, failCount) - passedNodes := make([][]byte, 0, okCount) - - c.iteratePairedNodes( - func(n netmap.NodeInfo) { - failedNodes = append(failedNodes, n.PublicKey()) - }, - func(n netmap.NodeInfo) { - passedNodes = append(passedNodes, n.PublicKey()) - }, - ) - - c.report.SetPDPResults(passedNodes, failedNodes) -} - -func (c *Context) iteratePairedNodes(onFail, onPass func(netmap.NodeInfo)) { - for _, pairedNode := range c.pairedNodes { - if pairedNode.failedPDP { - onFail(pairedNode.node) - } - - if pairedNode.passedPDP { - onPass(pairedNode.node) - } - } -} diff --git a/pkg/services/audit/auditor/pop.go b/pkg/services/audit/auditor/pop.go deleted file mode 100644 index 32b837794..000000000 --- a/pkg/services/audit/auditor/pop.go +++ /dev/null @@ -1,186 +0,0 @@ -package auditor - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/tzhash/tz" - "go.uber.org/zap" -) - -const ( - hashRangeNumber = 4 - minGamePayloadSize = hashRangeNumber * tz.Size -) - -func (c *Context) executePoP(ctx context.Context) { - c.buildCoverage(ctx) - - c.report.SetPlacementCounters( - c.counters.hit, - c.counters.miss, - c.counters.fail, - ) -} - -func (c *Context) buildCoverage(ctx context.Context) { - policy := c.task.ContainerStructure().PlacementPolicy() - - // select random member from another storage group - // and process all placement vectors - c.iterateSGMembersPlacementRand(func(id oid.ID, ind int, nodes []netmap.NodeInfo) bool { - 
c.processObjectPlacement(ctx, id, nodes, policy.ReplicaNumberByIndex(ind)) - return c.containerCovered() - }) -} - -func (c *Context) containerCovered() bool { - // number of container nodes can be calculated once - return c.cnrNodesNum <= len(c.pairedNodes) -} - -func (c *Context) processObjectPlacement(ctx context.Context, id oid.ID, nodes []netmap.NodeInfo, replicas uint32) { - var ( - ok uint32 - optimal bool - - unpairedCandidate1, unpairedCandidate2 = -1, -1 - - pairedCandidate = -1 - ) - - var getHeaderPrm GetHeaderPrm - getHeaderPrm.OID = id - getHeaderPrm.CID = c.task.ContainerID() - getHeaderPrm.NodeIsRelay = false - - for i := 0; ok < replicas && i < len(nodes); i++ { - getHeaderPrm.Node = nodes[i] - - // try to get object header from node - hdr, err := c.cnrCom.GetHeader(ctx, getHeaderPrm) - if err != nil { - c.log.Debug("could not get object header from candidate", - zap.Stringer("id", id), - zap.String("error", err.Error()), - ) - - continue - } - - c.updateHeadResponses(hdr) - - // increment success counter - ok++ - - // update optimal flag - optimal = ok == replicas && uint32(i) < replicas - - // exclude small objects from coverage - if c.objectSize(id) < minGamePayloadSize { - continue - } - - // update potential candidates to be paired - if _, ok := c.pairedNodes[nodes[i].Hash()]; !ok { - if unpairedCandidate1 < 0 { - unpairedCandidate1 = i - } else if unpairedCandidate2 < 0 { - unpairedCandidate2 = i - } - } else if pairedCandidate < 0 { - pairedCandidate = i - } - } - - if optimal { - c.counters.hit++ - } else if ok == replicas { - c.counters.miss++ - } else { - c.counters.fail++ - } - - if unpairedCandidate1 >= 0 { - if unpairedCandidate2 >= 0 { - c.composePair(id, nodes[unpairedCandidate1], nodes[unpairedCandidate2]) - } else if pairedCandidate >= 0 { - c.composePair(id, nodes[unpairedCandidate1], nodes[pairedCandidate]) - } - } -} - -func (c *Context) composePair(id oid.ID, n1, n2 netmap.NodeInfo) { - c.pairs = append(c.pairs, gamePair{ - n1: 
n1, - n2: n2, - id: id, - }) - - c.pairedNodes[n1.Hash()] = &pairMemberInfo{ - node: n1, - } - c.pairedNodes[n2.Hash()] = &pairMemberInfo{ - node: n2, - } -} - -func (c *Context) iterateSGMembersPlacementRand(f func(oid.ID, int, []netmap.NodeInfo) bool) { - // iterate over storage groups members for all storage groups (one by one) - // with randomly shuffled members - c.iterateSGMembersRand(func(id oid.ID) bool { - // build placement vector for the current object - nn, err := c.buildPlacement(id) - if err != nil { - c.log.Debug("could not build placement for object", - zap.Stringer("id", id), - zap.String("error", err.Error()), - ) - - return false - } - - for i, nodes := range nn { - if f(id, i, nodes) { - return true - } - } - - return false - }) -} - -func (c *Context) iterateSGMembersRand(f func(oid.ID) bool) { - c.iterateSGInfo(func(members []oid.ID) bool { - ln := len(members) - - processed := make(map[uint64]struct{}, ln-1) - - for len(processed) < ln { - ind := nextRandUint64(uint64(ln), processed) - processed[ind] = struct{}{} - - if f(members[ind]) { - return true - } - } - - return false - }) -} - -func (c *Context) iterateSGInfo(f func([]oid.ID) bool) { - c.sgMembersMtx.RLock() - defer c.sgMembersMtx.RUnlock() - - // we can add randomization like for SG members, - // but list of storage groups is already expected - // to be shuffled since it is a Search response - // with unpredictable order - for i := range c.sgMembersCache { - if f(c.sgMembersCache[i]) { - return - } - } -} diff --git a/pkg/services/audit/auditor/por.go b/pkg/services/audit/auditor/por.go deleted file mode 100644 index aebc25c68..000000000 --- a/pkg/services/audit/auditor/por.go +++ /dev/null @@ -1,155 +0,0 @@ -package auditor - -import ( - "bytes" - "context" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" - containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" 
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - storagegroupSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup" - "git.frostfs.info/TrueCloudLab/tzhash/tz" - "go.uber.org/zap" -) - -func (c *Context) executePoR(ctx context.Context) { - wg := new(sync.WaitGroup) - sgs := c.task.StorageGroupList() - - for _, sg := range sgs { - wg.Add(1) - - if err := c.porWorkerPool.Submit(func() { - c.checkStorageGroupPoR(ctx, sg.ID(), sg.StorageGroup()) - wg.Done() - }); err != nil { - wg.Done() - } - } - - wg.Wait() - c.porWorkerPool.Release() - - c.report.SetPoRCounters(c.porRequests.Load(), c.porRetries.Load()) -} - -func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg storagegroupSDK.StorageGroup) { - members := sg.Members() - c.updateSGInfo(sgID, members) - - var ( - tzHash []byte - totalSize uint64 - - accRequests, accRetries uint32 - ) - - var getHeaderPrm GetHeaderPrm - getHeaderPrm.CID = c.task.ContainerID() - getHeaderPrm.NodeIsRelay = true - - homomorphicHashingEnabled := !containerSDK.IsHomomorphicHashingDisabled(c.task.ContainerStructure()) - - for i := range members { - flat, ok := c.getShuffledNodes(members[i], sgID) - if !ok { - continue - } - - getHeaderPrm.OID = members[i] - - for j := range flat { - accRequests++ - if j > 0 { // in best case audit get object header on first iteration - accRetries++ - } - - getHeaderPrm.Node = flat[j] - - hdr, err := c.cnrCom.GetHeader(ctx, getHeaderPrm) - if err != nil { - c.log.Debug("can't head object", - zap.String("remote_node", netmap.StringifyPublicKey(flat[j])), - zap.Stringer("oid", members[i]), - ) - - continue - } - - // update cache for PoR and PDP audit checks - c.updateHeadResponses(hdr) - - if homomorphicHashingEnabled { - cs, _ := hdr.PayloadHomomorphicHash() - if len(tzHash) == 0 { - tzHash = cs.Value() - } else { - tzHash, err = tz.Concat([][]byte{ - tzHash, - cs.Value(), - }) - if err != nil { - 
c.log.Debug("can't concatenate tz hash", - zap.String("oid", members[i].String()), - zap.String("error", err.Error())) - - break - } - } - } - - totalSize += hdr.PayloadSize() - - break - } - } - - c.porRequests.Add(accRequests) - c.porRetries.Add(accRetries) - - sizeCheck := sg.ValidationDataSize() == totalSize - cs, _ := sg.ValidationDataHash() - tzCheck := !homomorphicHashingEnabled || bytes.Equal(tzHash, cs.Value()) - - c.writeCheckReport(sizeCheck, tzCheck, sgID, sg, totalSize) -} - -func (c *Context) writeCheckReport(sizeCheck, tzCheck bool, sgID oid.ID, sg storagegroupSDK.StorageGroup, totalSize uint64) { - if sizeCheck && tzCheck { - c.report.PassedPoR(sgID) - } else { - if !sizeCheck { - c.log.Debug("storage group size check failed", - zap.Uint64("expected", sg.ValidationDataSize()), - zap.Uint64("got", totalSize)) - } - - if !tzCheck { - c.log.Debug("storage group tz hash check failed") - } - - c.report.FailedPoR(sgID) - } -} - -func (c *Context) getShuffledNodes(member oid.ID, sgID oid.ID) ([]netmap.NodeInfo, bool) { - objectPlacement, err := c.buildPlacement(member) - if err != nil { - c.log.Info("can't build placement for storage group member", - zap.Stringer("sg", sgID), - zap.String("member_id", member.String()), - ) - - return nil, false - } - - flat := placement.FlattenNodes(objectPlacement) - - rand.Shuffle(len(flat), func(i, j int) { - flat[i], flat[j] = flat[j], flat[i] - }) - return flat, true -} diff --git a/pkg/services/audit/auditor/util.go b/pkg/services/audit/auditor/util.go deleted file mode 100644 index 5f8685534..000000000 --- a/pkg/services/audit/auditor/util.go +++ /dev/null @@ -1,18 +0,0 @@ -package auditor - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" -) - -// nextRandUint64 returns random uint64 number [0; n) outside exclude map. -// Panics if len(exclude) >= n. 
-func nextRandUint64(n uint64, exclude map[uint64]struct{}) uint64 { - ln := uint64(len(exclude)) - ind := rand.Uint64() % (n - ln) - - for i := ind; ; i++ { - if _, ok := exclude[i]; !ok { - return i - } - } -} diff --git a/pkg/services/audit/report.go b/pkg/services/audit/report.go deleted file mode 100644 index f16f97384..000000000 --- a/pkg/services/audit/report.go +++ /dev/null @@ -1,89 +0,0 @@ -package audit - -import ( - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// Report tracks the progress of auditing container data. -type Report struct { - mu sync.RWMutex - res audit.Result -} - -// Reporter is an interface of the entity that records -// the data audit report. -type Reporter interface { - WriteReport(r *Report) error -} - -// NewReport creates and returns blank Report instance. -func NewReport(cnr cid.ID) *Report { - var rep Report - rep.res.ForContainer(cnr) - - return &rep -} - -// Result forms the structure of the data audit result. -func (r *Report) Result() *audit.Result { - r.mu.RLock() - defer r.mu.RUnlock() - - return &r.res -} - -// Complete completes audit report. -func (r *Report) Complete() { - r.mu.Lock() - defer r.mu.Unlock() - - r.res.Complete() -} - -// PassedPoR updates list of passed storage groups. -func (r *Report) PassedPoR(sg oid.ID) { - r.mu.Lock() - defer r.mu.Unlock() - - r.res.SubmitPassedStorageGroup(sg) -} - -// FailedPoR updates list of failed storage groups. -func (r *Report) FailedPoR(sg oid.ID) { - r.mu.Lock() - defer r.mu.Unlock() - - r.res.SubmitFailedStorageGroup(sg) -} - -// SetPlacementCounters sets counters of compliance with placement. 
-func (r *Report) SetPlacementCounters(hit, miss, fail uint32) { - r.mu.Lock() - defer r.mu.Unlock() - - r.res.SetHits(hit) - r.res.SetMisses(miss) - r.res.SetFailures(fail) -} - -// SetPDPResults sets lists of nodes according to their PDP results. -func (r *Report) SetPDPResults(passed, failed [][]byte) { - r.mu.Lock() - defer r.mu.Unlock() - - r.res.SubmitPassedStorageNodes(passed) - r.res.SubmitFailedStorageNodes(failed) -} - -// SetPoRCounters sets amounts of head requests and retries at PoR audit stage. -func (r *Report) SetPoRCounters(requests, retries uint32) { - r.mu.Lock() - defer r.mu.Unlock() - - r.res.SetRequestsPoR(requests) - r.res.SetRetriesPoR(retries) -} diff --git a/pkg/services/audit/task.go b/pkg/services/audit/task.go deleted file mode 100644 index 3de5ac2c6..000000000 --- a/pkg/services/audit/task.go +++ /dev/null @@ -1,120 +0,0 @@ -package audit - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -// Task groups groups the container audit parameters. -type Task struct { - cancelCh <-chan struct{} - - reporter Reporter - - idCnr cid.ID - - cnr container.Container - - nm *netmap.NetMap - - cnrNodes [][]netmap.NodeInfo - - sgList []storagegroup.StorageGroup -} - -// WithReporter sets audit report writer. -func (t *Task) WithReporter(r Reporter) *Task { - if t != nil { - t.reporter = r - } - - return t -} - -// Reporter returns audit report writer. -func (t *Task) Reporter() Reporter { - return t.reporter -} - -func (t *Task) WithCancelChannel(ch <-chan struct{}) *Task { - if ch != nil { - t.cancelCh = ch - } - return t -} - -func (t *Task) CancelChannel() <-chan struct{} { - return t.cancelCh -} - -// WithContainerID sets identifier of the container under audit. 
-func (t *Task) WithContainerID(cnr cid.ID) *Task { - if t != nil { - t.idCnr = cnr - } - - return t -} - -// ContainerID returns identifier of the container under audit. -func (t *Task) ContainerID() cid.ID { - return t.idCnr -} - -// WithContainerStructure sets structure of the container under audit. -func (t *Task) WithContainerStructure(cnr container.Container) *Task { - if t != nil { - t.cnr = cnr - } - - return t -} - -// ContainerStructure returns structure of the container under audit. -func (t *Task) ContainerStructure() container.Container { - return t.cnr -} - -// WithContainerNodes sets nodes in the container under audit. -func (t *Task) WithContainerNodes(cnrNodes [][]netmap.NodeInfo) *Task { - if t != nil { - t.cnrNodes = cnrNodes - } - - return t -} - -// NetworkMap returns network map of audit epoch. -func (t *Task) NetworkMap() *netmap.NetMap { - return t.nm -} - -// WithNetworkMap sets network map of audit epoch. -func (t *Task) WithNetworkMap(nm *netmap.NetMap) *Task { - if t != nil { - t.nm = nm - } - - return t -} - -// ContainerNodes returns nodes in the container under audit. -func (t *Task) ContainerNodes() [][]netmap.NodeInfo { - return t.cnrNodes -} - -// WithStorageGroupList sets a list of storage groups from container under audit. -func (t *Task) WithStorageGroupList(sgList []storagegroup.StorageGroup) *Task { - if t != nil { - t.sgList = sgList - } - - return t -} - -// StorageGroupList returns list of storage groups from container under audit. 
-func (t *Task) StorageGroupList() []storagegroup.StorageGroup { - return t.sgList -} diff --git a/pkg/services/audit/taskmanager/listen.go b/pkg/services/audit/taskmanager/listen.go deleted file mode 100644 index a16052e13..000000000 --- a/pkg/services/audit/taskmanager/listen.go +++ /dev/null @@ -1,84 +0,0 @@ -package audittask - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/auditor" - "go.uber.org/zap" -) - -// Listen starts the process of processing tasks from the queue. -// -// The listener is terminated by context. -func (m *Manager) Listen(ctx context.Context) { - m.log.Info("process routine", - zap.Uint32("queue_capacity", m.queueCap), - ) - - m.ch = make(chan *audit.Task, m.queueCap) - - for { - select { - case <-ctx.Done(): - m.log.Warn("stop listener by context", - zap.String("error", ctx.Err().Error()), - ) - m.workerPool.Release() - - return - case task, ok := <-m.ch: - if !ok { - m.log.Warn("queue channel is closed") - return - } - - tCtx, tCancel := context.WithCancel(ctx) // cancel task in case of listen cancel - go func() { - select { - case <-tCtx.Done(): // listen cancelled or task completed - return - case <-task.CancelChannel(): // new epoch - tCancel() - } - }() - - m.handleTask(tCtx, task, tCancel) - } - } -} - -func (m *Manager) handleTask(ctx context.Context, task *audit.Task, onCompleted func()) { - pdpPool, err := m.pdpPoolGenerator() - if err != nil { - m.log.Error("could not generate PDP worker pool", - zap.String("error", err.Error()), - ) - onCompleted() - return - } - - porPool, err := m.pdpPoolGenerator() - if err != nil { - m.log.Error("could not generate PoR worker pool", - zap.String("error", err.Error()), - ) - onCompleted() - return - } - - auditContext := m.generateContext(task). - WithPDPWorkerPool(pdpPool). 
- WithPoRWorkerPool(porPool) - - if err := m.workerPool.Submit(func() { auditContext.Execute(ctx, onCompleted) }); err != nil { - // may be we should report it - m.log.Warn("could not submit audit task") - onCompleted() - } -} - -func (m *Manager) generateContext(task *audit.Task) *auditor.Context { - return auditor.NewContext(m.ctxPrm). - WithTask(task) -} diff --git a/pkg/services/audit/taskmanager/manager.go b/pkg/services/audit/taskmanager/manager.go deleted file mode 100644 index bf7698799..000000000 --- a/pkg/services/audit/taskmanager/manager.go +++ /dev/null @@ -1,107 +0,0 @@ -package audittask - -import ( - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/auditor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// Manager represents an entity performing data audit tasks. -type Manager struct { - *cfg - - ch chan *audit.Task -} - -// Option is a Manager's constructor option. -type Option func(*cfg) - -type cfg struct { - queueCap uint32 - - log *logger.Logger - - ctxPrm auditor.ContextPrm - - workerPool util.WorkerPool - - pdpPoolGenerator, porPoolGenerator func() (util.WorkerPool, error) -} - -func defaultCfg() *cfg { - return &cfg{ - log: &logger.Logger{Logger: zap.L()}, - } -} - -// New creates, initializes and returns new Manager instance. -func New(opts ...Option) *Manager { - c := defaultCfg() - - for i := range opts { - opts[i](c) - } - - return &Manager{ - cfg: c, - } -} - -// WithLogger returns option to specify Manager's logger. -func WithLogger(l *logger.Logger) Option { - return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "Audit task manager"))} - c.ctxPrm.SetLogger(l) - } -} - -// WithWorkerPool returns option to set worker pool -// for task execution. 
-func WithWorkerPool(p util.WorkerPool) Option { - return func(c *cfg) { - c.workerPool = p - } -} - -// WithQueueCapacity returns option to set task queue capacity. -func WithQueueCapacity(capacity uint32) Option { - return func(c *cfg) { - c.queueCap = capacity - } -} - -// WithContainerCommunicator returns option to set component of communication -// with container nodes. -func WithContainerCommunicator(cnrCom auditor.ContainerCommunicator) Option { - return func(c *cfg) { - c.ctxPrm.SetContainerCommunicator(cnrCom) - } -} - -// WithMaxPDPSleepInterval returns option to set maximum sleep interval -// between range hash requests as part of PDP check. -func WithMaxPDPSleepInterval(dur time.Duration) Option { - return func(c *cfg) { - c.ctxPrm.SetMaxPDPSleep(dur) - } -} - -// WithPDPWorkerPoolGenerator returns option to set worker pool for PDP pairs processing. -// Callback caller owns returned pool and must release it appropriately. -func WithPDPWorkerPoolGenerator(f func() (util.WorkerPool, error)) Option { - return func(c *cfg) { - c.pdpPoolGenerator = f - } -} - -// WithPoRWorkerPoolGenerator returns option to set worker pool for PoR SG processing. -// Callback caller owns returned pool and must release it appropriately. -func WithPoRWorkerPoolGenerator(f func() (util.WorkerPool, error)) Option { - return func(c *cfg) { - c.porPoolGenerator = f - } -} diff --git a/pkg/services/audit/taskmanager/push.go b/pkg/services/audit/taskmanager/push.go deleted file mode 100644 index 805897dbf..000000000 --- a/pkg/services/audit/taskmanager/push.go +++ /dev/null @@ -1,10 +0,0 @@ -package audittask - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit" -) - -// PushTask adds a task to the queue for processing. 
-func (m *Manager) PushTask(t *audit.Task) { - m.ch <- t -} diff --git a/pkg/services/audit/taskmanager/reset.go b/pkg/services/audit/taskmanager/reset.go deleted file mode 100644 index 86f2538cf..000000000 --- a/pkg/services/audit/taskmanager/reset.go +++ /dev/null @@ -1,11 +0,0 @@ -package audittask - -// Reset pops all tasks from the queue. -// Returns amount of popped elements. -func (m *Manager) Reset() (popped int) { - for ; len(m.ch) > 0; popped++ { - <-m.ch - } - - return -} diff --git a/pkg/services/container/announcement/load/controller/calls.go b/pkg/services/container/announcement/load/controller/calls.go index f5d5d1a3d..e1ed6e496 100644 --- a/pkg/services/container/announcement/load/controller/calls.go +++ b/pkg/services/container/announcement/load/controller/calls.go @@ -3,6 +3,7 @@ package loadcontroller import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "go.uber.org/zap" @@ -52,7 +53,7 @@ func (c *Controller) Start(ctx context.Context, prm StartPrm) { } func (c *announcer) announce(ctx context.Context) { - c.log.Debug("starting to announce the values of the metrics") + c.log.Debug(logs.ControllerStartingToAnnounceTheValuesOfTheMetrics) var ( metricsIterator Iterator @@ -62,7 +63,7 @@ func (c *announcer) announce(ctx context.Context) { // initialize iterator over locally collected metrics metricsIterator, err = c.ctrl.prm.LocalMetrics.InitIterator() if err != nil { - c.log.Debug("could not initialize iterator over locally collected metrics", + c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics, zap.String("error", err.Error()), ) @@ -72,7 +73,7 @@ func (c *announcer) announce(ctx context.Context) { // initialize target of local announcements targetWriter, err := c.ctrl.prm.LocalAnnouncementTarget.InitWriter(nil) if err != nil { - c.log.Debug("could not initialize announcement 
accumulator", + c.log.Debug(logs.ControllerCouldNotInitializeAnnouncementAccumulator, zap.String("error", err.Error()), ) @@ -90,7 +91,7 @@ func (c *announcer) announce(ctx context.Context) { }, ) if err != nil { - c.log.Debug("iterator over locally collected metrics aborted", + c.log.Debug(logs.ControllerIteratorOverLocallyCollectedMetricsAborted, zap.String("error", err.Error()), ) @@ -100,14 +101,14 @@ func (c *announcer) announce(ctx context.Context) { // finish writing err = targetWriter.Close(ctx) if err != nil { - c.log.Debug("could not finish writing local announcements", + c.log.Debug(logs.ControllerCouldNotFinishWritingLocalAnnouncements, zap.String("error", err.Error()), ) return } - c.log.Debug("trust announcement successfully finished") + c.log.Debug(logs.ControllerTrustAnnouncementSuccessfullyFinished) } func (c *Controller) acquireAnnouncement(ctx context.Context, prm StartPrm) (context.Context, *announcer) { @@ -127,7 +128,7 @@ func (c *Controller) acquireAnnouncement(ctx context.Context, prm StartPrm) (con )} if started { - log.Debug("announcement is already started") + log.Debug(logs.ControllerAnnouncementIsAlreadyStarted) return ctx, nil } @@ -159,9 +160,9 @@ func (c *commonContext) freeAnnouncement() { c.ctrl.announceMtx.Unlock() if stopped { - c.log.Debug("announcement successfully interrupted") + c.log.Debug(logs.ControllerAnnouncementSuccessfullyInterrupted) } else { - c.log.Debug("announcement is not started or already interrupted") + c.log.Debug(logs.ControllerAnnouncementIsNotStartedOrAlreadyInterrupted) } } @@ -219,7 +220,7 @@ func (c *Controller) acquireReport(ctx context.Context, prm StopPrm) (context.Co )} if started { - log.Debug("report is already started") + log.Debug(logs.ControllerReportIsAlreadyStarted) return ctx, nil } @@ -251,9 +252,9 @@ func (c *commonContext) freeReport() { c.ctrl.reportMtx.Unlock() if stopped { - c.log.Debug("announcement successfully interrupted") + 
c.log.Debug(logs.ControllerAnnouncementSuccessfullyInterrupted) } else { - c.log.Debug("announcement is not started or already interrupted") + c.log.Debug(logs.ControllerAnnouncementIsNotStartedOrAlreadyInterrupted) } } @@ -266,7 +267,7 @@ func (c *reporter) report(ctx context.Context) { // initialize iterator over locally accumulated announcements localIterator, err = c.ctrl.prm.AnnouncementAccumulator.InitIterator() if err != nil { - c.log.Debug("could not initialize iterator over locally accumulated announcements", + c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements, zap.String("error", err.Error()), ) @@ -276,7 +277,7 @@ func (c *reporter) report(ctx context.Context) { // initialize final destination of load estimations resultWriter, err := c.ctrl.prm.ResultReceiver.InitWriter(nil) if err != nil { - c.log.Debug("could not initialize result target", + c.log.Debug(logs.ControllerCouldNotInitializeResultTarget, zap.String("error", err.Error()), ) @@ -289,7 +290,7 @@ func (c *reporter) report(ctx context.Context) { resultWriter.Put, ) if err != nil { - c.log.Debug("iterator over local announcements aborted", + c.log.Debug(logs.ControllerIteratorOverLocalAnnouncementsAborted, zap.String("error", err.Error()), ) @@ -299,7 +300,7 @@ func (c *reporter) report(ctx context.Context) { // finish writing err = resultWriter.Close(ctx) if err != nil { - c.log.Debug("could not finish writing load estimations", + c.log.Debug(logs.ControllerCouldNotFinishWritingLoadEstimations, zap.String("error", err.Error()), ) } diff --git a/pkg/services/container/announcement/load/route/calls.go b/pkg/services/container/announcement/load/route/calls.go index 83c368f57..9a483aed0 100644 --- a/pkg/services/container/announcement/load/route/calls.go +++ b/pkg/services/container/announcement/load/route/calls.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" loadcontroller 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "go.uber.org/zap" @@ -97,7 +98,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error { if !ok { provider, err := w.router.remoteProvider.InitRemote(remoteInfo) if err != nil { - w.router.log.Debug("could not initialize writer provider", + w.router.log.Debug(logs.RouteCouldNotInitializeWriterProvider, zap.String("error", err.Error()), ) @@ -106,7 +107,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error { remoteWriter, err = provider.InitWriter(w.route) if err != nil { - w.router.log.Debug("could not initialize writer", + w.router.log.Debug(logs.RouteCouldNotInitializeWriter, zap.String("error", err.Error()), ) @@ -118,7 +119,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error { err := remoteWriter.Put(a) if err != nil { - w.router.log.Debug("could not put the value", + w.router.log.Debug(logs.RouteCouldNotPutTheValue, zap.String("error", err.Error()), ) } @@ -133,7 +134,7 @@ func (w *loadWriter) Close(ctx context.Context) error { for key, wRemote := range w.mServers { err := wRemote.Close(ctx) if err != nil { - w.router.log.Debug("could not close remote server writer", + w.router.log.Debug(logs.RouteCouldNotCloseRemoteServerWriter, zap.String("key", key), zap.String("error", err.Error()), ) diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index 42035c8d0..8e6b30856 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -136,7 +136,7 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body * return new(container.DeleteResponseBody), nil } -func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { +func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, 
error) { idV2 := body.GetContainerID() if idV2 == nil { return nil, errors.New("missing container ID") @@ -176,7 +176,7 @@ func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) return res, nil } -func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { +func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, fmt.Errorf("missing user ID") @@ -205,7 +205,7 @@ func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBod return res, nil } -func (s *morphExecutor) SetExtendedACL(ctx context.Context, tokV2 *sessionV2.Token, body *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error) { +func (s *morphExecutor) SetExtendedACL(_ context.Context, tokV2 *sessionV2.Token, body *container.SetExtendedACLRequestBody) (*container.SetExtendedACLResponseBody, error) { sigV2 := body.GetSignature() if sigV2 == nil { // TODO(@cthulhu-rider): #1387 use "const" error @@ -238,7 +238,7 @@ func (s *morphExecutor) SetExtendedACL(ctx context.Context, tokV2 *sessionV2.Tok return new(container.SetExtendedACLResponseBody), nil } -func (s *morphExecutor) GetExtendedACL(ctx context.Context, body *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error) { +func (s *morphExecutor) GetExtendedACL(_ context.Context, body *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error) { idV2 := body.GetContainerID() if idV2 == nil { return nil, errors.New("missing container ID") diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go index f7582dd68..fd6f020d1 100644 --- a/pkg/services/control/convert.go +++ b/pkg/services/control/convert.go @@ -14,207 +14,26 @@ func (w *requestWrapper) ToGRPCMessage() grpc.Message { return w.m } -type healthCheckResponseWrapper struct { - m 
*HealthCheckResponse +type responseWrapper[T grpc.Message] struct { + message *T } -func (w *healthCheckResponseWrapper) ToGRPCMessage() grpc.Message { - return w.m +func newResponseWrapper[T grpc.Message]() *responseWrapper[T] { + return &responseWrapper[T]{ + message: new(T), + } } -func (w *healthCheckResponseWrapper) FromGRPCMessage(m grpc.Message) error { - var ok bool +func (w *responseWrapper[T]) ToGRPCMessage() grpc.Message { + return w.message +} - w.m, ok = m.(*HealthCheckResponse) +func (w *responseWrapper[T]) FromGRPCMessage(m grpc.Message) error { + response, ok := m.(*T) if !ok { - return message.NewUnexpectedMessageType(m, w.m) + return message.NewUnexpectedMessageType(m, w.message) } - return nil -} - -type setNetmapStatusResponseWrapper struct { - message.Message - m *SetNetmapStatusResponse -} - -func (w *setNetmapStatusResponseWrapper) ToGRPCMessage() grpc.Message { - return w.m -} - -func (w *setNetmapStatusResponseWrapper) FromGRPCMessage(m grpc.Message) error { - var ok bool - - w.m, ok = m.(*SetNetmapStatusResponse) - if !ok { - return message.NewUnexpectedMessageType(m, w.m) - } - - return nil -} - -type dropObjectsResponseWrapper struct { - message.Message - m *DropObjectsResponse -} - -func (w *dropObjectsResponseWrapper) ToGRPCMessage() grpc.Message { - return w.m -} - -func (w *dropObjectsResponseWrapper) FromGRPCMessage(m grpc.Message) error { - var ok bool - - w.m, ok = m.(*DropObjectsResponse) - if !ok { - return message.NewUnexpectedMessageType(m, w.m) - } - - return nil -} - -type listShardsResponseWrapper struct { - m *ListShardsResponse -} - -func (w *listShardsResponseWrapper) ToGRPCMessage() grpc.Message { - return w.m -} - -func (w *listShardsResponseWrapper) FromGRPCMessage(m grpc.Message) error { - var ok bool - - w.m, ok = m.(*ListShardsResponse) - if !ok { - return message.NewUnexpectedMessageType(m, w.m) - } - - return nil -} - -type setShardModeResponseWrapper struct { - m *SetShardModeResponse -} - -func (w 
*setShardModeResponseWrapper) ToGRPCMessage() grpc.Message { - return w.m -} - -func (w *setShardModeResponseWrapper) FromGRPCMessage(m grpc.Message) error { - var ok bool - - w.m, ok = m.(*SetShardModeResponse) - if !ok { - return message.NewUnexpectedMessageType(m, w.m) - } - - return nil -} - -type dumpShardResponseWrapper struct { - *DumpShardResponse -} - -func (w *dumpShardResponseWrapper) ToGRPCMessage() grpc.Message { - return w.DumpShardResponse -} - -func (w *dumpShardResponseWrapper) FromGRPCMessage(m grpc.Message) error { - r, ok := m.(*DumpShardResponse) - if !ok { - return message.NewUnexpectedMessageType(m, (*DumpShardResponse)(nil)) - } - - w.DumpShardResponse = r - return nil -} - -type restoreShardResponseWrapper struct { - *RestoreShardResponse -} - -func (w *restoreShardResponseWrapper) ToGRPCMessage() grpc.Message { - return w.RestoreShardResponse -} - -func (w *restoreShardResponseWrapper) FromGRPCMessage(m grpc.Message) error { - r, ok := m.(*RestoreShardResponse) - if !ok { - return message.NewUnexpectedMessageType(m, (*RestoreShardResponse)(nil)) - } - - w.RestoreShardResponse = r - return nil -} - -type synchronizeTreeResponseWrapper struct { - *SynchronizeTreeResponse -} - -func (w *synchronizeTreeResponseWrapper) ToGRPCMessage() grpc.Message { - return w.SynchronizeTreeResponse -} - -func (w *synchronizeTreeResponseWrapper) FromGRPCMessage(m grpc.Message) error { - r, ok := m.(*SynchronizeTreeResponse) - if !ok { - return message.NewUnexpectedMessageType(m, (*SynchronizeTreeResponse)(nil)) - } - - w.SynchronizeTreeResponse = r - return nil -} - -type evacuateShardResponseWrapper struct { - *EvacuateShardResponse -} - -func (w *evacuateShardResponseWrapper) ToGRPCMessage() grpc.Message { - return w.EvacuateShardResponse -} - -func (w *evacuateShardResponseWrapper) FromGRPCMessage(m grpc.Message) error { - r, ok := m.(*EvacuateShardResponse) - if !ok { - return message.NewUnexpectedMessageType(m, (*EvacuateShardResponse)(nil)) - } - - 
w.EvacuateShardResponse = r - return nil -} - -type flushCacheResponseWrapper struct { - *FlushCacheResponse -} - -func (w *flushCacheResponseWrapper) ToGRPCMessage() grpc.Message { - return w.FlushCacheResponse -} - -func (w *flushCacheResponseWrapper) FromGRPCMessage(m grpc.Message) error { - r, ok := m.(*FlushCacheResponse) - if !ok { - return message.NewUnexpectedMessageType(m, (*FlushCacheResponse)(nil)) - } - - w.FlushCacheResponse = r - return nil -} - -type doctorResponseWrapper struct { - *DoctorResponse -} - -func (w *doctorResponseWrapper) ToGRPCMessage() grpc.Message { - return w.DoctorResponse -} - -func (w *doctorResponseWrapper) FromGRPCMessage(m grpc.Message) error { - r, ok := m.(*DoctorResponse) - if !ok { - return message.NewUnexpectedMessageType(m, (*DoctorResponse)(nil)) - } - - w.DoctorResponse = r + w.message = response return nil } diff --git a/pkg/services/control/ir/convert.go b/pkg/services/control/ir/convert.go index 01bc48724..c892c5b6c 100644 --- a/pkg/services/control/ir/convert.go +++ b/pkg/services/control/ir/convert.go @@ -14,18 +14,18 @@ func (w *requestWrapper) ToGRPCMessage() grpc.Message { return w.m } -type healthCheckResponseWrapper struct { - m *HealthCheckResponse +type responseWrapper[M grpc.Message] struct { + m M } -func (w *healthCheckResponseWrapper) ToGRPCMessage() grpc.Message { +func (w *responseWrapper[M]) ToGRPCMessage() grpc.Message { return w.m } -func (w *healthCheckResponseWrapper) FromGRPCMessage(m grpc.Message) error { +func (w *responseWrapper[M]) FromGRPCMessage(m grpc.Message) error { var ok bool - w.m, ok = m.(*HealthCheckResponse) + w.m, ok = m.(M) if !ok { return message.NewUnexpectedMessageType(m, w.m) } diff --git a/pkg/services/control/ir/rpc.go b/pkg/services/control/ir/rpc.go index a8b16b607..1b635c149 100644 --- a/pkg/services/control/ir/rpc.go +++ b/pkg/services/control/ir/rpc.go @@ -3,12 +3,15 @@ package control import ( "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" 
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc" ) const serviceName = "ircontrol.ControlService" const ( rpcHealthCheck = "HealthCheck" + rpcTickEpoch = "TickEpoch" + rpcRemoveNode = "RemoveNode" ) // HealthCheck executes ControlService.HealthCheck RPC. @@ -17,15 +20,37 @@ func HealthCheck( req *HealthCheckRequest, opts ...client.CallOption, ) (*HealthCheckResponse, error) { - wResp := &healthCheckResponseWrapper{ - m: new(HealthCheckResponse), + return sendUnary[HealthCheckRequest, HealthCheckResponse](cli, rpcHealthCheck, req, opts...) +} + +// TickEpoch executes ControlService.TickEpoch RPC. +func TickEpoch( + cli *client.Client, + req *TickEpochRequest, + opts ...client.CallOption, +) (*TickEpochResponse, error) { + return sendUnary[TickEpochRequest, TickEpochResponse](cli, rpcTickEpoch, req, opts...) +} + +func RemoveNode( + cli *client.Client, + req *RemoveNodeRequest, + opts ...client.CallOption, +) (*RemoveNodeResponse, error) { + return sendUnary[RemoveNodeRequest, RemoveNodeResponse](cli, rpcRemoveNode, req, opts...) +} + +func sendUnary[I, O grpc.Message](cli *client.Client, rpcName string, req *I, opts ...client.CallOption) (*O, error) { + var resp O + wResp := &responseWrapper[*O]{ + m: &resp, } wReq := &requestWrapper{ m: req, } - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcHealthCheck), wReq, wResp, opts...) + err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcName), wReq, wResp, opts...) 
if err != nil { return nil, err } diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go index 986da90f1..680d1e606 100644 --- a/pkg/services/control/ir/server/calls.go +++ b/pkg/services/control/ir/server/calls.go @@ -1,8 +1,11 @@ package control import ( + "bytes" "context" + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -12,12 +15,10 @@ import ( // // If request is not signed with a key from white list, permission error returns. func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest) (*control.HealthCheckResponse, error) { - // verify request if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } - // create and fill response resp := new(control.HealthCheckResponse) body := new(control.HealthCheckResponse_Body) @@ -25,7 +26,73 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest) body.SetHealthStatus(s.prm.healthChecker.HealthStatus()) - // sign the response + if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +// TickEpoch forces a new epoch. +// +// If request is not signed with a key from white list, permission error returns. 
+func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) { + if err := s.isValidRequest(req); err != nil { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } + + resp := new(control.TickEpochResponse) + resp.SetBody(new(control.TickEpochResponse_Body)) + + epoch, err := s.netmapClient.Epoch() + if err != nil { + return nil, fmt.Errorf("getting current epoch: %w", err) + } + + if err := s.netmapClient.NewEpoch(epoch+1, true); err != nil { + return nil, fmt.Errorf("forcing new epoch: %w", err) + } + + if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return resp, nil +} + +// RemoveNode forces a node removal. +// +// If request is not signed with a key from white list, permission error returns. +func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) { + if err := s.isValidRequest(req); err != nil { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } + + resp := new(control.RemoveNodeResponse) + resp.SetBody(new(control.RemoveNodeResponse_Body)) + + nm, err := s.netmapClient.NetMap() + if err != nil { + return nil, fmt.Errorf("getting netmap: %w", err) + } + var nodeInfo netmap.NodeInfo + for _, info := range nm.Nodes() { + if bytes.Equal(info.PublicKey(), req.GetBody().GetKey()) { + nodeInfo = info + break + } + } + if len(nodeInfo.PublicKey()) == 0 { + return nil, status.Error(codes.NotFound, "no such node") + } + if nodeInfo.IsOffline() { + return nil, status.Error(codes.FailedPrecondition, "node is already offline") + } + + if err := s.netmapClient.ForceRemovePeer(nodeInfo); err != nil { + return nil, fmt.Errorf("forcing node removal: %w", err) + } + if err := SignMessage(&s.prm.key.PrivateKey, resp); err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/pkg/services/control/ir/server/server.go 
b/pkg/services/control/ir/server/server.go index c75c1504e..dc00809a6 100644 --- a/pkg/services/control/ir/server/server.go +++ b/pkg/services/control/ir/server/server.go @@ -2,6 +2,8 @@ package control import ( "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" ) // Server is an entity that serves @@ -10,7 +12,8 @@ import ( // To gain access to the service, any request must be // signed with a key from the white list. type Server struct { - prm Prm + prm Prm + netmapClient *netmap.Client allowedKeys [][]byte } @@ -29,7 +32,7 @@ func panicOnPrmValue(n string, v any) { // Forms white list from all keys specified via // WithAllowedKeys option and a public key of // the parameterized private key. -func New(prm Prm, opts ...Option) *Server { +func New(prm Prm, netmapClient *netmap.Client, opts ...Option) *Server { // verify required parameters switch { case prm.healthChecker == nil: @@ -44,7 +47,8 @@ func New(prm Prm, opts ...Option) *Server { } return &Server{ - prm: prm, + prm: prm, + netmapClient: netmapClient, allowedKeys: append(o.allowedKeys, prm.key.PublicKey().Bytes()), } diff --git a/pkg/services/control/ir/service.go b/pkg/services/control/ir/service.go index dc04e4904..b2db2b43a 100644 --- a/pkg/services/control/ir/service.go +++ b/pkg/services/control/ir/service.go @@ -20,3 +20,27 @@ func (x *HealthCheckResponse) SetBody(v *HealthCheckResponse_Body) { x.Body = v } } + +func (x *TickEpochRequest) SetBody(v *TickEpochRequest_Body) { + if x != nil { + x.Body = v + } +} + +func (x *TickEpochResponse) SetBody(v *TickEpochResponse_Body) { + if x != nil { + x.Body = v + } +} + +func (x *RemoveNodeRequest) SetBody(v *RemoveNodeRequest_Body) { + if x != nil { + x.Body = v + } +} + +func (x *RemoveNodeResponse) SetBody(v *RemoveNodeResponse_Body) { + if x != nil { + x.Body = v + } +} diff --git a/pkg/services/control/ir/service.pb.go b/pkg/services/control/ir/service.pb.go index 9f2834706..bec74a3be 100644 Binary files 
a/pkg/services/control/ir/service.pb.go and b/pkg/services/control/ir/service.pb.go differ diff --git a/pkg/services/control/ir/service.proto b/pkg/services/control/ir/service.proto index 5f99be16b..d647db0df 100644 --- a/pkg/services/control/ir/service.proto +++ b/pkg/services/control/ir/service.proto @@ -10,6 +10,10 @@ option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/ir/ service ControlService { // Performs health check of the IR node. rpc HealthCheck (HealthCheckRequest) returns (HealthCheckResponse); + // Forces a new epoch to be signaled by the IR node with high probability. + rpc TickEpoch (TickEpochRequest) returns (TickEpochResponse); + // Forces a node removal to be signaled by the IR node with high probability. + rpc RemoveNode (RemoveNodeRequest) returns (RemoveNodeResponse); } // Health check request. @@ -41,3 +45,33 @@ message HealthCheckResponse { // Body signature. Signature signature = 2; } + +message TickEpochRequest { + message Body{} + + Body body = 1; + Signature signature = 2; +} + +message TickEpochResponse { + message Body{} + + Body body = 1; + Signature signature = 2; +} + +message RemoveNodeRequest { + message Body{ + bytes key = 1; + } + + Body body = 1; + Signature signature = 2; +} + +message RemoveNodeResponse { + message Body{} + + Body body = 1; + Signature signature = 2; +} diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go index f6dd94b3a..c93253105 100644 Binary files a/pkg/services/control/ir/service_frostfs.pb.go and b/pkg/services/control/ir/service_frostfs.pb.go differ diff --git a/pkg/services/control/ir/service_grpc.pb.go b/pkg/services/control/ir/service_grpc.pb.go index b6bc6fdba..6ba214da0 100644 Binary files a/pkg/services/control/ir/service_grpc.pb.go and b/pkg/services/control/ir/service_grpc.pb.go differ diff --git a/pkg/services/control/ir/types.pb.go b/pkg/services/control/ir/types.pb.go index c89cd5a0d..8107b917e 100644 Binary files 
a/pkg/services/control/ir/types.pb.go and b/pkg/services/control/ir/types.pb.go differ diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go index 2676ea7a5..31ebfa760 100644 --- a/pkg/services/control/rpc.go +++ b/pkg/services/control/rpc.go @@ -13,8 +13,6 @@ const ( rpcDropObjects = "DropObjects" rpcListShards = "ListShards" rpcSetShardMode = "SetShardMode" - rpcDumpShard = "DumpShard" - rpcRestoreShard = "RestoreShard" rpcSynchronizeTree = "SynchronizeTree" rpcEvacuateShard = "EvacuateShard" rpcFlushCache = "FlushCache" @@ -27,10 +25,7 @@ func HealthCheck( req *HealthCheckRequest, opts ...client.CallOption, ) (*HealthCheckResponse, error) { - wResp := &healthCheckResponseWrapper{ - m: new(HealthCheckResponse), - } - + wResp := newResponseWrapper[HealthCheckResponse]() wReq := &requestWrapper{ m: req, } @@ -40,7 +35,7 @@ func HealthCheck( return nil, err } - return wResp.m, nil + return wResp.message, nil } // SetNetmapStatus executes ControlService.SetNetmapStatus RPC. @@ -49,9 +44,7 @@ func SetNetmapStatus( req *SetNetmapStatusRequest, opts ...client.CallOption, ) (*SetNetmapStatusResponse, error) { - wResp := &setNetmapStatusResponseWrapper{ - m: new(SetNetmapStatusResponse), - } + wResp := newResponseWrapper[SetNetmapStatusResponse]() wReq := &requestWrapper{ m: req, @@ -62,7 +55,7 @@ func SetNetmapStatus( return nil, err } - return wResp.m, nil + return wResp.message, nil } // DropObjects executes ControlService.DropObjects RPC. @@ -71,9 +64,7 @@ func DropObjects( req *DropObjectsRequest, opts ...client.CallOption, ) (*DropObjectsResponse, error) { - wResp := &dropObjectsResponseWrapper{ - m: new(DropObjectsResponse), - } + wResp := newResponseWrapper[DropObjectsResponse]() wReq := &requestWrapper{ m: req, @@ -83,7 +74,7 @@ func DropObjects( return nil, err } - return wResp.m, nil + return wResp.message, nil } // ListShards executes ControlService.ListShards RPC. 
@@ -92,9 +83,7 @@ func ListShards( req *ListShardsRequest, opts ...client.CallOption, ) (*ListShardsResponse, error) { - wResp := &listShardsResponseWrapper{ - m: new(ListShardsResponse), - } + wResp := newResponseWrapper[ListShardsResponse]() wReq := &requestWrapper{ m: req, @@ -104,7 +93,7 @@ func ListShards( return nil, err } - return wResp.m, nil + return wResp.message, nil } // SetShardMode executes ControlService.SetShardMode RPC. @@ -113,9 +102,7 @@ func SetShardMode( req *SetShardModeRequest, opts ...client.CallOption, ) (*SetShardModeResponse, error) { - wResp := &setShardModeResponseWrapper{ - m: new(SetShardModeResponse), - } + wResp := newResponseWrapper[SetShardModeResponse]() wReq := &requestWrapper{ m: req, @@ -125,38 +112,12 @@ func SetShardMode( return nil, err } - return wResp.m, nil -} - -// DumpShard executes ControlService.DumpShard RPC. -func DumpShard(cli *client.Client, req *DumpShardRequest, opts ...client.CallOption) (*DumpShardResponse, error) { - wResp := &dumpShardResponseWrapper{new(DumpShardResponse)} - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDumpShard), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.DumpShardResponse, nil -} - -// RestoreShard executes ControlService.DumpShard RPC. -func RestoreShard(cli *client.Client, req *RestoreShardRequest, opts ...client.CallOption) (*RestoreShardResponse, error) { - wResp := &restoreShardResponseWrapper{new(RestoreShardResponse)} - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcRestoreShard), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.RestoreShardResponse, nil + return wResp.message, nil } // SynchronizeTree executes ControlService.SynchronizeTree RPC. 
func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...client.CallOption) (*SynchronizeTreeResponse, error) { - wResp := &synchronizeTreeResponseWrapper{new(SynchronizeTreeResponse)} + wResp := newResponseWrapper[SynchronizeTreeResponse]() wReq := &requestWrapper{m: req} err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcSynchronizeTree), wReq, wResp, opts...) @@ -164,12 +125,12 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl return nil, err } - return wResp.SynchronizeTreeResponse, nil + return wResp.message, nil } // EvacuateShard executes ControlService.EvacuateShard RPC. func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) { - wResp := &evacuateShardResponseWrapper{new(EvacuateShardResponse)} + wResp := newResponseWrapper[EvacuateShardResponse]() wReq := &requestWrapper{m: req} err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...) @@ -177,12 +138,12 @@ func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client return nil, err } - return wResp.EvacuateShardResponse, nil + return wResp.message, nil } // FlushCache executes ControlService.FlushCache RPC. func FlushCache(cli *client.Client, req *FlushCacheRequest, opts ...client.CallOption) (*FlushCacheResponse, error) { - wResp := &flushCacheResponseWrapper{new(FlushCacheResponse)} + wResp := newResponseWrapper[FlushCacheResponse]() wReq := &requestWrapper{m: req} err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcFlushCache), wReq, wResp, opts...) @@ -190,12 +151,12 @@ func FlushCache(cli *client.Client, req *FlushCacheRequest, opts ...client.CallO return nil, err } - return wResp.FlushCacheResponse, nil + return wResp.message, nil } // Doctor executes ControlService.Doctor RPC. 
func Doctor(cli *client.Client, req *DoctorRequest, opts ...client.CallOption) (*DoctorResponse, error) { - wResp := &doctorResponseWrapper{new(DoctorResponse)} + wResp := newResponseWrapper[DoctorResponse]() wReq := &requestWrapper{m: req} err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDoctor), wReq, wResp, opts...) @@ -203,5 +164,5 @@ func Doctor(cli *client.Client, req *DoctorRequest, opts ...client.CallOption) ( return nil, err } - return wResp.DoctorResponse, nil + return wResp.message, nil } diff --git a/pkg/services/control/server/dump.go b/pkg/services/control/server/dump.go deleted file mode 100644 index 28be02aa4..000000000 --- a/pkg/services/control/server/dump.go +++ /dev/null @@ -1,37 +0,0 @@ -package control - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s *Server) DumpShard(_ context.Context, req *control.DumpShardRequest) (*control.DumpShardResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID()) - - var prm shard.DumpPrm - prm.WithPath(req.GetBody().GetFilepath()) - prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors()) - - err = s.s.DumpShard(shardID, prm) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := new(control.DumpShardResponse) - resp.SetBody(new(control.DumpShardResponse_Body)) - - err = SignMessage(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go index b64a91883..afa4011b9 100644 --- a/pkg/services/control/server/evacuate.go +++ b/pkg/services/control/server/evacuate.go @@ 
-48,7 +48,7 @@ func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRe return resp, nil } -func (s *Server) replicate(addr oid.Address, obj *objectSDK.Object) error { +func (s *Server) replicate(ctx context.Context, addr oid.Address, obj *objectSDK.Object) error { cid, ok := obj.ContainerID() if !ok { // Return nil to prevent situations where a shard can't be evacuated @@ -89,7 +89,7 @@ func (s *Server) replicate(addr oid.Address, obj *objectSDK.Object) error { task.SetObjectAddress(addr) task.SetCopiesNumber(1) task.SetNodes(nodes) - s.replicator.HandleTask(context.TODO(), task, &res) + s.replicator.HandleTask(ctx, task, &res) if res.count == 0 { return errors.New("object was not replicated") diff --git a/pkg/services/control/server/flush_cache.go b/pkg/services/control/server/flush_cache.go index fdfd136a6..9ead530db 100644 --- a/pkg/services/control/server/flush_cache.go +++ b/pkg/services/control/server/flush_cache.go @@ -9,7 +9,7 @@ import ( "google.golang.org/grpc/status" ) -func (s *Server) FlushCache(_ context.Context, req *control.FlushCacheRequest) (*control.FlushCacheResponse, error) { +func (s *Server) FlushCache(ctx context.Context, req *control.FlushCacheRequest) (*control.FlushCacheResponse, error) { err := s.isValidRequest(req) if err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) @@ -19,7 +19,7 @@ func (s *Server) FlushCache(_ context.Context, req *control.FlushCacheRequest) ( var prm engine.FlushWriteCachePrm prm.SetShardID(shardID) - _, err = s.s.FlushWriteCache(prm) + _, err = s.s.FlushWriteCache(ctx, prm) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/pkg/services/control/server/restore.go b/pkg/services/control/server/restore.go deleted file mode 100644 index 0e6367951..000000000 --- a/pkg/services/control/server/restore.go +++ /dev/null @@ -1,37 +0,0 @@ -package control - -import ( - "context" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s *Server) RestoreShard(_ context.Context, req *control.RestoreShardRequest) (*control.RestoreShardResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID()) - - var prm shard.RestorePrm - prm.WithPath(req.GetBody().GetFilepath()) - prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors()) - - err = s.s.RestoreShard(shardID, prm) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := new(control.RestoreShardResponse) - resp.SetBody(new(control.RestoreShardResponse_Body)) - - err = SignMessage(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go index ba7e682c6..d4a856952 100644 --- a/pkg/services/control/server/set_netmap_status.go +++ b/pkg/services/control/server/set_netmap_status.go @@ -11,7 +11,7 @@ import ( // SetNetmapStatus sets node status in FrostFS network. // // If request is unsigned or signed by disallowed key, permission error returns. 
-func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) { +func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) { // verify request if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) diff --git a/pkg/services/control/service.go b/pkg/services/control/service.go index dd349dc57..ef0c0a8d2 100644 --- a/pkg/services/control/service.go +++ b/pkg/services/control/service.go @@ -127,64 +127,6 @@ func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) { } } -// SetShardID sets shard ID for the dump shard request. -func (x *DumpShardRequest_Body) SetShardID(id []byte) { - x.Shard_ID = id -} - -// SetFilepath sets filepath for the dump shard request. -func (x *DumpShardRequest_Body) SetFilepath(p string) { - x.Filepath = p -} - -// SetIgnoreErrors sets ignore errors flag for the dump shard request. -func (x *DumpShardRequest_Body) SetIgnoreErrors(ignore bool) { - x.IgnoreErrors = ignore -} - -// SetBody sets request body. -func (x *DumpShardRequest) SetBody(v *DumpShardRequest_Body) { - if x != nil { - x.Body = v - } -} - -// SetBody sets response body. -func (x *DumpShardResponse) SetBody(v *DumpShardResponse_Body) { - if x != nil { - x.Body = v - } -} - -// SetShardID sets shard ID for the restore shard request. -func (x *RestoreShardRequest_Body) SetShardID(id []byte) { - x.Shard_ID = id -} - -// SetFilepath sets filepath for the restore shard request. -func (x *RestoreShardRequest_Body) SetFilepath(p string) { - x.Filepath = p -} - -// SetIgnoreErrors sets ignore errors flag for the restore shard request. -func (x *RestoreShardRequest_Body) SetIgnoreErrors(ignore bool) { - x.IgnoreErrors = ignore -} - -// SetBody sets request body. 
-func (x *RestoreShardRequest) SetBody(v *RestoreShardRequest_Body) { - if x != nil { - x.Body = v - } -} - -// SetBody sets response body. -func (x *RestoreShardResponse) SetBody(v *RestoreShardResponse_Body) { - if x != nil { - x.Body = v - } -} - // SetBody sets list shards request body. func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) { if x != nil { diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go index ca3e2770e..a126ce16d 100644 Binary files a/pkg/services/control/service.pb.go and b/pkg/services/control/service.pb.go differ diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto index 7c661e661..32a87c744 100644 --- a/pkg/services/control/service.proto +++ b/pkg/services/control/service.proto @@ -23,12 +23,6 @@ service ControlService { // Sets mode of the shard. rpc SetShardMode (SetShardModeRequest) returns (SetShardModeResponse); - // Dump objects from the shard. - rpc DumpShard (DumpShardRequest) returns (DumpShardResponse); - - // Restore objects from dump. - rpc RestoreShard (RestoreShardRequest) returns (RestoreShardResponse); - // Synchronizes all log operations for the specified tree. rpc SynchronizeTree (SynchronizeTreeRequest) returns (SynchronizeTreeResponse); @@ -201,75 +195,6 @@ message SetShardModeResponse { Signature signature = 2; } -// DumpShard request. -message DumpShardRequest { - // Request body structure. - message Body { - // ID of the shard. - bytes shard_ID = 1; - - // Path to the output. - string filepath = 2; - - // Flag indicating whether object read errors should be ignored. - bool ignore_errors = 3; - } - - // Body of dump shard request message. - Body body = 1; - - // Body signature. - Signature signature = 2; -} - -// DumpShard response. -message DumpShardResponse { - // Response body structure. - message Body { - } - - // Body of dump shard response message. - Body body = 1; - - // Body signature. 
- Signature signature = 2; -} - - -// RestoreShard request. -message RestoreShardRequest { - // Request body structure. - message Body { - // ID of the shard. - bytes shard_ID = 1; - - // Path to the output. - string filepath = 2; - - // Flag indicating whether object read errors should be ignored. - bool ignore_errors = 3; - } - - // Body of restore shard request message. - Body body = 1; - - // Body signature. - Signature signature = 2; -} - -// RestoreShard response. -message RestoreShardResponse { - // Response body structure. - message Body { - } - - // Body of restore shard response message. - Body body = 1; - - // Body signature. - Signature signature = 2; -} - // SynchronizeTree request. message SynchronizeTreeRequest { // Request body structure. diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go index 0f50d5893..b9b865a90 100644 Binary files a/pkg/services/control/service_frostfs.pb.go and b/pkg/services/control/service_frostfs.pb.go differ diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go index 4a4fbeac1..3fa1e54de 100644 Binary files a/pkg/services/control/service_grpc.pb.go and b/pkg/services/control/service_grpc.pb.go differ diff --git a/pkg/services/control/types.pb.go b/pkg/services/control/types.pb.go index 735517d3d..9d6864759 100644 Binary files a/pkg/services/control/types.pb.go and b/pkg/services/control/types.pb.go differ diff --git a/pkg/services/control/types.proto b/pkg/services/control/types.proto index c85d672c2..5b4844580 100644 --- a/pkg/services/control/types.proto +++ b/pkg/services/control/types.proto @@ -54,9 +54,6 @@ message NodeInfo { // attributes it's a string presenting floating point number with comma or // point delimiter for decimal part. In the Network Map it will be saved as // 64-bit unsigned integer representing number of minimal token fractions. - // * Subnet \ - // String ID of Node's storage subnet. 
There can be only one subnet served - // by the Storage Node. // * Locode \ // Node's geographic location in // [UN/LOCODE](https://www.unece.org/cefact/codesfortrade/codes_index.html) diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index 9fa3d767f..d77c69d4d 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -129,7 +129,7 @@ func (s *executorSvc) NetworkInfo( return resp, nil } -func (s *executorSvc) Snapshot(_ context.Context, req *netmap.SnapshotRequest) (*netmap.SnapshotResponse, error) { +func (s *executorSvc) Snapshot(_ context.Context, _ *netmap.SnapshotRequest) (*netmap.SnapshotResponse, error) { var nm netmap.NetMap err := s.state.ReadCurrentNetMap(&nm) diff --git a/pkg/services/notificator/nats/service.go b/pkg/services/notificator/nats/service.go index 54eb373ec..6a7e80a53 100644 --- a/pkg/services/notificator/nats/service.go +++ b/pkg/services/notificator/nats/service.go @@ -6,6 +6,7 @@ import ( "fmt" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/nats-io/nats.go" @@ -98,10 +99,10 @@ func New(oo ...Option) *Writer { w.opts.nOpts = append(w.opts.nOpts, nats.NoCallbacksAfterClientClose(), // do not call callbacks when it was planned writer stop nats.DisconnectErrHandler(func(conn *nats.Conn, err error) { - w.log.Error("nats: connection was lost", zap.Error(err)) + w.log.Error(logs.NatsNatsConnectionWasLost, zap.Error(err)) }), nats.ReconnectHandler(func(conn *nats.Conn) { - w.log.Warn("nats: reconnected to the server") + w.log.Warn(logs.NatsNatsReconnectedToTheServer) }), ) @@ -124,7 +125,7 @@ func (n *Writer) Connect(ctx context.Context, endpoint string) error { go func() { <-ctx.Done() - n.opts.log.Info("nats: closing connection as the context is done") + n.opts.log.Info(logs.NatsNatsClosingConnectionAsTheContextIsDone) nc.Close() }() 
diff --git a/pkg/services/notificator/service.go b/pkg/services/notificator/service.go index 0a8a5d96d..bbf4e4823 100644 --- a/pkg/services/notificator/service.go +++ b/pkg/services/notificator/service.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -74,10 +75,10 @@ func New(prm *Prm) *Notificator { // and passes their addresses to the NotificationWriter. func (n *Notificator) ProcessEpoch(ctx context.Context, epoch uint64) { logger := n.l.With(zap.Uint64("epoch", epoch)) - logger.Debug("notificator: start processing object notifications") + logger.Debug(logs.NotificatorNotificatorStartProcessingObjectNotifications) n.ns.Iterate(ctx, epoch, func(topic string, addr oid.Address) { - n.l.Debug("notificator: processing object notification", + n.l.Debug(logs.NotificatorNotificatorProcessingObjectNotification, zap.String("topic", topic), zap.Stringer("address", addr), ) diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go index 87d2f9c82..351b4ad3b 100644 --- a/pkg/services/object/acl/acl.go +++ b/pkg/services/object/acl/acl.go @@ -125,15 +125,17 @@ func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error { return nil } + bearerTok := reqInfo.Bearer() + impersonate := bearerTok != nil && bearerTok.Impersonate() + // if bearer token is not allowed, then ignore it - if !basicACL.AllowedBearerRules(reqInfo.Operation()) { + if impersonate || !basicACL.AllowedBearerRules(reqInfo.Operation()) { reqInfo.CleanBearer() } var table eaclSDK.Table cnr := reqInfo.ContainerID() - bearerTok := reqInfo.Bearer() if bearerTok == nil { eaclInfo, err := c.eaclSrc.GetEACL(cnr) if err != nil { diff --git a/pkg/services/object/acl/v2/classifier.go b/pkg/services/object/acl/v2/classifier.go index 2bf5a3958..cdc5fb623 100644 --- 
a/pkg/services/object/acl/v2/classifier.go +++ b/pkg/services/object/acl/v2/classifier.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/sha256" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" @@ -48,7 +49,7 @@ func (c senderClassifier) classify( isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes) if err != nil { // do not throw error, try best case matching - c.log.Debug("can't check if request from inner ring", + c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing, zap.String("error", err.Error())) } else if isInnerRingNode { return &classifyResult{ @@ -65,7 +66,7 @@ func (c senderClassifier) classify( // error might happen if request has `RoleOther` key and placement // is not possible for previous epoch, so // do not throw error, try best case matching - c.log.Debug("can't check if request from container node", + c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode, zap.String("error", err.Error())) } else if isContainerNode { return &classifyResult{ diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go index 0cf734d7a..675768969 100644 --- a/pkg/services/object/acl/v2/request.go +++ b/pkg/services/object/acl/v2/request.go @@ -113,6 +113,10 @@ func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) { return nil, nil, errEmptyVerificationHeader } + if r.bearer != nil && r.bearer.Impersonate() { + return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes()) + } + // if session token is presented, use it as truth source if r.token != nil { // verify signature of session token @@ -125,9 +129,13 @@ func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) { return nil, nil, errEmptyBodySig } - key, err := unmarshalPublicKey(bodySignature.GetKey()) + return 
unmarshalPublicKeyWithOwner(bodySignature.GetKey()) +} + +func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { + key, err := unmarshalPublicKey(rawKey) if err != nil { - return nil, nil, fmt.Errorf("invalid key in body signature: %w", err) + return nil, nil, fmt.Errorf("invalid signature key: %w", err) } var idSender user.ID diff --git a/pkg/services/object/delete/container.go b/pkg/services/object/delete/container.go index a2f099d5b..3106d8efd 100644 --- a/pkg/services/object/delete/container.go +++ b/pkg/services/object/delete/container.go @@ -1,5 +1,7 @@ package deletesvc +import "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + func (exec *execCtx) executeOnContainer() { - exec.log.Debug("request is not rolled over to the container") + exec.log.Debug(logs.DeleteRequestIsNotRolledOverToTheContainer) } diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go index a959b53cb..ebc191538 100644 --- a/pkg/services/object/delete/delete.go +++ b/pkg/services/object/delete/delete.go @@ -3,6 +3,7 @@ package deletesvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "go.uber.org/zap" ) @@ -34,7 +35,7 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error { } func (exec *execCtx) execute(ctx context.Context) { - exec.log.Debug("serving request...") + exec.log.Debug(logs.ServingRequest) // perform local operation exec.executeLocal(ctx) @@ -46,9 +47,9 @@ func (exec *execCtx) analyzeStatus(execCnr bool) { // analyze local result switch exec.status { case statusOK: - exec.log.Debug("operation finished successfully") + exec.log.Debug(logs.OperationFinishedSuccessfully) default: - exec.log.Debug("operation finished with error", + exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", exec.err.Error()), ) diff --git a/pkg/services/object/delete/exec.go 
b/pkg/services/object/delete/exec.go index 782cad71b..91bc6b3d7 100644 --- a/pkg/services/object/delete/exec.go +++ b/pkg/services/object/delete/exec.go @@ -5,6 +5,7 @@ import ( "strconv" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -83,7 +84,7 @@ func (exec *execCtx) formSplitInfo(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not compose split info", + exec.log.Debug(logs.DeleteCouldNotComposeSplitInfo, zap.String("error", err.Error()), ) case err == nil: @@ -96,7 +97,7 @@ func (exec *execCtx) formSplitInfo(ctx context.Context) bool { func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) { if exec.splitInfo == nil { - exec.log.Debug("no split info, object is PHY") + exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY) return true } @@ -119,7 +120,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) { func (exec *execCtx) collectChain(ctx context.Context) bool { var chain []oid.ID - exec.log.Debug("assembling chain...") + exec.log.Debug(logs.DeleteAssemblingChain) for prev, withPrev := exec.splitInfo.LastPart(); withPrev; { chain = append(chain, prev) @@ -131,7 +132,7 @@ func (exec *execCtx) collectChain(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not get previous split element", + exec.log.Debug(logs.DeleteCouldNotGetPreviousSplitElement, zap.Stringer("id", prev), zap.String("error", err.Error()), ) @@ -154,7 +155,7 @@ func (exec *execCtx) collectChain(ctx context.Context) bool { } func (exec *execCtx) collectChildren(ctx context.Context) bool { - exec.log.Debug("collecting children...") + exec.log.Debug(logs.DeleteCollectingChildren) children, err := 
exec.svc.header.children(ctx, exec) @@ -163,7 +164,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not collect object children", + exec.log.Debug(logs.DeleteCouldNotCollectObjectChildren, zap.String("error", err.Error()), ) @@ -181,7 +182,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) bool { } func (exec *execCtx) supplementBySplitID(ctx context.Context) bool { - exec.log.Debug("supplement by split ID") + exec.log.Debug(logs.DeleteSupplementBySplitID) chain, err := exec.svc.searcher.splitMembers(ctx, exec) @@ -190,7 +191,7 @@ func (exec *execCtx) supplementBySplitID(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not search for split chain members", + exec.log.Debug(logs.DeleteCouldNotSearchForSplitChainMembers, zap.String("error", err.Error()), ) @@ -226,7 +227,7 @@ func (exec *execCtx) initTombstoneObject() bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not marshal tombstone structure", + exec.log.Debug(logs.DeleteCouldNotMarshalTombstoneStructure, zap.String("error", err.Error()), ) @@ -265,7 +266,7 @@ func (exec *execCtx) saveTombstone(ctx context.Context) bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not save the tombstone", + exec.log.Debug(logs.DeleteCouldNotSaveTheTombstone, zap.String("error", err.Error()), ) diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go index 17eb0e4e1..34839b194 100644 --- a/pkg/services/object/delete/local.go +++ b/pkg/services/object/delete/local.go @@ -3,20 +3,21 @@ package deletesvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" ) func (exec *execCtx) executeLocal(ctx context.Context) { - exec.log.Debug("forming 
tombstone structure...") + exec.log.Debug(logs.DeleteFormingTombstoneStructure) ok := exec.formTombstone(ctx) if !ok { return } - exec.log.Debug("tombstone structure successfully formed, saving...") + exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving) exec.saveTombstone(ctx) } @@ -27,7 +28,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not read tombstone lifetime config", + exec.log.Debug(logs.DeleteCouldNotReadTombstoneLifetimeConfig, zap.String("error", err.Error()), ) @@ -40,14 +41,14 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) { ) exec.addMembers([]oid.ID{exec.address().Object()}) - exec.log.Debug("forming split info...") + exec.log.Debug(logs.DeleteFormingSplitInfo) ok = exec.formSplitInfo(ctx) if !ok { return } - exec.log.Debug("split info successfully formed, collecting members...") + exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers) exec.tombstone.SetSplitID(exec.splitInfo.SplitID()) @@ -56,7 +57,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) { return } - exec.log.Debug("members successfully collected") + exec.log.Debug(logs.DeleteMembersSuccessfullyCollected) ok = exec.initTombstoneObject() if !ok { diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go index f9870f7e0..f6341f02a 100644 --- a/pkg/services/object/delete/util.go +++ b/pkg/services/object/delete/util.go @@ -120,7 +120,7 @@ func (w *putSvcWrapper) put(ctx context.Context, exec *execCtx) (*oid.ID, error) WithCommonPrm(exec.commonParameters()). 
WithObject(exec.tombstoneObj.CutPayload()) - err = streamer.Init(initPrm) + err = streamer.Init(ctx, initPrm) if err != nil { return nil, err } diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go index db71df6a4..777822ac3 100644 --- a/pkg/services/object/get/assemble.go +++ b/pkg/services/object/get/assemble.go @@ -4,15 +4,16 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" ) -func (exec *execCtx) assemble(ctx context.Context) { - if !exec.canAssemble() { - exec.log.Debug("can not assemble the object") +func (r *request) assemble(ctx context.Context) { + if !r.canAssemble() { + r.log.Debug(logs.GetCanNotAssembleTheObject) return } @@ -27,35 +28,35 @@ func (exec *execCtx) assemble(ctx context.Context) { // - the assembly process is expected to be handled on a container node // only since the requests forwarding mechanism presentation; such the // node should have enough rights for getting any child object by design. - exec.prm.common.ForgetTokens() + r.prm.common.ForgetTokens() // Do not use forwarding during assembly stage. // Request forwarding closure inherited in produced // `execCtx` so it should be disabled there. 
- exec.disableForwarding() + r.disableForwarding() - exec.log.Debug("trying to assemble the object...") + r.log.Debug(logs.GetTryingToAssembleTheObject) - assembler := newAssembler(exec.address(), exec.splitInfo(), exec.ctxRange(), exec) + assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r) - exec.log.Debug("assembling splitted object...", - zap.Stringer("address", exec.address()), - zap.Uint64("range_offset", exec.ctxRange().GetOffset()), - zap.Uint64("range_length", exec.ctxRange().GetLength()), + r.log.Debug(logs.GetAssemblingSplittedObject, + zap.Stringer("address", r.address()), + zap.Uint64("range_offset", r.ctxRange().GetOffset()), + zap.Uint64("range_length", r.ctxRange().GetLength()), ) - defer exec.log.Debug("assembling splitted object completed", - zap.Stringer("address", exec.address()), - zap.Uint64("range_offset", exec.ctxRange().GetOffset()), - zap.Uint64("range_length", exec.ctxRange().GetLength()), + defer r.log.Debug(logs.GetAssemblingSplittedObjectCompleted, + zap.Stringer("address", r.address()), + zap.Uint64("range_offset", r.ctxRange().GetOffset()), + zap.Uint64("range_length", r.ctxRange().GetLength()), ) - obj, err := assembler.Assemble(ctx, exec.prm.objWriter) + obj, err := assembler.Assemble(ctx, r.prm.objWriter) if err != nil { - exec.log.Warn("failed to assemble splitted object", + r.log.Warn(logs.GetFailedToAssembleSplittedObject, zap.Error(err), - zap.Stringer("address", exec.address()), - zap.Uint64("range_offset", exec.ctxRange().GetOffset()), - zap.Uint64("range_length", exec.ctxRange().GetLength()), + zap.Stringer("address", r.address()), + zap.Uint64("range_offset", r.ctxRange().GetOffset()), + zap.Uint64("range_length", r.ctxRange().GetLength()), ) } @@ -67,27 +68,27 @@ func (exec *execCtx) assemble(ctx context.Context) { switch { default: - exec.status = statusUndefined - exec.err = err + r.status = statusUndefined + r.err = err case err == nil: - exec.status = statusOK - exec.err = nil - exec.collectedObject 
= obj + r.status = statusOK + r.err = nil + r.collectedObject = obj case errors.As(err, &errRemovedRemote): - exec.status = statusINHUMED - exec.err = errRemovedRemote + r.status = statusINHUMED + r.err = errRemovedRemote case errors.As(err, &errRemovedLocal): - exec.status = statusINHUMED - exec.err = errRemovedLocal + r.status = statusINHUMED + r.err = errRemovedLocal case errors.As(err, &errSplitInfo): - exec.status = statusVIRTUAL - exec.err = errSplitInfo + r.status = statusVIRTUAL + r.err = errSplitInfo case errors.As(err, &errOutOfRangeRemote): - exec.status = statusOutOfRange - exec.err = errOutOfRangeRemote + r.status = statusOutOfRange + r.err = errOutOfRangeRemote case errors.As(err, &errOutOfRangeLocal): - exec.status = statusOutOfRange - exec.err = errOutOfRangeLocal + r.status = statusOutOfRange + r.err = errOutOfRangeLocal } } @@ -95,42 +96,54 @@ func equalAddresses(a, b oid.Address) bool { return a.Container().Equals(b.Container()) && a.Object().Equals(b.Object()) } -func (exec *execCtx) HeadObject(ctx context.Context, id oid.ID) (*objectSDK.Object, error) { - p := exec.prm - p.common = p.common.WithLocalOnly(false) - p.addr.SetContainer(exec.containerID()) - p.addr.SetObject(id) - - prm := HeadPrm{ - commonPrm: p.commonPrm, - } - +func (r *request) HeadObject(ctx context.Context, id oid.ID) (*objectSDK.Object, error) { w := NewSimpleObjectWriter() - prm.SetHeaderWriter(w) - err := exec.svc.Head(ctx, prm) - if err != nil { + p := RequestParameters{} + p.common = p.common.WithLocalOnly(false) + p.addr.SetContainer(r.containerID()) + p.addr.SetObject(id) + p.head = true + p.SetHeaderWriter(w) + + if err := r.getObjectWithIndependentRequest(ctx, p); err != nil { return nil, err } return w.Object(), nil } -func (exec *execCtx) GetObject(ctx context.Context, id oid.ID, rng *objectSDK.Range) (*objectSDK.Object, error) { +func (r *request) GetObject(ctx context.Context, id oid.ID, rng *objectSDK.Range) (*objectSDK.Object, error) { w := 
NewSimpleObjectWriter() - p := exec.prm + p := r.prm p.common = p.common.WithLocalOnly(false) p.objWriter = w - p.SetRange(rng) + p.rng = rng - p.addr.SetContainer(exec.containerID()) + p.addr.SetContainer(r.containerID()) p.addr.SetObject(id) - statusError := exec.svc.get(ctx, p.commonPrm, withPayloadRange(rng)) - - if statusError.err != nil { - return nil, statusError.err + if err := r.getObjectWithIndependentRequest(ctx, p); err != nil { + return nil, err } return w.Object(), nil } + +func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm RequestParameters) error { + detachedExecutor := &request{ + keyStore: r.keyStore, + traverserGenerator: r.traverserGenerator, + remoteStorageConstructor: r.remoteStorageConstructor, + epochSource: r.epochSource, + localStorage: r.localStorage, + + prm: prm, + infoSplit: objectSDK.NewSplitInfo(), + log: r.log, + } + + detachedExecutor.execute(ctx) + + return detachedExecutor.statusError.err +} diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go index 4ae1981b1..321d4b16d 100644 --- a/pkg/services/object/get/assembler.go +++ b/pkg/services/object/get/assembler.go @@ -2,7 +2,6 @@ package getsvc import ( "context" - "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -15,10 +14,6 @@ type objectGetter interface { HeadObject(ctx context.Context, id oid.ID) (*objectSDK.Object, error) } -var ( - errParentAddressDiffers = errors.New("parent address in child object differs") -) - type assembler struct { addr oid.Address splitInfo *objectSDK.SplitInfo @@ -89,7 +84,7 @@ func (a *assembler) initializeFromSourceObjectID(ctx context.Context, id oid.ID) parentObject := sourceObject.Parent() if parentObject == nil { - return nil, nil, errors.New("received child with empty parent") + return nil, nil, errChildWithEmptyParent } a.parentObject = parentObject @@ -159,10 +154,7 @@ func (a *assembler) 
assembleObjectByChildrenList(ctx context.Context, childrenID if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil { return err } - if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { - return err - } - return nil + return writer.WriteChunk(ctx, a.parentObject.Payload()) } func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index cfb538d38..d22b14192 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -3,30 +3,31 @@ package getsvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "go.uber.org/zap" ) -func (exec *execCtx) executeOnContainer(ctx context.Context) { - if exec.isLocal() { - exec.log.Debug("return result directly") +func (r *request) executeOnContainer(ctx context.Context) { + if r.isLocal() { + r.log.Debug(logs.GetReturnResultDirectly) return } - lookupDepth := exec.netmapLookupDepth() + lookupDepth := r.netmapLookupDepth() - exec.log.Debug("trying to execute in container...", + r.log.Debug(logs.TryingToExecuteInContainer, zap.Uint64("netmap lookup depth", lookupDepth), ) // initialize epoch number - ok := exec.initEpoch() + ok := r.initEpoch() if !ok { return } for { - if exec.processCurrentEpoch(ctx) { + if r.processCurrentEpoch(ctx) { break } @@ -38,16 +39,16 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) { lookupDepth-- // go to the previous epoch - exec.curProcEpoch-- + r.curProcEpoch-- } } -func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { - exec.log.Debug("process epoch", - zap.Uint64("number", exec.curProcEpoch), +func (r *request) processCurrentEpoch(ctx context.Context) bool { + r.log.Debug(logs.ProcessEpoch, + zap.Uint64("number", r.curProcEpoch), ) - 
traverser, ok := exec.generateTraverser(exec.address()) + traverser, ok := r.generateTraverser(r.address()) if !ok { return true } @@ -55,12 +56,12 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { ctx, cancel := context.WithCancel(ctx) defer cancel() - exec.status = statusUndefined + r.status = statusUndefined for { addrs := traverser.Next() if len(addrs) == 0 { - exec.log.Debug("no more nodes, abort placement iteration") + r.log.Debug(logs.NoMoreNodesAbortPlacementIteration) return false } @@ -68,8 +69,8 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { for i := range addrs { select { case <-ctx.Done(): - exec.log.Debug("interrupt placement iteration by context", - zap.String("error", ctx.Err().Error()), + r.log.Debug(logs.InterruptPlacementIterationByContext, + zap.Error(ctx.Err()), ) return true @@ -83,8 +84,8 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { client.NodeInfoFromNetmapElement(&info, addrs[i]) - if exec.processNode(ctx, info) { - exec.log.Debug("completing the operation") + if r.processNode(ctx, info) { + r.log.Debug(logs.GetCompletingTheOperation) return true } } diff --git a/pkg/services/object/get/errors.go b/pkg/services/object/get/errors.go new file mode 100644 index 000000000..6ea16a144 --- /dev/null +++ b/pkg/services/object/get/errors.go @@ -0,0 +1,10 @@ +package getsvc + +import "errors" + +var ( + errRangeZeroLength = errors.New("zero range length") + errRangeOverflow = errors.New("range overflow") + errChildWithEmptyParent = errors.New("received child with empty parent") + errParentAddressDiffers = errors.New("parent address in child object differs") +) diff --git a/pkg/services/object/get/exec.go b/pkg/services/object/get/exec.go deleted file mode 100644 index 2ba014574..000000000 --- a/pkg/services/object/get/exec.go +++ /dev/null @@ -1,278 +0,0 @@ -package getsvc - -import ( - "context" - "crypto/ecdsa" - - clientcore 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -type statusError struct { - status int - err error -} - -type execCtx struct { - svc *Service - - prm RangePrm - - statusError - - infoSplit *objectSDK.SplitInfo - - log *logger.Logger - - collectedObject *objectSDK.Object - - head bool - - curProcEpoch uint64 -} - -type execOption func(*execCtx) - -const ( - statusUndefined int = iota - statusOK - statusINHUMED - statusVIRTUAL - statusOutOfRange -) - -func headOnly() execOption { - return func(c *execCtx) { - c.head = true - } -} - -func withPayloadRange(r *objectSDK.Range) execOption { - return func(c *execCtx) { - c.prm.rng = r - } -} - -func (exec *execCtx) setLogger(l *logger.Logger) { - req := "GET" - if exec.headOnly() { - req = "HEAD" - } else if exec.ctxRange() != nil { - req = "GET_RANGE" - } - - exec.log = &logger.Logger{Logger: l.With( - zap.String("request", req), - zap.Stringer("address", exec.address()), - zap.Bool("raw", exec.isRaw()), - zap.Bool("local", exec.isLocal()), - zap.Bool("with session", exec.prm.common.SessionToken() != nil), - zap.Bool("with bearer", exec.prm.common.BearerToken() != nil), - )} -} - -func (exec execCtx) isLocal() bool { - return exec.prm.common.LocalOnly() -} - -func (exec execCtx) isRaw() bool { - return exec.prm.raw -} - -func (exec execCtx) address() oid.Address { - return exec.prm.addr -} - -func (exec execCtx) key() (*ecdsa.PrivateKey, error) { - if exec.prm.signerKey != nil { - // the key has already been requested and - // cached in the previous operations - return 
exec.prm.signerKey, nil - } - - var sessionInfo *util.SessionInfo - - if tok := exec.prm.common.SessionToken(); tok != nil { - sessionInfo = &util.SessionInfo{ - ID: tok.ID(), - Owner: tok.Issuer(), - } - } - - return exec.svc.keyStore.GetKey(sessionInfo) -} - -func (exec *execCtx) canAssemble() bool { - return !exec.isRaw() && !exec.headOnly() -} - -func (exec *execCtx) splitInfo() *objectSDK.SplitInfo { - return exec.infoSplit -} - -func (exec *execCtx) containerID() cid.ID { - return exec.address().Container() -} - -func (exec *execCtx) ctxRange() *objectSDK.Range { - return exec.prm.rng -} - -func (exec *execCtx) headOnly() bool { - return exec.head -} - -func (exec *execCtx) netmapEpoch() uint64 { - return exec.prm.common.NetmapEpoch() -} - -func (exec *execCtx) netmapLookupDepth() uint64 { - return exec.prm.common.NetmapLookupDepth() -} - -func (exec *execCtx) initEpoch() bool { - exec.curProcEpoch = exec.netmapEpoch() - if exec.curProcEpoch > 0 { - return true - } - - e, err := exec.svc.currentEpochReceiver.currentEpoch() - - switch { - default: - exec.status = statusUndefined - exec.err = err - - exec.log.Debug("could not get current epoch number", - zap.String("error", err.Error()), - ) - - return false - case err == nil: - exec.curProcEpoch = e - return true - } -} - -func (exec *execCtx) generateTraverser(addr oid.Address) (*placement.Traverser, bool) { - obj := addr.Object() - - t, err := exec.svc.traverserGenerator.GenerateTraverser(addr.Container(), &obj, exec.curProcEpoch) - - switch { - default: - exec.status = statusUndefined - exec.err = err - - exec.log.Debug("could not generate container traverser", - zap.String("error", err.Error()), - ) - - return nil, false - case err == nil: - return t, true - } -} - -func (exec execCtx) remoteClient(info clientcore.NodeInfo) (getClient, bool) { - c, err := exec.svc.clientCache.get(info) - - switch { - default: - exec.status = statusUndefined - exec.err = err - - exec.log.Debug("could not construct remote 
node client") - case err == nil: - return c, true - } - - return nil, false -} - -func mergeSplitInfo(dst, src *objectSDK.SplitInfo) { - if last, ok := src.LastPart(); ok { - dst.SetLastPart(last) - } - - if link, ok := src.Link(); ok { - dst.SetLink(link) - } - - if splitID := src.SplitID(); splitID != nil { - dst.SetSplitID(splitID) - } -} - -func (exec *execCtx) writeCollectedHeader(ctx context.Context) bool { - if exec.ctxRange() != nil { - return true - } - - err := exec.prm.objWriter.WriteHeader( - ctx, - exec.collectedObject.CutPayload(), - ) - - switch { - default: - exec.status = statusUndefined - exec.err = err - - exec.log.Debug("could not write header", - zap.String("error", err.Error()), - ) - case err == nil: - exec.status = statusOK - exec.err = nil - } - - return exec.status == statusOK -} - -func (exec *execCtx) writeObjectPayload(ctx context.Context, obj *objectSDK.Object) bool { - if exec.headOnly() { - return true - } - - err := exec.prm.objWriter.WriteChunk(ctx, obj.Payload()) - - switch { - default: - exec.status = statusUndefined - exec.err = err - - exec.log.Debug("could not write payload chunk", - zap.String("error", err.Error()), - ) - case err == nil: - exec.status = statusOK - exec.err = nil - } - - return err == nil -} - -func (exec *execCtx) writeCollectedObject(ctx context.Context) { - if ok := exec.writeCollectedHeader(ctx); ok { - exec.writeObjectPayload(ctx, exec.collectedObject) - } -} - -// isForwardingEnabled returns true if common execution -// parameters has request forwarding closure set. -func (exec execCtx) isForwardingEnabled() bool { - return exec.prm.forwarder != nil -} - -// disableForwarding removes request forwarding closure from common -// parameters, so it won't be inherited in new execution contexts. 
-func (exec *execCtx) disableForwarding() { - exec.prm.SetRequestForwarder(nil) -} diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go index 0f5983e99..457193a59 100644 --- a/pkg/services/object/get/get.go +++ b/pkg/services/object/get/get.go @@ -3,6 +3,7 @@ package getsvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.uber.org/zap" @@ -10,18 +11,18 @@ import ( // Get serves a request to get an object by address, and returns Streamer instance. func (s *Service) Get(ctx context.Context, prm Prm) error { - return s.get(ctx, prm.commonPrm).err + return s.get(ctx, RequestParameters{ + commonPrm: prm.commonPrm, + }) } // GetRange serves a request to get an object by address, and returns Streamer instance. func (s *Service) GetRange(ctx context.Context, prm RangePrm) error { - return s.getRange(ctx, prm) + return s.get(ctx, RequestParameters{ + commonPrm: prm.commonPrm, + rng: prm.rng, + }) } - -func (s *Service) getRange(ctx context.Context, prm RangePrm, opts ...execOption) error { - return s.get(ctx, prm.commonPrm, append(opts, withPayloadRange(prm.rng))...).err -} - func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHashRes, error) { hashes := make([][]byte, 0, len(prm.rngs)) @@ -33,16 +34,15 @@ func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHas // 1. Potential gains are insignificant when operating in the Internet given typical latencies and losses. // 2. Parallel solution is more complex in terms of code. // 3. TZ-hash is likely to be disabled in private installations. 
- rngPrm := RangePrm{ + reqPrm := RequestParameters{ commonPrm: prm.commonPrm, + rng: &rng, } - - rngPrm.SetRange(&rng) - rngPrm.SetChunkWriter(&hasherWrapper{ + reqPrm.SetChunkWriter(&hasherWrapper{ hash: util.NewSaltingWriter(h, prm.salt), }) - if err := s.getRange(ctx, rngPrm); err != nil { + if err := s.get(ctx, reqPrm); err != nil { return nil, err } @@ -59,31 +59,33 @@ func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHas // Returns ErrNotFound if the header was not received for the call. // Returns SplitInfoError if object is virtual and raw flag is set. func (s *Service) Head(ctx context.Context, prm HeadPrm) error { - return s.get(ctx, prm.commonPrm, headOnly()).err + return s.get(ctx, RequestParameters{ + head: true, + commonPrm: prm.commonPrm, + }) } -func (s *Service) get(ctx context.Context, prm commonPrm, opts ...execOption) statusError { - exec := &execCtx{ - svc: s, - prm: RangePrm{ - commonPrm: prm, - }, - infoSplit: object.NewSplitInfo(), - } +func (s *Service) get(ctx context.Context, prm RequestParameters) error { + exec := &request{ + keyStore: s.keyStore, + traverserGenerator: s.traverserGenerator, + remoteStorageConstructor: s.remoteStorageConstructor, + epochSource: s.epochSource, + localStorage: s.localStorage, - for i := range opts { - opts[i](exec) + prm: prm, + infoSplit: object.NewSplitInfo(), } exec.setLogger(s.log) exec.execute(ctx) - return exec.statusError + return exec.statusError.err } -func (exec *execCtx) execute(ctx context.Context) { - exec.log.Debug("serving request...") +func (exec *request) execute(ctx context.Context) { + exec.log.Debug(logs.ServingRequest) // perform local operation exec.executeLocal(ctx) @@ -91,21 +93,21 @@ func (exec *execCtx) execute(ctx context.Context) { exec.analyzeStatus(ctx, true) } -func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) { +func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) { // analyze local result switch exec.status { 
case statusOK: - exec.log.Debug("operation finished successfully") + exec.log.Debug(logs.OperationFinishedSuccessfully) case statusINHUMED: - exec.log.Debug("requested object was marked as removed") + exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved) case statusVIRTUAL: - exec.log.Debug("requested object is virtual") + exec.log.Debug(logs.GetRequestedObjectIsVirtual) exec.assemble(ctx) case statusOutOfRange: - exec.log.Debug("requested range is out of object bounds") + exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds) default: - exec.log.Debug("operation finished with error", - zap.String("error", exec.err.Error()), + exec.log.Debug(logs.OperationFinishedWithError, + zap.Error(exec.err), ) if execCnr { diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go index 319bc6b58..9c5506064 100644 --- a/pkg/services/object/get/get_test.go +++ b/pkg/services/object/get/get_test.go @@ -2,6 +2,7 @@ package getsvc import ( "context" + "crypto/ecdsa" "crypto/rand" "errors" "fmt" @@ -56,7 +57,7 @@ type testClient struct { type testEpochReceiver uint64 -func (e testEpochReceiver) currentEpoch() (uint64, error) { +func (e testEpochReceiver) Epoch() (uint64, error) { return uint64(e), nil } @@ -99,7 +100,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap. 
return vs, nil } -func (c *testClientCache) get(info client.NodeInfo) (getClient, error) { +func (c *testClientCache) Get(info client.NodeInfo) (remoteStorage, error) { v, ok := c.clients[network.StringifyGroup(info.AddressGroup())] if !ok { return nil, errors.New("could not construct client") @@ -117,8 +118,15 @@ func newTestClient() *testClient { } } -func (c *testClient) getObject(ctx context.Context, exec *execCtx, _ client.NodeInfo) (*objectSDK.Object, error) { - v, ok := c.results[exec.address().EncodeToString()] +func (c *testClient) addResult(addr oid.Address, obj *objectSDK.Object, err error) { + c.results[addr.EncodeToString()] = struct { + obj *objectSDK.Object + err error + }{obj: obj, err: err} +} + +func (c *testClient) Get(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) { + v, ok := c.results[address.EncodeToString()] if !ok { var errNotFound apistatus.ObjectNotFound @@ -129,21 +137,38 @@ func (c *testClient) getObject(ctx context.Context, exec *execCtx, _ client.Node return nil, v.err } - return cutToRange(v.obj, exec.ctxRange()), nil + return v.obj, nil } -func (c *testClient) addResult(addr oid.Address, obj *objectSDK.Object, err error) { - c.results[addr.EncodeToString()] = struct { - obj *objectSDK.Object - err error - }{obj: obj, err: err} +func (c *testClient) Head(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) { + return c.Get(ctx, address, requestParams) } -func (s *testStorage) get(_ context.Context, exec *execCtx) (*objectSDK.Object, error) { +func (c *testClient) Range(ctx context.Context, address oid.Address, rng *objectSDK.Range, requestParams RemoteRequestParams) (*objectSDK.Object, error) { + obj, err := c.Get(ctx, address, requestParams) + if err != nil { + return nil, err + } + return cutToRange(obj, rng), nil +} + +func (c *testClient) ForwardRequest(ctx context.Context, info client.NodeInfo, forwarder RequestForwarder) 
(*objectSDK.Object, error) { + return nil, fmt.Errorf("not implemented") +} + +func (s *testStorage) Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error) { + return s.Range(ctx, address, nil) +} + +func (s *testStorage) Head(ctx context.Context, address oid.Address, isRaw bool) (*objectSDK.Object, error) { + return s.Range(ctx, address, nil) +} + +func (s *testStorage) Range(_ context.Context, address oid.Address, rng *objectSDK.Range) (*objectSDK.Object, error) { var ( ok bool obj *objectSDK.Object - sAddr = exec.address().EncodeToString() + sAddr = address.EncodeToString() ) if _, ok = s.inhumed[sAddr]; ok { @@ -157,7 +182,7 @@ func (s *testStorage) get(_ context.Context, exec *execCtx) (*objectSDK.Object, } if obj, ok = s.phy[sAddr]; ok { - return cutToRange(obj, exec.ctxRange()), nil + return cutToRange(obj, rng), nil } var errNotFound apistatus.ObjectNotFound @@ -241,15 +266,21 @@ func (w *writePayloadErrorObjectWriter) WriteChunk(_ context.Context, _ []byte) return &writePayloadError{} } +type testKeyStorage struct { +} + +func (ks *testKeyStorage) GetKey(_ *util.SessionInfo) (*ecdsa.PrivateKey, error) { + return &ecdsa.PrivateKey{}, nil +} + func TestGetLocalOnly(t *testing.T) { ctx := context.Background() newSvc := func(storage *testStorage) *Service { - svc := &Service{cfg: new(cfg)} - svc.log = test.NewLogger(t, false) - svc.localStorage = storage - - return svc + return &Service{ + log: test.NewLogger(t, false), + localStorage: storage, + } } newPrm := func(raw bool, w ObjectWriter) Prm { @@ -506,22 +537,21 @@ func TestGetRemoteSmall(t *testing.T) { container.CalculateID(&idCnr, cnr) newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service { - svc := &Service{cfg: new(cfg)} - svc.log = test.NewLogger(t, false) - svc.localStorage = newTestStorage() - const curEpoch = 13 - svc.traverserGenerator = &testTraverserGenerator{ - c: cnr, - b: map[uint64]placement.Builder{ - curEpoch: b, + return &Service{ + log: test.NewLogger(t, 
false), + localStorage: newTestStorage(), + traverserGenerator: &testTraverserGenerator{ + c: cnr, + b: map[uint64]placement.Builder{ + curEpoch: b, + }, }, + epochSource: testEpochReceiver(curEpoch), + remoteStorageConstructor: c, + keyStore: &testKeyStorage{}, } - svc.clientCache = c - svc.currentEpochReceiver = testEpochReceiver(curEpoch) - - return svc } newPrm := func(raw bool, w ObjectWriter) Prm { @@ -1176,7 +1206,7 @@ func TestGetRemoteSmall(t *testing.T) { err := svc.Get(ctx, p) require.Error(t, err) - require.Equal(t, err.Error(), "received child with empty parent") + require.ErrorIs(t, err, errChildWithEmptyParent) w = NewSimpleObjectWriter() payloadSz := srcObj.PayloadSize() @@ -1189,7 +1219,7 @@ func TestGetRemoteSmall(t *testing.T) { err = svc.GetRange(ctx, rngPrm) require.Error(t, err) - require.Equal(t, err.Error(), "received child with empty parent") + require.ErrorIs(t, err, errChildWithEmptyParent) }) t.Run("out of range", func(t *testing.T) { @@ -1639,39 +1669,38 @@ func TestGetFromPastEpoch(t *testing.T) { c22 := newTestClient() c22.addResult(addr, obj, nil) - svc := &Service{cfg: new(cfg)} - svc.log = test.NewLogger(t, false) - svc.localStorage = newTestStorage() - const curEpoch = 13 - svc.traverserGenerator = &testTraverserGenerator{ - c: cnr, - b: map[uint64]placement.Builder{ - curEpoch: &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns[:1], + svc := &Service{ + log: test.NewLogger(t, false), + localStorage: newTestStorage(), + epochSource: testEpochReceiver(curEpoch), + traverserGenerator: &testTraverserGenerator{ + c: cnr, + b: map[uint64]placement.Builder{ + curEpoch: &testPlacementBuilder{ + vectors: map[string][][]netmap.NodeInfo{ + addr.EncodeToString(): ns[:1], + }, }, - }, - curEpoch - 1: &testPlacementBuilder{ - vectors: map[string][][]netmap.NodeInfo{ - addr.EncodeToString(): ns[1:], + curEpoch - 1: &testPlacementBuilder{ + vectors: map[string][][]netmap.NodeInfo{ + 
addr.EncodeToString(): ns[1:], + }, }, }, }, - } - - svc.clientCache = &testClientCache{ - clients: map[string]*testClient{ - as[0][0]: c11, - as[0][1]: c12, - as[1][0]: c21, - as[1][1]: c22, + remoteStorageConstructor: &testClientCache{ + clients: map[string]*testClient{ + as[0][0]: c11, + as[0][1]: c12, + as[1][0]: c21, + as[1][1]: c22, + }, }, + keyStore: &testKeyStorage{}, } - svc.currentEpochReceiver = testEpochReceiver(curEpoch) - w := NewSimpleObjectWriter() commonPrm := new(util.CommonPrm) diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go index 82ed911e4..62dde3281 100644 --- a/pkg/services/object/get/local.go +++ b/pkg/services/object/get/local.go @@ -5,12 +5,13 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.uber.org/zap" ) -func (exec *execCtx) executeLocal(ctx context.Context) { +func (r *request) executeLocal(ctx context.Context) { ctx, span := tracing.StartSpanFromContext(ctx, "getService.executeLocal") defer func() { span.End() @@ -18,7 +19,7 @@ func (exec *execCtx) executeLocal(ctx context.Context) { var err error - exec.collectedObject, err = exec.svc.localStorage.get(ctx, exec) + r.collectedObject, err = r.get(ctx) var errSplitInfo *objectSDK.SplitInfoError var errRemoved apistatus.ObjectAlreadyRemoved @@ -26,25 +27,33 @@ func (exec *execCtx) executeLocal(ctx context.Context) { switch { default: - exec.status = statusUndefined - exec.err = err + r.status = statusUndefined + r.err = err - exec.log.Debug("local get failed", - zap.String("error", err.Error()), - ) + r.log.Debug(logs.GetLocalGetFailed, zap.Error(err)) case err == nil: - exec.status = statusOK - exec.err = nil - exec.writeCollectedObject(ctx) + r.status = statusOK + r.err = nil + r.writeCollectedObject(ctx) case 
errors.As(err, &errRemoved): - exec.status = statusINHUMED - exec.err = errRemoved + r.status = statusINHUMED + r.err = errRemoved case errors.As(err, &errSplitInfo): - exec.status = statusVIRTUAL - mergeSplitInfo(exec.splitInfo(), errSplitInfo.SplitInfo()) - exec.err = objectSDK.NewSplitInfoError(exec.infoSplit) + r.status = statusVIRTUAL + mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo()) + r.err = objectSDK.NewSplitInfoError(r.infoSplit) case errors.As(err, &errOutOfRange): - exec.status = statusOutOfRange - exec.err = errOutOfRange + r.status = statusOutOfRange + r.err = errOutOfRange } } + +func (r *request) get(ctx context.Context) (*objectSDK.Object, error) { + if r.headOnly() { + return r.localStorage.Head(ctx, r.address(), r.isRaw()) + } + if rng := r.ctxRange(); rng != nil { + return r.localStorage.Range(ctx, r.address(), rng) + } + return r.localStorage.Get(ctx, r.address()) +} diff --git a/pkg/services/object/get/prm.go b/pkg/services/object/get/prm.go index 7a0f1e062..cbdb7a3e2 100644 --- a/pkg/services/object/get/prm.go +++ b/pkg/services/object/get/prm.go @@ -3,12 +3,11 @@ package getsvc import ( "context" "crypto/ecdsa" - "errors" "hash" coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) @@ -21,14 +20,9 @@ type Prm struct { type RangePrm struct { commonPrm - rng *object.Range + rng *objectSDK.Range } -var ( - errRangeZeroLength = errors.New("zero range length") - errRangeOverflow = errors.New("range overflow") -) - // Validate pre-validates `OBJECTRANGE` request's parameters content // without access to the requested object's payload. 
func (p RangePrm) Validate() error { @@ -54,12 +48,18 @@ type RangeHashPrm struct { hashGen func() hash.Hash - rngs []object.Range + rngs []objectSDK.Range salt []byte } -type RequestForwarder func(context.Context, coreclient.NodeInfo, coreclient.MultiAddressClient) (*object.Object, error) +type RequestParameters struct { + commonPrm + head bool + rng *objectSDK.Range +} + +type RequestForwarder func(context.Context, coreclient.NodeInfo, coreclient.MultiAddressClient) (*objectSDK.Object, error) // HeadPrm groups parameters of Head service call. type HeadPrm struct { @@ -83,43 +83,25 @@ type commonPrm struct { signerKey *ecdsa.PrivateKey } -// ChunkWriter is an interface of target component -// to write payload chunk. -type ChunkWriter interface { - WriteChunk(context.Context, []byte) error -} - -// HeaderWriter is an interface of target component -// to write object header. -type HeaderWriter interface { - WriteHeader(context.Context, *object.Object) error -} - -// ObjectWriter is an interface of target component to write object. -type ObjectWriter interface { - HeaderWriter - ChunkWriter -} - // SetObjectWriter sets target component to write the object. func (p *Prm) SetObjectWriter(w ObjectWriter) { p.objWriter = w } // SetChunkWriter sets target component to write the object payload range. -func (p *RangePrm) SetChunkWriter(w ChunkWriter) { +func (p *commonPrm) SetChunkWriter(w ChunkWriter) { p.objWriter = &partWriter{ chunkWriter: w, } } // SetRange sets range of the requested payload data. -func (p *RangePrm) SetRange(rng *object.Range) { +func (p *RangePrm) SetRange(rng *objectSDK.Range) { p.rng = rng } // SetRangeList sets a list of object payload ranges. -func (p *RangeHashPrm) SetRangeList(rngs []object.Range) { +func (p *RangeHashPrm) SetRangeList(rngs []objectSDK.Range) { p.rngs = rngs } @@ -158,7 +140,7 @@ func (p *commonPrm) WithCachedSignerKey(signerKey *ecdsa.PrivateKey) { } // SetHeaderWriter sets target component to write the object header. 
-func (p *HeadPrm) SetHeaderWriter(w HeaderWriter) { +func (p *commonPrm) SetHeaderWriter(w HeaderWriter) { p.objWriter = &partWriter{ headWriter: w, } diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go index 697e48ee2..69bdbf271 100644 --- a/pkg/services/object/get/remote.go +++ b/pkg/services/object/get/remote.go @@ -5,24 +5,25 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.uber.org/zap" ) -func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool { +func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool { ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode") defer span.End() - exec.log.Debug("processing node...") + r.log.Debug(logs.ProcessingNode) - client, ok := exec.remoteClient(info) + rs, ok := r.getRemoteStorage(info) if !ok { return true } - obj, err := client.getObject(ctx, exec, info) + obj, err := r.getRemote(ctx, rs, info) var errSplitInfo *objectSDK.SplitInfoError var errRemoved *apistatus.ObjectAlreadyRemoved @@ -32,34 +33,66 @@ func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool default: var errNotFound apistatus.ObjectNotFound - exec.status = statusUndefined - exec.err = errNotFound + r.status = statusUndefined + r.err = errNotFound - exec.log.Debug("remote call failed", - zap.String("error", err.Error()), - ) + r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err)) case err == nil: - exec.status = statusOK - exec.err = nil + r.status = statusOK + r.err = nil // both object and err are nil only if the original // request was forwarded to another node and the object // has already been streamed to the requesting party if obj != nil { - 
exec.collectedObject = obj - exec.writeCollectedObject(ctx) + r.collectedObject = obj + r.writeCollectedObject(ctx) } case errors.As(err, &errRemoved): - exec.status = statusINHUMED - exec.err = errRemoved + r.status = statusINHUMED + r.err = errRemoved case errors.As(err, &errOutOfRange): - exec.status = statusOutOfRange - exec.err = errOutOfRange + r.status = statusOutOfRange + r.err = errOutOfRange case errors.As(err, &errSplitInfo): - exec.status = statusVIRTUAL - mergeSplitInfo(exec.splitInfo(), errSplitInfo.SplitInfo()) - exec.err = objectSDK.NewSplitInfoError(exec.infoSplit) + r.status = statusVIRTUAL + mergeSplitInfo(r.splitInfo(), errSplitInfo.SplitInfo()) + r.err = objectSDK.NewSplitInfoError(r.infoSplit) } - return exec.status != statusUndefined + return r.status != statusUndefined +} + +func (r *request) getRemote(ctx context.Context, rs remoteStorage, info client.NodeInfo) (*objectSDK.Object, error) { + if r.isForwardingEnabled() { + return rs.ForwardRequest(ctx, info, r.prm.forwarder) + } + + key, err := r.key() + if err != nil { + return nil, err + } + + prm := RemoteRequestParams{ + Epoch: r.curProcEpoch, + TTL: r.prm.common.TTL(), + PrivateKey: key, + SessionToken: r.prm.common.SessionToken(), + BearerToken: r.prm.common.BearerToken(), + XHeaders: r.prm.common.XHeaders(), + IsRaw: r.isRaw(), + } + + if r.headOnly() { + return rs.Head(ctx, r.address(), prm) + } + // we don't specify payload writer because we accumulate + // the object locally (even huge). + if rng := r.ctxRange(); rng != nil { + // Current spec allows other storage node to deny access, + // fallback to GET here. 
+ return rs.Range(ctx, r.address(), rng, prm) + } + + return rs.Get(ctx, r.address(), prm) } diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go new file mode 100644 index 000000000..b9223a637 --- /dev/null +++ b/pkg/services/object/get/request.go @@ -0,0 +1,244 @@ +package getsvc + +import ( + "context" + "crypto/ecdsa" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.uber.org/zap" +) + +type request struct { + prm RequestParameters + + statusError + + infoSplit *objectSDK.SplitInfo + + log *logger.Logger + + collectedObject *objectSDK.Object + + curProcEpoch uint64 + + keyStore keyStorage + epochSource epochSource + traverserGenerator traverserGenerator + remoteStorageConstructor remoteStorageConstructor + localStorage localStorage +} + +func (r *request) setLogger(l *logger.Logger) { + req := "GET" + if r.headOnly() { + req = "HEAD" + } else if r.ctxRange() != nil { + req = "GET_RANGE" + } + + r.log = &logger.Logger{Logger: l.With( + zap.String("request", req), + zap.Stringer("address", r.address()), + zap.Bool("raw", r.isRaw()), + zap.Bool("local", r.isLocal()), + zap.Bool("with session", r.prm.common.SessionToken() != nil), + zap.Bool("with bearer", r.prm.common.BearerToken() != nil), + )} +} + +func (r *request) isLocal() bool { + return r.prm.common.LocalOnly() +} + +func (r *request) isRaw() bool { + return r.prm.raw +} + +func (r *request) address() oid.Address { + return r.prm.addr +} + +func (r *request) key() 
(*ecdsa.PrivateKey, error) { + if r.prm.signerKey != nil { + // the key has already been requested and + // cached in the previous operations + return r.prm.signerKey, nil + } + + var sessionInfo *util.SessionInfo + + if tok := r.prm.common.SessionToken(); tok != nil { + sessionInfo = &util.SessionInfo{ + ID: tok.ID(), + Owner: tok.Issuer(), + } + } + + return r.keyStore.GetKey(sessionInfo) +} + +func (r *request) canAssemble() bool { + return !r.isRaw() && !r.headOnly() +} + +func (r *request) splitInfo() *objectSDK.SplitInfo { + return r.infoSplit +} + +func (r *request) containerID() cid.ID { + return r.address().Container() +} + +func (r *request) ctxRange() *objectSDK.Range { + return r.prm.rng +} + +func (r *request) headOnly() bool { + return r.prm.head +} + +func (r *request) netmapEpoch() uint64 { + return r.prm.common.NetmapEpoch() +} + +func (r *request) netmapLookupDepth() uint64 { + return r.prm.common.NetmapLookupDepth() +} + +func (r *request) initEpoch() bool { + r.curProcEpoch = r.netmapEpoch() + if r.curProcEpoch > 0 { + return true + } + + e, err := r.epochSource.Epoch() + + switch { + default: + r.status = statusUndefined + r.err = err + + r.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.Error(err)) + + return false + case err == nil: + r.curProcEpoch = e + return true + } +} + +func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, bool) { + obj := addr.Object() + + t, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch) + + switch { + default: + r.status = statusUndefined + r.err = err + + r.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.Error(err)) + + return nil, false + case err == nil: + return t, true + } +} + +func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, bool) { + rs, err := r.remoteStorageConstructor.Get(info) + if err != nil { + r.status = statusUndefined + r.err = err + + r.log.Debug(logs.GetCouldNotConstructRemoteNodeClient) + + 
return nil, false + } + + return rs, true +} + +func (r *request) writeCollectedHeader(ctx context.Context) bool { + if r.ctxRange() != nil { + return true + } + + err := r.prm.objWriter.WriteHeader( + ctx, + r.collectedObject.CutPayload(), + ) + + switch { + default: + r.status = statusUndefined + r.err = err + + r.log.Debug(logs.GetCouldNotWriteHeader, zap.Error(err)) + case err == nil: + r.status = statusOK + r.err = nil + } + + return r.status == statusOK +} + +func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object) bool { + if r.headOnly() { + return true + } + + err := r.prm.objWriter.WriteChunk(ctx, obj.Payload()) + + switch { + default: + r.status = statusUndefined + r.err = err + + r.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.Error(err)) + case err == nil: + r.status = statusOK + r.err = nil + } + + return err == nil +} + +func (r *request) writeCollectedObject(ctx context.Context) { + if ok := r.writeCollectedHeader(ctx); ok { + r.writeObjectPayload(ctx, r.collectedObject) + } +} + +// isForwardingEnabled returns true if common execution +// parameters has request forwarding closure set. +func (r request) isForwardingEnabled() bool { + return r.prm.forwarder != nil +} + +// disableForwarding removes request forwarding closure from common +// parameters, so it won't be inherited in new execution contexts. 
+func (r *request) disableForwarding() { + r.prm.SetRequestForwarder(nil) +} + +func mergeSplitInfo(dst, src *objectSDK.SplitInfo) { + if last, ok := src.LastPart(); ok { + dst.SetLastPart(last) + } + + if link, ok := src.Link(); ok { + dst.SetLink(link) + } + + if splitID := src.SplitID(); splitID != nil { + dst.SetSplitID(splitID) + } +} diff --git a/pkg/services/object/get/res.go b/pkg/services/object/get/res.go deleted file mode 100644 index 75a5aaedd..000000000 --- a/pkg/services/object/get/res.go +++ /dev/null @@ -1,9 +0,0 @@ -package getsvc - -type RangeHashRes struct { - hashes [][]byte -} - -func (r *RangeHashRes) Hashes() [][]byte { - return r.hashes -} diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go index a9391d016..bdf01a977 100644 --- a/pkg/services/object/get/service.go +++ b/pkg/services/object/get/service.go @@ -1,124 +1,54 @@ package getsvc import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" ) +// Option is a Service's constructor option. +type Option func(*Service) + // Service utility serving requests of Object.Get service. type Service struct { - *cfg -} - -// Option is a Service's constructor option. 
-type Option func(*cfg) - -type getClient interface { - getObject(context.Context, *execCtx, client.NodeInfo) (*object.Object, error) -} - -type cfg struct { - log *logger.Logger - - localStorage interface { - get(context.Context, *execCtx) (*object.Object, error) - } - - clientCache interface { - get(client.NodeInfo) (getClient, error) - } - - traverserGenerator interface { - GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, error) - } - - currentEpochReceiver interface { - currentEpoch() (uint64, error) - } - - keyStore *util.KeyStorage -} - -func defaultCfg() *cfg { - return &cfg{ - log: &logger.Logger{Logger: zap.L()}, - localStorage: new(storageEngineWrapper), - clientCache: new(clientCacheWrapper), - } + log *logger.Logger + localStorage localStorage + traverserGenerator traverserGenerator + epochSource epochSource + keyStore keyStorage + remoteStorageConstructor remoteStorageConstructor } // New creates, initializes and returns utility serving // Object.Get service requests. -func New(opts ...Option) *Service { - c := defaultCfg() - - for i := range opts { - opts[i](c) +func New( + ks keyStorage, + es epochSource, + e localStorageEngine, + tg traverserGenerator, + cc clientConstructor, + opts ...Option, +) *Service { + result := &Service{ + keyStore: ks, + epochSource: es, + log: &logger.Logger{Logger: zap.L()}, + localStorage: &engineLocalStorage{ + engine: e, + }, + traverserGenerator: tg, + remoteStorageConstructor: &multiclientRemoteStorageConstructor{ + clientConstructor: cc, + }, } - - return &Service{ - cfg: c, + for _, option := range opts { + option(result) } + return result } // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { - return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))} - } -} - -// WithLocalStorageEngine returns option to set local storage -// instance. 
-func WithLocalStorageEngine(e *engine.StorageEngine) Option { - return func(c *cfg) { - c.localStorage.(*storageEngineWrapper).engine = e - } -} - -type ClientConstructor interface { - Get(client.NodeInfo) (client.MultiAddressClient, error) -} - -// WithClientConstructor returns option to set constructor of remote node clients. -func WithClientConstructor(v ClientConstructor) Option { - return func(c *cfg) { - c.clientCache.(*clientCacheWrapper).cache = v - } -} - -// WithTraverserGenerator returns option to set generator of -// placement traverser to get the objects from containers. -func WithTraverserGenerator(t *util.TraverserGenerator) Option { - return func(c *cfg) { - c.traverserGenerator = t - } -} - -// WithNetMapSource returns option to set network -// map storage to receive current network state. -func WithNetMapSource(nmSrc netmap.Source) Option { - return func(c *cfg) { - c.currentEpochReceiver = &nmSrcWrapper{ - nmSrc: nmSrc, - } - } -} - -// WithKeyStorage returns option to set private -// key storage for session tokens and node key. 
-func WithKeyStorage(store *util.KeyStorage) Option { - return func(c *cfg) { - c.keyStore = store + return func(s *Service) { + s.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))} } } diff --git a/pkg/services/object/get/status.go b/pkg/services/object/get/status.go new file mode 100644 index 000000000..3a5eebe32 --- /dev/null +++ b/pkg/services/object/get/status.go @@ -0,0 +1,14 @@ +package getsvc + +const ( + statusUndefined int = iota + statusOK + statusINHUMED + statusVIRTUAL + statusOutOfRange +) + +type statusError struct { + status int + err error +} diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go new file mode 100644 index 000000000..a866132cc --- /dev/null +++ b/pkg/services/object/get/types.go @@ -0,0 +1,238 @@ +package getsvc + +import ( + "context" + "crypto/ecdsa" + "errors" + + coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" +) + +type epochSource interface { + Epoch() (uint64, error) +} + +type traverserGenerator interface { + GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, error) +} + +type keyStorage interface { + GetKey(info *util.SessionInfo) (*ecdsa.PrivateKey, error) +} + +type localStorageEngine interface { + Head(ctx context.Context, 
p engine.HeadPrm) (engine.HeadRes, error) + GetRange(ctx context.Context, p engine.RngPrm) (engine.RngRes, error) + Get(ctx context.Context, p engine.GetPrm) (engine.GetRes, error) +} + +type clientConstructor interface { + Get(coreclient.NodeInfo) (coreclient.MultiAddressClient, error) +} + +type remoteStorageConstructor interface { + Get(coreclient.NodeInfo) (remoteStorage, error) +} + +type multiclientRemoteStorageConstructor struct { + clientConstructor clientConstructor +} + +func (c *multiclientRemoteStorageConstructor) Get(info coreclient.NodeInfo) (remoteStorage, error) { + clt, err := c.clientConstructor.Get(info) + if err != nil { + return nil, err + } + + return &multiaddressRemoteStorage{ + client: clt, + }, nil +} + +type localStorage interface { + Head(ctx context.Context, address oid.Address, isRaw bool) (*objectSDK.Object, error) + Range(ctx context.Context, address oid.Address, rng *objectSDK.Range) (*objectSDK.Object, error) + Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error) +} + +type engineLocalStorage struct { + engine localStorageEngine +} + +func (s *engineLocalStorage) Head(ctx context.Context, address oid.Address, isRaw bool) (*objectSDK.Object, error) { + var headPrm engine.HeadPrm + headPrm.WithAddress(address) + headPrm.WithRaw(isRaw) + + r, err := s.engine.Head(ctx, headPrm) + if err != nil { + return nil, err + } + + return r.Header(), nil +} + +func (s *engineLocalStorage) Range(ctx context.Context, address oid.Address, rng *objectSDK.Range) (*objectSDK.Object, error) { + var getRange engine.RngPrm + getRange.WithAddress(address) + getRange.WithPayloadRange(rng) + + r, err := s.engine.GetRange(ctx, getRange) + if err != nil { + return nil, err + } + + return r.Object(), nil +} + +func (s *engineLocalStorage) Get(ctx context.Context, address oid.Address) (*objectSDK.Object, error) { + var getPrm engine.GetPrm + getPrm.WithAddress(address) + + r, err := s.engine.Get(ctx, getPrm) + if err != nil { + return nil, 
err + } + + return r.Object(), nil +} + +type RemoteRequestParams struct { + Epoch uint64 + TTL uint32 + PrivateKey *ecdsa.PrivateKey + SessionToken *session.Object + BearerToken *bearer.Token + XHeaders []string + IsRaw bool +} + +type remoteStorage interface { + Get(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) + Head(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) + Range(ctx context.Context, address oid.Address, rng *objectSDK.Range, requestParams RemoteRequestParams) (*objectSDK.Object, error) + + ForwardRequest(ctx context.Context, info coreclient.NodeInfo, forwarder RequestForwarder) (*objectSDK.Object, error) +} + +type multiaddressRemoteStorage struct { + client coreclient.MultiAddressClient +} + +func (s *multiaddressRemoteStorage) ForwardRequest(ctx context.Context, info coreclient.NodeInfo, forwarder RequestForwarder) (*objectSDK.Object, error) { + return forwarder(ctx, info, s.client) +} + +func (s *multiaddressRemoteStorage) Range(ctx context.Context, address oid.Address, rng *objectSDK.Range, requestParams RemoteRequestParams) (*objectSDK.Object, error) { + var prm internalclient.PayloadRangePrm + + prm.SetClient(s.client) + prm.SetTTL(requestParams.TTL) + prm.SetNetmapEpoch(requestParams.Epoch) + prm.SetAddress(address) + prm.SetPrivateKey(requestParams.PrivateKey) + prm.SetSessionToken(requestParams.SessionToken) + prm.SetBearerToken(requestParams.BearerToken) + prm.SetXHeaders(requestParams.XHeaders) + prm.SetRange(rng) + if requestParams.IsRaw { + prm.SetRawFlag() + } + + res, err := internalclient.PayloadRange(ctx, prm) + if err != nil { + var errAccessDenied *apistatus.ObjectAccessDenied + if errors.As(err, &errAccessDenied) { + obj, err := s.Get(ctx, address, requestParams) + if err != nil { + return nil, err + } + + payload := obj.Payload() + from := rng.GetOffset() + to := from + rng.GetLength() + + if pLen := 
uint64(len(payload)); to < from || pLen < from || pLen < to { + return nil, new(apistatus.ObjectOutOfRange) + } + + return s.payloadOnlyObject(payload[from:to]), nil + } + return nil, err + } + + return s.payloadOnlyObject(res.PayloadRange()), nil +} + +func (s *multiaddressRemoteStorage) Head(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) { + var prm internalclient.HeadObjectPrm + + prm.SetClient(s.client) + prm.SetTTL(requestParams.TTL) + prm.SetNetmapEpoch(requestParams.Epoch) + prm.SetAddress(address) + prm.SetPrivateKey(requestParams.PrivateKey) + prm.SetSessionToken(requestParams.SessionToken) + prm.SetBearerToken(requestParams.BearerToken) + prm.SetXHeaders(requestParams.XHeaders) + + if requestParams.IsRaw { + prm.SetRawFlag() + } + + res, err := internalclient.HeadObject(ctx, prm) + if err != nil { + return nil, err + } + + return res.Header(), nil +} + +func (s *multiaddressRemoteStorage) Get(ctx context.Context, address oid.Address, requestParams RemoteRequestParams) (*objectSDK.Object, error) { + var prm internalclient.GetObjectPrm + + prm.SetClient(s.client) + prm.SetTTL(requestParams.TTL) + prm.SetNetmapEpoch(requestParams.Epoch) + prm.SetAddress(address) + prm.SetPrivateKey(requestParams.PrivateKey) + prm.SetSessionToken(requestParams.SessionToken) + prm.SetBearerToken(requestParams.BearerToken) + prm.SetXHeaders(requestParams.XHeaders) + + if requestParams.IsRaw { + prm.SetRawFlag() + } + + res, err := internalclient.GetObject(ctx, prm) + if err != nil { + return nil, err + } + + return res.Object(), nil +} + +func (s *multiaddressRemoteStorage) payloadOnlyObject(payload []byte) *objectSDK.Object { + obj := objectSDK.New() + obj.SetPayload(payload) + + return obj +} + +type RangeHashRes struct { + hashes [][]byte +} + +func (r *RangeHashRes) Hashes() [][]byte { + return r.hashes +} diff --git a/pkg/services/object/get/util.go b/pkg/services/object/get/util.go deleted file mode 100644 index 
dd4ace407..000000000 --- a/pkg/services/object/get/util.go +++ /dev/null @@ -1,261 +0,0 @@ -package getsvc - -import ( - "context" - "crypto/ecdsa" - "errors" - "io" - - coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -type SimpleObjectWriter struct { - obj *object.Object - - pld []byte -} - -type clientCacheWrapper struct { - cache ClientConstructor -} - -type clientWrapper struct { - client coreclient.MultiAddressClient -} - -type storageEngineWrapper struct { - engine *engine.StorageEngine -} - -type partWriter struct { - ObjectWriter - - headWriter HeaderWriter - - chunkWriter ChunkWriter -} - -type hasherWrapper struct { - hash io.Writer -} - -type nmSrcWrapper struct { - nmSrc netmap.Source -} - -func NewSimpleObjectWriter() *SimpleObjectWriter { - return &SimpleObjectWriter{ - obj: object.New(), - } -} - -func (s *SimpleObjectWriter) WriteHeader(_ context.Context, obj *object.Object) error { - s.obj = obj - - s.pld = make([]byte, 0, obj.PayloadSize()) - - return nil -} - -func (s *SimpleObjectWriter) WriteChunk(_ context.Context, p []byte) error { - s.pld = append(s.pld, p...) 
- return nil -} - -func (s *SimpleObjectWriter) Object() *object.Object { - if len(s.pld) > 0 { - s.obj.SetPayload(s.pld) - } - - return s.obj -} - -func (c *clientCacheWrapper) get(info coreclient.NodeInfo) (getClient, error) { - clt, err := c.cache.Get(info) - if err != nil { - return nil, err - } - - return &clientWrapper{ - client: clt, - }, nil -} - -func (c *clientWrapper) getObject(ctx context.Context, exec *execCtx, info coreclient.NodeInfo) (*object.Object, error) { - if exec.isForwardingEnabled() { - return exec.prm.forwarder(ctx, info, c.client) - } - - key, err := exec.key() - if err != nil { - return nil, err - } - - if exec.headOnly() { - return c.getHeadOnly(ctx, exec, key) - } - // we don't specify payload writer because we accumulate - // the object locally (even huge). - if rng := exec.ctxRange(); rng != nil { - // Current spec allows other storage node to deny access, - // fallback to GET here. - return c.getRange(ctx, exec, key, rng) - } - - return c.get(ctx, exec, key) -} - -func (c *clientWrapper) getRange(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey, rng *object.Range) (*object.Object, error) { - var prm internalclient.PayloadRangePrm - - prm.SetClient(c.client) - prm.SetTTL(exec.prm.common.TTL()) - prm.SetNetmapEpoch(exec.curProcEpoch) - prm.SetAddress(exec.address()) - prm.SetPrivateKey(key) - prm.SetSessionToken(exec.prm.common.SessionToken()) - prm.SetBearerToken(exec.prm.common.BearerToken()) - prm.SetXHeaders(exec.prm.common.XHeaders()) - prm.SetRange(rng) - - if exec.isRaw() { - prm.SetRawFlag() - } - - res, err := internalclient.PayloadRange(ctx, prm) - if err != nil { - var errAccessDenied *apistatus.ObjectAccessDenied - if errors.As(err, &errAccessDenied) { - obj, err := c.get(ctx, exec, key) - if err != nil { - return nil, err - } - - payload := obj.Payload() - from := rng.GetOffset() - to := from + rng.GetLength() - - if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to { - return nil, 
new(apistatus.ObjectOutOfRange) - } - - return payloadOnlyObject(payload[from:to]), nil - } - return nil, err - } - - return payloadOnlyObject(res.PayloadRange()), nil -} - -func (c *clientWrapper) getHeadOnly(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey) (*object.Object, error) { - var prm internalclient.HeadObjectPrm - - prm.SetClient(c.client) - prm.SetTTL(exec.prm.common.TTL()) - prm.SetNetmapEpoch(exec.curProcEpoch) - prm.SetAddress(exec.address()) - prm.SetPrivateKey(key) - prm.SetSessionToken(exec.prm.common.SessionToken()) - prm.SetBearerToken(exec.prm.common.BearerToken()) - prm.SetXHeaders(exec.prm.common.XHeaders()) - - if exec.isRaw() { - prm.SetRawFlag() - } - - res, err := internalclient.HeadObject(ctx, prm) - if err != nil { - return nil, err - } - - return res.Header(), nil -} - -func (c *clientWrapper) get(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey) (*object.Object, error) { - var prm internalclient.GetObjectPrm - - prm.SetClient(c.client) - prm.SetTTL(exec.prm.common.TTL()) - prm.SetNetmapEpoch(exec.curProcEpoch) - prm.SetAddress(exec.address()) - prm.SetPrivateKey(key) - prm.SetSessionToken(exec.prm.common.SessionToken()) - prm.SetBearerToken(exec.prm.common.BearerToken()) - prm.SetXHeaders(exec.prm.common.XHeaders()) - - if exec.isRaw() { - prm.SetRawFlag() - } - - res, err := internalclient.GetObject(ctx, prm) - if err != nil { - return nil, err - } - - return res.Object(), nil -} - -func (e *storageEngineWrapper) get(ctx context.Context, exec *execCtx) (*object.Object, error) { - if exec.headOnly() { - var headPrm engine.HeadPrm - headPrm.WithAddress(exec.address()) - headPrm.WithRaw(exec.isRaw()) - - r, err := e.engine.Head(ctx, headPrm) - if err != nil { - return nil, err - } - - return r.Header(), nil - } else if rng := exec.ctxRange(); rng != nil { - var getRange engine.RngPrm - getRange.WithAddress(exec.address()) - getRange.WithPayloadRange(rng) - - r, err := e.engine.GetRange(ctx, getRange) - if err != nil { 
- return nil, err - } - - return r.Object(), nil - } else { - var getPrm engine.GetPrm - getPrm.WithAddress(exec.address()) - - r, err := e.engine.Get(ctx, getPrm) - if err != nil { - return nil, err - } - - return r.Object(), nil - } -} - -func (w *partWriter) WriteChunk(ctx context.Context, p []byte) error { - return w.chunkWriter.WriteChunk(ctx, p) -} - -func (w *partWriter) WriteHeader(ctx context.Context, o *object.Object) error { - return w.headWriter.WriteHeader(ctx, o) -} - -func payloadOnlyObject(payload []byte) *object.Object { - obj := object.New() - obj.SetPayload(payload) - - return obj -} - -func (h *hasherWrapper) WriteChunk(_ context.Context, p []byte) error { - _, err := h.hash.Write(p) - return err -} - -func (n *nmSrcWrapper) currentEpoch() (uint64, error) { - return n.nmSrc.Epoch() -} diff --git a/pkg/services/object/get/v2/errors.go b/pkg/services/object/get/v2/errors.go new file mode 100644 index 000000000..01b57f1f2 --- /dev/null +++ b/pkg/services/object/get/v2/errors.go @@ -0,0 +1,92 @@ +package getsvc + +import ( + "errors" + "fmt" + + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" +) + +var ( + errMissingObjAddress = errors.New("missing object address") + errWrongMessageSeq = errors.New("incorrect message sequence") + errNilObjectPart = errors.New("nil object part") + errMissingSignature = errors.New("missing signature") + errInvalidObjectIDSign = errors.New("invalid object ID signature") + + errWrongHeaderPartTypeExpShortRecvWithSignature = fmt.Errorf("wrong header part type: expected %T, received %T", + (*objectV2.ShortHeader)(nil), (*objectV2.HeaderWithSignature)(nil), + ) + errWrongHeaderPartTypeExpWithSignRecvShort = fmt.Errorf("wrong header part type: expected %T, received %T", + (*objectV2.HeaderWithSignature)(nil), (*objectV2.ShortHeader)(nil), + ) +) + +func errInvalidObjAddress(err error) error { + return fmt.Errorf("invalid object address: %w", 
err) +} + +func errRequestParamsValidation(err error) error { + return fmt.Errorf("request params validation: %w", err) +} + +func errFetchingSessionKey(err error) error { + return fmt.Errorf("fetching session key: %w", err) +} + +func errUnknownChechsumType(t refs.ChecksumType) error { + return fmt.Errorf("unknown checksum type %v", t) +} + +func errResponseVerificationFailed(err error) error { + return fmt.Errorf("response verification failed: %w", err) +} + +func errCouldNotWriteObjHeader(err error) error { + return fmt.Errorf("could not write object header in Get forwarder: %w", err) +} + +func errStreamOpenningFailed(err error) error { + return fmt.Errorf("stream opening failed: %w", err) +} + +func errReadingResponseFailed(err error) error { + return fmt.Errorf("reading the response failed: %w", err) +} + +func errUnexpectedObjectPart(v objectV2.GetObjectPart) error { + return fmt.Errorf("unexpected object part %T", v) +} + +func errCouldNotWriteObjChunk(forwarder string, err error) error { + return fmt.Errorf("could not write object chunk in %s forwarder: %w", forwarder, err) +} + +func errCouldNotVerifyRangeResponse(resp *objectV2.GetRangeResponse, err error) error { + return fmt.Errorf("could not verify %T: %w", resp, err) +} + +func errCouldNotCreateGetRangeStream(err error) error { + return fmt.Errorf("could not create Get payload range stream: %w", err) +} + +func errUnexpectedRangePart(v objectV2.GetRangePart) error { + return fmt.Errorf("unexpected range type %T", v) +} + +func errUnexpectedHeaderPart(v objectV2.GetHeaderPart) error { + return fmt.Errorf("unexpected header type %T", v) +} + +func errMarshalID(err error) error { + return fmt.Errorf("marshal ID: %w", err) +} + +func errCantReadSignature(err error) error { + return fmt.Errorf("can't read signature: %w", err) +} + +func errSendingRequestFailed(err error) error { + return fmt.Errorf("sending the request failed: %w", err) +} diff --git a/pkg/services/object/get/v2/get_forwarder.go 
b/pkg/services/object/get/v2/get_forwarder.go index 8163ae928..d11f94b26 100644 --- a/pkg/services/object/get/v2/get_forwarder.go +++ b/pkg/services/object/get/v2/get_forwarder.go @@ -4,7 +4,6 @@ import ( "context" "crypto/ecdsa" "errors" - "fmt" "io" "sync" @@ -71,13 +70,10 @@ func (f *getRequestForwarder) verifyResponse(resp *objectV2.GetResponse, pubkey // verify response structure if err := signature.VerifyServiceMessage(resp); err != nil { - return fmt.Errorf("response verification failed: %w", err) + return errResponseVerificationFailed(err) } - if err := checkStatus(resp.GetMetaHeader().GetStatus()); err != nil { - return err - } - return nil + return checkStatus(resp.GetMetaHeader().GetStatus()) } func (f *getRequestForwarder) writeHeader(ctx context.Context, v *objectV2.GetObjectPartInit) error { @@ -92,20 +88,20 @@ func (f *getRequestForwarder) writeHeader(ctx context.Context, v *objectV2.GetOb err = f.Stream.WriteHeader(ctx, object.NewFromV2(obj)) }) if err != nil { - return fmt.Errorf("could not write object header in Get forwarder: %w", err) + return errCouldNotWriteObjHeader(err) } return nil } func (f *getRequestForwarder) openStream(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*rpc.GetResponseReader, error) { var getStream *rpc.GetResponseReader - err := c.RawForAddress(addr, func(cli *rpcclient.Client) error { + err := c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error { var e error getStream, e = rpc.GetObject(cli, f.Request, rpcclient.WithContext(ctx)) return e }) if err != nil { - return nil, fmt.Errorf("stream opening failed: %w", err) + return nil, errStreamOpenningFailed(err) } return getStream, nil } @@ -130,7 +126,7 @@ func (f *getRequestForwarder) readStream(ctx context.Context, c client.MultiAddr } internalclient.ReportError(c, err) - return fmt.Errorf("reading the response failed: %w", err) + return errReadingResponseFailed(err) } if err := f.verifyResponse(resp, pubkey); err != nil { @@ -139,7 
+135,7 @@ func (f *getRequestForwarder) readStream(ctx context.Context, c client.MultiAddr switch v := resp.GetBody().GetObjectPart().(type) { default: - return fmt.Errorf("unexpected object part %T", v) + return errUnexpectedObjectPart(v) case *objectV2.GetObjectPartInit: if headWas { return errWrongMessageSeq @@ -162,7 +158,7 @@ func (f *getRequestForwarder) readStream(ctx context.Context, c client.MultiAddr } if err = f.Stream.WriteChunk(ctx, chunk); err != nil { - return fmt.Errorf("could not write object chunk in Get forwarder: %w", err) + return errCouldNotWriteObjChunk("Get", err) } localProgress += len(origChunk) diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go index 9cf6384ed..6c744b23a 100644 --- a/pkg/services/object/get/v2/get_range_forwarder.go +++ b/pkg/services/object/get/v2/get_range_forwarder.go @@ -4,7 +4,6 @@ import ( "context" "crypto/ecdsa" "errors" - "fmt" "io" "sync" @@ -73,25 +72,22 @@ func (f *getRangeRequestForwarder) verifyResponse(resp *objectV2.GetRangeRespons // verify response structure if err := signature.VerifyServiceMessage(resp); err != nil { - return fmt.Errorf("could not verify %T: %w", resp, err) + return errCouldNotVerifyRangeResponse(resp, err) } - if err := checkStatus(resp.GetMetaHeader().GetStatus()); err != nil { - return err - } - return nil + return checkStatus(resp.GetMetaHeader().GetStatus()) } func (f *getRangeRequestForwarder) openStream(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*rpc.ObjectRangeResponseReader, error) { // open stream var rangeStream *rpc.ObjectRangeResponseReader - err := c.RawForAddress(addr, func(cli *rpcclient.Client) error { + err := c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error { var e error rangeStream, e = rpc.GetObjectRange(cli, f.Request, rpcclient.WithContext(ctx)) return e }) if err != nil { - return nil, fmt.Errorf("could not create Get payload range stream: %w", err) + return 
nil, errCouldNotCreateGetRangeStream(err) } return rangeStream, nil } @@ -108,7 +104,7 @@ func (f *getRangeRequestForwarder) readStream(ctx context.Context, rangeStream * break } internalclient.ReportError(c, err) - return fmt.Errorf("reading the response failed: %w", err) + return errReadingResponseFailed(err) } if err := f.verifyResponse(resp, pubkey); err != nil { @@ -117,7 +113,7 @@ func (f *getRangeRequestForwarder) readStream(ctx context.Context, rangeStream * switch v := resp.GetBody().GetRangePart().(type) { case nil: - return fmt.Errorf("unexpected range type %T", v) + return errUnexpectedRangePart(v) case *objectV2.GetRangePartChunk: origChunk := v.GetChunk() @@ -128,7 +124,7 @@ func (f *getRangeRequestForwarder) readStream(ctx context.Context, rangeStream * } if err = f.Stream.WriteChunk(ctx, chunk); err != nil { - return fmt.Errorf("could not write object chunk in GetRange forwarder: %w", err) + return errCouldNotWriteObjChunk("GetRange", err) } localProgress += len(origChunk) diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go index e1d4c02db..0c91ec5d8 100644 --- a/pkg/services/object/get/v2/head_forwarder.go +++ b/pkg/services/object/get/v2/head_forwarder.go @@ -3,8 +3,6 @@ package getsvc import ( "context" "crypto/ecdsa" - "errors" - "fmt" "sync" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" @@ -74,7 +72,7 @@ func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr ne switch v := headResp.GetBody().GetHeaderPart().(type) { case nil: - return nil, fmt.Errorf("unexpected header type %T", v) + return nil, errUnexpectedHeaderPart(v) case *objectV2.ShortHeader: if hdr, err = f.getHeaderFromShortHeader(v); err != nil { return nil, err @@ -100,9 +98,7 @@ func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr ne func (f *headRequestForwarder) getHeaderFromShortHeader(sh *objectV2.ShortHeader) (*objectV2.Header, error) { if 
!f.Request.GetBody().GetMainOnly() { - return nil, fmt.Errorf("wrong header part type: expected %T, received %T", - (*objectV2.ShortHeader)(nil), (*objectV2.HeaderWithSignature)(nil), - ) + return nil, errWrongHeaderPartTypeExpShortRecvWithSignature } hdr := new(objectV2.Header) @@ -118,35 +114,32 @@ func (f *headRequestForwarder) getHeaderFromShortHeader(sh *objectV2.ShortHeader func (f *headRequestForwarder) getHeaderAndSignature(hdrWithSig *objectV2.HeaderWithSignature) (*objectV2.Header, *refs.Signature, error) { if f.Request.GetBody().GetMainOnly() { - return nil, nil, fmt.Errorf("wrong header part type: expected %T, received %T", - (*objectV2.HeaderWithSignature)(nil), (*objectV2.ShortHeader)(nil), - ) + return nil, nil, errWrongHeaderPartTypeExpWithSignRecvShort } if hdrWithSig == nil { - return nil, nil, errors.New("nil object part") + return nil, nil, errNilObjectPart } hdr := hdrWithSig.GetHeader() idSig := hdrWithSig.GetSignature() if idSig == nil { - // TODO(@cthulhu-rider): #1387 use "const" error - return nil, nil, errors.New("missing signature") + return nil, nil, errMissingSignature } binID, err := f.ObjectAddr.Object().Marshal() if err != nil { - return nil, nil, fmt.Errorf("marshal ID: %w", err) + return nil, nil, errMarshalID(err) } var sig frostfscrypto.Signature if err := sig.ReadFromV2(*idSig); err != nil { - return nil, nil, fmt.Errorf("can't read signature: %w", err) + return nil, nil, errCantReadSignature(err) } if !sig.Verify(binID) { - return nil, nil, errors.New("invalid object ID signature") + return nil, nil, errInvalidObjectIDSign } return hdr, idSig, nil @@ -154,13 +147,13 @@ func (f *headRequestForwarder) getHeaderAndSignature(hdrWithSig *objectV2.Header func (f *headRequestForwarder) sendHeadRequest(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*objectV2.HeadResponse, error) { var headResp *objectV2.HeadResponse - err := c.RawForAddress(addr, func(cli *rpcclient.Client) error { + err := 
c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error { var e error headResp, e = rpc.HeadObject(cli, f.Request, rpcclient.WithContext(ctx)) return e }) if err != nil { - return nil, fmt.Errorf("sending the request failed: %w", err) + return nil, errSendingRequestFailed(err) } return headResp, nil } @@ -173,11 +166,8 @@ func (f *headRequestForwarder) verifyResponse(headResp *objectV2.HeadResponse, p // verify response structure if err := signature.VerifyServiceMessage(headResp); err != nil { - return fmt.Errorf("response verification failed: %w", err) + return errResponseVerificationFailed(err) } - if err := checkStatus(f.Response.GetMetaHeader().GetStatus()); err != nil { - return err - } - return nil + return checkStatus(f.Response.GetMetaHeader().GetStatus()) } diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go index 69bed23f4..91e7a96a2 100644 --- a/pkg/services/object/get/v2/util.go +++ b/pkg/services/object/get/v2/util.go @@ -4,7 +4,6 @@ import ( "context" "crypto/sha256" "errors" - "fmt" "hash" "sync" @@ -24,21 +23,19 @@ import ( "git.frostfs.info/TrueCloudLab/tzhash/tz" ) -var errWrongMessageSeq = errors.New("incorrect message sequence") - func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStream) (*getsvc.Prm, error) { body := req.GetBody() addrV2 := body.GetAddress() if addrV2 == nil { - return nil, errors.New("missing object address") + return nil, errMissingObjAddress } var addr oid.Address err := addr.ReadFromV2(*addrV2) if err != nil { - return nil, fmt.Errorf("invalid object address: %w", err) + return nil, errInvalidObjAddress(err) } commonPrm, err := util.CommonPrmFromV2(req) @@ -81,14 +78,14 @@ func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.Get addrV2 := body.GetAddress() if addrV2 == nil { - return nil, errors.New("missing object address") + return nil, errMissingObjAddress } var addr oid.Address err := addr.ReadFromV2(*addrV2) if err != nil { - return 
nil, fmt.Errorf("invalid object address: %w", err) + return nil, errInvalidObjAddress(err) } commonPrm, err := util.CommonPrmFromV2(req) @@ -108,7 +105,7 @@ func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.Get err = p.Validate() if err != nil { - return nil, fmt.Errorf("request params validation: %w", err) + return nil, errRequestParamsValidation(err) } if !commonPrm.LocalOnly() { @@ -136,14 +133,14 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran addrV2 := body.GetAddress() if addrV2 == nil { - return nil, errors.New("missing object address") + return nil, errMissingObjAddress } var addr oid.Address err := addr.ReadFromV2(*addrV2) if err != nil { - return nil, fmt.Errorf("invalid object address: %w", err) + return nil, errInvalidObjAddress(err) } commonPrm, err := util.CommonPrmFromV2(req) @@ -167,7 +164,7 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran } if err != nil { - return nil, fmt.Errorf("fetching session key: %w", err) + return nil, errFetchingSessionKey(err) } p.WithCachedSignerKey(signerKey) @@ -185,7 +182,7 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran switch t := body.GetType(); t { default: - return nil, fmt.Errorf("unknown checksum type %v", t) + return nil, errUnknownChechsumType(t) case refs.SHA256: p.SetHashGenerator(func() hash.Hash { return sha256.New() @@ -220,14 +217,14 @@ func (s *Service) toHeadPrm(req *objectV2.HeadRequest, resp *objectV2.HeadRespon addrV2 := body.GetAddress() if addrV2 == nil { - return nil, errors.New("missing object address") + return nil, errMissingObjAddress } var objAddr oid.Address err := objAddr.ReadFromV2(*addrV2) if err != nil { - return nil, fmt.Errorf("invalid object address: %w", err) + return nil, errInvalidObjAddress(err) } commonPrm, err := util.CommonPrmFromV2(req) diff --git a/pkg/services/object/get/writer.go b/pkg/services/object/get/writer.go new file mode 100644 
index 000000000..78af5db41 --- /dev/null +++ b/pkg/services/object/get/writer.go @@ -0,0 +1,84 @@ +package getsvc + +import ( + "context" + "io" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" +) + +// ChunkWriter is an interface of target component +// to write payload chunk. +type ChunkWriter interface { + WriteChunk(context.Context, []byte) error +} + +// HeaderWriter is an interface of target component +// to write object header. +type HeaderWriter interface { + WriteHeader(context.Context, *object.Object) error +} + +// ObjectWriter is an interface of target component to write object. +type ObjectWriter interface { + HeaderWriter + ChunkWriter +} + +type SimpleObjectWriter struct { + obj *object.Object + + pld []byte +} + +type partWriter struct { + ObjectWriter + + headWriter HeaderWriter + + chunkWriter ChunkWriter +} + +type hasherWrapper struct { + hash io.Writer +} + +func NewSimpleObjectWriter() *SimpleObjectWriter { + return &SimpleObjectWriter{ + obj: object.New(), + } +} + +func (s *SimpleObjectWriter) WriteHeader(_ context.Context, obj *object.Object) error { + s.obj = obj + + s.pld = make([]byte, 0, obj.PayloadSize()) + + return nil +} + +func (s *SimpleObjectWriter) WriteChunk(_ context.Context, p []byte) error { + s.pld = append(s.pld, p...) 
+ return nil +} + +func (s *SimpleObjectWriter) Object() *object.Object { + if len(s.pld) > 0 { + s.obj.SetPayload(s.pld) + } + + return s.obj +} + +func (w *partWriter) WriteChunk(ctx context.Context, p []byte) error { + return w.chunkWriter.WriteChunk(ctx, p) +} + +func (w *partWriter) WriteHeader(ctx context.Context, o *object.Object) error { + return w.headWriter.WriteHeader(ctx, o) +} + +func (h *hasherWrapper) WriteChunk(_ context.Context, p []byte) error { + _, err := h.hash.Write(p) + return err +} diff --git a/pkg/services/object/head/remote.go b/pkg/services/object/head/remote.go index 85f076a76..bcba181f2 100644 --- a/pkg/services/object/head/remote.go +++ b/pkg/services/object/head/remote.go @@ -15,7 +15,7 @@ import ( ) type ClientConstructor interface { - Get(clientcore.NodeInfo) (clientcore.Client, error) + Get(clientcore.NodeInfo) (clientcore.MultiAddressClient, error) } // RemoteHeader represents utility for getting diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go index 10a6af271..6beb67476 100644 --- a/pkg/services/object/internal/client/client.go +++ b/pkg/services/object/internal/client/client.go @@ -8,6 +8,7 @@ import ( "fmt" "io" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -402,6 +403,9 @@ func (x PutObjectRes) ID() oid.ID { // // Returns any error which prevented the operation from completing correctly in error return. 
func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "client.PutObject") + defer span.End() + var prmCli client.PrmObjectPutInit prmCli.MarkLocal() diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go index d8b59487e..b24218621 100644 --- a/pkg/services/object/put/distributed.go +++ b/pkg/services/object/put/distributed.go @@ -6,13 +6,14 @@ import ( "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" "go.uber.org/zap" ) @@ -35,7 +36,7 @@ type distributedTarget struct { isLocalKey func([]byte) bool - relay func(nodeDesc) error + relay func(context.Context, nodeDesc) error fmt *object.FormatValidator @@ -116,7 +117,7 @@ func (x errIncompletePut) Error() string { return commonMsg } -func (t *distributedTarget) WriteHeader(obj *objectSDK.Object) error { +func (t *distributedTarget) WriteHeader(_ context.Context, obj *objectSDK.Object) error { t.obj = obj return nil @@ -152,7 +153,7 @@ func (t *distributedTarget) Close(ctx context.Context) (*transformer.AccessIdent func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error { if !node.local && t.relay != nil { - return t.relay(node) + return t.relay(ctx, node) } target := t.nodeTargetInitializer(node) @@ -198,15 +199,16 @@ func (t *distributedTarget) iteratePlacement(ctx context.Context) (*transformer. 
if t.traversal.submitPrimaryPlacementFinish() { _, err = t.iteratePlacement(ctx) if err != nil { - t.log.Error("additional container broadcast failure", zap.Error(err)) + t.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) // we don't fail primary operation because of broadcast failure } } id, _ = t.obj.ID() - return new(transformer.AccessIdentifiers). - WithSelfID(id), nil + return &transformer.AccessIdentifiers{ + SelfID: id, + }, nil } func (t *distributedTarget) iterateAddresses(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, resErr *atomic.Value) bool { diff --git a/pkg/services/object/put/local.go b/pkg/services/object/put/local.go index 2e6a496f3..f07122729 100644 --- a/pkg/services/object/put/local.go +++ b/pkg/services/object/put/local.go @@ -5,24 +5,24 @@ import ( "fmt" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" ) // ObjectStorage is an object storage interface. type ObjectStorage interface { // Put must save passed object // and return any appeared error. - Put(*object.Object) error + Put(context.Context, *object.Object) error // Delete must delete passed objects // and return any appeared error. Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error // Lock must lock passed objects // and return any appeared error. - Lock(locker oid.Address, toLock []oid.ID) error + Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error // IsLocked must clarify object's lock status. 
- IsLocked(oid.Address) (bool, error) + IsLocked(context.Context, oid.Address) (bool, error) } type localTarget struct { @@ -47,7 +47,7 @@ func (t *localTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers return nil, fmt.Errorf("could not delete objects from tombstone locally: %w", err) } case object.TypeLock: - err := t.storage.Lock(objectCore.AddressOf(t.obj), t.meta.Objects()) + err := t.storage.Lock(ctx, objectCore.AddressOf(t.obj), t.meta.Objects()) if err != nil { return nil, fmt.Errorf("could not lock object from lock objects locally: %w", err) } @@ -55,12 +55,13 @@ func (t *localTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers // objects that do not change meta storage } - if err := t.storage.Put(t.obj); err != nil { + if err := t.storage.Put(ctx, t.obj); err != nil { //TODO return nil, fmt.Errorf("(%T) could not put object to local storage: %w", t, err) } id, _ := t.obj.ID() - return new(transformer.AccessIdentifiers). - WithSelfID(id), nil + return &transformer.AccessIdentifiers{ + SelfID: id, + }, nil } diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go index aea5926f4..c8d1b29a2 100644 --- a/pkg/services/object/put/prm.go +++ b/pkg/services/object/put/prm.go @@ -1,6 +1,8 @@ package putsvc import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" @@ -17,7 +19,7 @@ type PutInitPrm struct { traverseOpts []placement.Option - relay func(client.NodeInfo, client.MultiAddressClient) error + relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error } type PutChunkPrm struct { @@ -40,7 +42,15 @@ func (p *PutInitPrm) WithObject(v *object.Object) *PutInitPrm { return p } -func (p *PutInitPrm) WithRelay(f func(client.NodeInfo, client.MultiAddressClient) error) *PutInitPrm { +func (p *PutInitPrm) WithCopyNumbers(v 
[]uint32) *PutInitPrm { + if p != nil && len(v) > 0 { + p.traverseOpts = append(p.traverseOpts, placement.WithCopyNumbers(v)) + } + + return p +} + +func (p *PutInitPrm) WithRelay(f func(context.Context, client.NodeInfo, client.MultiAddressClient) error) *PutInitPrm { if p != nil { p.relay = f } diff --git a/pkg/services/object/put/remote.go b/pkg/services/object/put/remote.go index e7fa124fa..bcc566b74 100644 --- a/pkg/services/object/put/remote.go +++ b/pkg/services/object/put/remote.go @@ -10,9 +10,9 @@ import ( objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" ) type remoteTarget struct { @@ -68,8 +68,7 @@ func (t *remoteTarget) Close(ctx context.Context) (*transformer.AccessIdentifier return nil, fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err) } - return new(transformer.AccessIdentifiers). - WithSelfID(res.ID()), nil + return &transformer.AccessIdentifiers{SelfID: res.ID()}, nil } // NewRemoteSender creates, initializes and returns new RemoteSender instance. 
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index fed161e03..6d0d8062e 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -10,9 +10,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) @@ -23,7 +23,7 @@ type Streamer struct { target transformer.ObjectTarget - relay func(client.NodeInfo, client.MultiAddressClient) error + relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error maxPayloadSz uint64 // network config } @@ -32,13 +32,13 @@ var errNotInit = errors.New("stream not initialized") var errInitRecall = errors.New("init recall") -func (p *Streamer) Init(prm *PutInitPrm) error { +func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { // initialize destination target if err := p.initTarget(prm); err != nil { return fmt.Errorf("(%T) could not initialize object target: %w", p, err) } - if err := p.target.WriteHeader(prm.hdr); err != nil { + if err := p.target.WriteHeader(ctx, prm.hdr); err != nil { return fmt.Errorf("(%T) could not write header to target: %w", p, err) } return nil @@ -77,7 +77,7 @@ func (p *Streamer) initUntrustedTarget(prm *PutInitPrm) error { p.relay = prm.relay // prepare untrusted-Put object target - p.target = &validatingTarget{ + p.target = &validatingPreparedTarget{ nextTarget: p.newCommonTarget(prm), fmt: p.fmtValidator, @@ -125,20 +125,15 @@ func (p *Streamer) initTrustedTarget(prm *PutInitPrm) error { p.sessionKey = 
sessionKey p.target = &validatingTarget{ - fmt: p.fmtValidator, - unpreparedObject: true, - nextTarget: transformer.NewPayloadSizeLimiter( - p.maxPayloadSz, - containerSDK.IsHomomorphicHashingDisabled(prm.cnr), - func() transformer.ObjectTarget { - return transformer.NewFormatTarget(&transformer.FormatterParams{ - Key: sessionKey, - NextTarget: p.newCommonTarget(prm), - SessionToken: sToken, - NetworkState: p.networkState, - }) - }, - ), + fmt: p.fmtValidator, + nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{ + Key: sessionKey, + NextTargetInit: func() transformer.ObjectTarget { return p.newCommonTarget(prm) }, + NetworkState: p.networkState, + MaxSize: p.maxPayloadSz, + WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.cnr), + SessionToken: sToken, + }), } return nil @@ -197,9 +192,9 @@ func (p *Streamer) preparePrm(prm *PutInitPrm) error { } func (p *Streamer) newCommonTarget(prm *PutInitPrm) transformer.ObjectTarget { - var relay func(nodeDesc) error + var relay func(context.Context, nodeDesc) error if p.relay != nil { - relay = func(node nodeDesc) error { + relay = func(ctx context.Context, node nodeDesc) error { var info client.NodeInfo client.NodeInfoFromNetmapElement(&info, node.info) @@ -209,7 +204,7 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) transformer.ObjectTarget { return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } - return p.relay(info, c) + return p.relay(ctx, info, c) } } @@ -274,7 +269,7 @@ func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) { return nil, fmt.Errorf("(%T) could not close object target: %w", p, err) } - id := ids.ParentID() + id := ids.ParentID if id != nil { return &PutResponse{ id: *id, @@ -282,6 +277,6 @@ func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) { } return &PutResponse{ - id: ids.SelfID(), + id: ids.SelfID, }, nil } diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go 
index 65846ea9f..65531dc66 100644 --- a/pkg/services/object/put/v2/streamer.go +++ b/pkg/services/object/put/v2/streamer.go @@ -5,6 +5,7 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" @@ -15,6 +16,8 @@ import ( internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) type streamer struct { @@ -34,6 +37,9 @@ type sizes struct { } func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) { + ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.Send") + defer span.End() + switch v := req.GetBody().GetObjectPart().(type) { case *object.PutObjectPartInit: var initPrm *putsvc.PutInitPrm @@ -43,7 +49,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) return err } - if err = s.stream.Init(initPrm); err != nil { + if err = s.stream.Init(ctx, initPrm); err != nil { err = fmt.Errorf("(%T) could not init object put stream: %w", s, err) } @@ -105,6 +111,9 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) } func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.CloseAndRecv") + defer span.End() + if s.saveChunks { // check payload size correctness if s.writtenPayload != s.payloadSz { @@ -120,7 +129,10 @@ func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error return fromPutResponse(resp), nil } -func (s *streamer) 
relayRequest(info client.NodeInfo, c client.MultiAddressClient) error { +func (s *streamer) relayRequest(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) error { + ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.relayRequest") + defer span.End() + // open stream resp := new(object.PutResponse) @@ -129,6 +141,12 @@ func (s *streamer) relayRequest(info client.NodeInfo, c client.MultiAddressClien var firstErr error info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) { + ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.iterateAddress", + trace.WithAttributes( + attribute.String("address", addr.String()), + )) + defer span.End() + var err error defer func() { @@ -143,8 +161,8 @@ func (s *streamer) relayRequest(info client.NodeInfo, c client.MultiAddressClien var stream *rpc.PutRequestWriter - err = c.RawForAddress(addr, func(cli *rawclient.Client) error { - stream, err = rpc.PutObject(cli, resp) + err = c.RawForAddress(ctx, addr, func(cli *rawclient.Client) error { + stream, err = rpc.PutObject(cli, resp, rawclient.WithContext(ctx)) return err }) if err != nil { diff --git a/pkg/services/object/put/v2/util.go b/pkg/services/object/put/v2/util.go index 790f061f1..758470f6c 100644 --- a/pkg/services/object/put/v2/util.go +++ b/pkg/services/object/put/v2/util.go @@ -24,7 +24,8 @@ func (s *streamer) toInitPrm(part *objectV2.PutObjectPartInit, req *objectV2.Put object.NewFromV2(oV2), ). WithRelay(s.relayRequest). - WithCommonPrm(commonPrm), nil + WithCommonPrm(commonPrm). 
+ WithCopyNumbers(part.GetCopiesNumber()), nil } func toChunkPrm(req *objectV2.PutObjectPartChunk) *putsvc.PutChunkPrm { diff --git a/pkg/services/object/put/validation.go b/pkg/services/object/put/validation.go index 70c6974d3..406304422 100644 --- a/pkg/services/object/put/validation.go +++ b/pkg/services/object/put/validation.go @@ -9,19 +9,24 @@ import ( "hash" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/transformer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" "git.frostfs.info/TrueCloudLab/tzhash/tz" ) -// validatingTarget validates object format and content. +// validatingTarget validates unprepared object format and content (streaming PUT case). type validatingTarget struct { nextTarget transformer.ObjectTarget fmt *object.FormatValidator +} - unpreparedObject bool +// validatingPreparedTarget validates prepared object format and content. 
+type validatingPreparedTarget struct { + nextTarget transformer.ObjectTarget + + fmt *object.FormatValidator hash hash.Hash @@ -41,71 +46,81 @@ var ( ErrWrongPayloadSize = errors.New("wrong payload size") ) -func (t *validatingTarget) WriteHeader(obj *objectSDK.Object) error { - t.payloadSz = obj.PayloadSize() - chunkLn := uint64(len(obj.Payload())) - - if !t.unpreparedObject { - // check chunk size - if chunkLn > t.payloadSz { - return ErrWrongPayloadSize - } - - // check payload size limit - if t.payloadSz > t.maxPayloadSz { - return ErrExceedingMaxSize - } - - cs, csSet := obj.PayloadChecksum() - if !csSet { - return errors.New("missing payload checksum") - } - - switch typ := cs.Type(); typ { - default: - return fmt.Errorf("(%T) unsupported payload checksum type %v", t, typ) - case checksum.SHA256: - t.hash = sha256.New() - case checksum.TZ: - t.hash = tz.New() - } - - t.checksum = cs.Value() - } - - if err := t.fmt.Validate(obj, t.unpreparedObject); err != nil { +func (t *validatingTarget) WriteHeader(ctx context.Context, obj *objectSDK.Object) error { + if err := t.fmt.Validate(ctx, obj, true); err != nil { return fmt.Errorf("(%T) coult not validate object format: %w", t, err) } - err := t.nextTarget.WriteHeader(obj) + return t.nextTarget.WriteHeader(ctx, obj) +} + +func (t *validatingTarget) Write(ctx context.Context, p []byte) (n int, err error) { + return t.nextTarget.Write(ctx, p) +} + +func (t *validatingTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) { + return t.nextTarget.Close(ctx) +} + +func (t *validatingPreparedTarget) WriteHeader(ctx context.Context, obj *objectSDK.Object) error { + t.payloadSz = obj.PayloadSize() + chunkLn := uint64(len(obj.Payload())) + + // check chunk size + if chunkLn > t.payloadSz { + return ErrWrongPayloadSize + } + + // check payload size limit + if t.payloadSz > t.maxPayloadSz { + return ErrExceedingMaxSize + } + + cs, csSet := obj.PayloadChecksum() + if !csSet { + return errors.New("missing 
payload checksum") + } + + switch typ := cs.Type(); typ { + default: + return fmt.Errorf("(%T) unsupported payload checksum type %v", t, typ) + case checksum.SHA256: + t.hash = sha256.New() + case checksum.TZ: + t.hash = tz.New() + } + + t.checksum = cs.Value() + + if err := t.fmt.Validate(ctx, obj, false); err != nil { + return fmt.Errorf("(%T) coult not validate object format: %w", t, err) + } + + err := t.nextTarget.WriteHeader(ctx, obj) if err != nil { return err } - if !t.unpreparedObject { - // update written bytes - // - // Note: we MUST NOT add obj.PayloadSize() since obj - // can carry only the chunk of the full payload - t.writtenPayload += chunkLn - } + // update written bytes + // + // Note: we MUST NOT add obj.PayloadSize() since obj + // can carry only the chunk of the full payload + t.writtenPayload += chunkLn return nil } -func (t *validatingTarget) Write(ctx context.Context, p []byte) (n int, err error) { +func (t *validatingPreparedTarget) Write(ctx context.Context, p []byte) (n int, err error) { chunkLn := uint64(len(p)) - if !t.unpreparedObject { - // check if new chunk will overflow payload size - if t.writtenPayload+chunkLn > t.payloadSz { - return 0, ErrWrongPayloadSize - } + // check if new chunk will overflow payload size + if t.writtenPayload+chunkLn > t.payloadSz { + return 0, ErrWrongPayloadSize + } - _, err = t.hash.Write(p) - if err != nil { - return - } + _, err = t.hash.Write(p) + if err != nil { + return } n, err = t.nextTarget.Write(ctx, p) @@ -116,16 +131,14 @@ func (t *validatingTarget) Write(ctx context.Context, p []byte) (n int, err erro return } -func (t *validatingTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) { - if !t.unpreparedObject { - // check payload size correctness - if t.payloadSz != t.writtenPayload { - return nil, ErrWrongPayloadSize - } +func (t *validatingPreparedTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) { + // check payload size correctness + if 
t.payloadSz != t.writtenPayload { + return nil, ErrWrongPayloadSize + } - if !bytes.Equal(t.hash.Sum(nil), t.checksum) { - return nil, fmt.Errorf("(%T) incorrect payload checksum", t) - } + if !bytes.Equal(t.hash.Sum(nil), t.checksum) { + return nil, fmt.Errorf("(%T) incorrect payload checksum", t) } return t.nextTarget.Close(ctx) diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go index b158bc23e..2b6101a98 100644 --- a/pkg/services/object/search/container.go +++ b/pkg/services/object/search/container.go @@ -5,19 +5,20 @@ import ( "encoding/hex" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "go.uber.org/zap" ) func (exec *execCtx) executeOnContainer(ctx context.Context) { if exec.isLocal() { - exec.log.Debug("return result directly") + exec.log.Debug(logs.SearchReturnResultDirectly) return } lookupDepth := exec.netmapLookupDepth() - exec.log.Debug("trying to execute in container...", + exec.log.Debug(logs.TryingToExecuteInContainer, zap.Uint64("netmap lookup depth", lookupDepth), ) @@ -48,7 +49,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) { } func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { - exec.log.Debug("process epoch", + exec.log.Debug(logs.ProcessEpoch, zap.Uint64("number", exec.curProcEpoch), ) @@ -63,7 +64,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { for { addrs := traverser.Next() if len(addrs) == 0 { - exec.log.Debug("no more nodes, abort placement iteration") + exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration) break } @@ -76,7 +77,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { defer wg.Done() select { case <-ctx.Done(): - exec.log.Debug("interrupt placement iteration by context", + exec.log.Debug(logs.InterruptPlacementIterationByContext, zap.String("error", ctx.Err().Error())) return default: @@ -86,7 +87,7 @@ func (exec 
*execCtx) processCurrentEpoch(ctx context.Context) bool { client.NodeInfoFromNetmapElement(&info, addrs[i]) - exec.log.Debug("processing node...", zap.String("key", hex.EncodeToString(addrs[i].PublicKey()))) + exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey()))) c, err := exec.svc.clientConstructor.get(info) if err != nil { @@ -95,13 +96,13 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool { exec.err = err mtx.Unlock() - exec.log.Debug("could not construct remote node client") + exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient) return } ids, err := c.searchObjects(ctx, exec, info) if err != nil { - exec.log.Debug("remote operation failed", + exec.log.Debug(logs.SearchRemoteOperationFailed, zap.String("error", err.Error())) return diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go index f815270d9..475a31b98 100644 --- a/pkg/services/object/search/exec.go +++ b/pkg/services/object/search/exec.go @@ -1,6 +1,7 @@ package searchsvc import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -80,7 +81,7 @@ func (exec *execCtx) initEpoch() bool { exec.status = statusUndefined exec.err = err - exec.log.Debug("could not get current epoch number", + exec.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.String("error", err.Error()), ) @@ -99,7 +100,7 @@ func (exec *execCtx) generateTraverser(cnr cid.ID) (*placement.Traverser, bool) exec.status = statusUndefined exec.err = err - exec.log.Debug("could not generate container traverser", + exec.log.Debug(logs.SearchCouldNotGenerateContainerTraverser, zap.String("error", err.Error()), ) @@ -118,7 +119,7 @@ func (exec *execCtx) writeIDList(ids []oid.ID) { exec.status = statusUndefined exec.err = err - 
exec.log.Debug("could not write object identifiers", + exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()), ) case err == nil: diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go index 1e4776921..1af69caf1 100644 --- a/pkg/services/object/search/local.go +++ b/pkg/services/object/search/local.go @@ -1,17 +1,20 @@ package searchsvc import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "go.uber.org/zap" ) -func (exec *execCtx) executeLocal() { - ids, err := exec.svc.localStorage.search(exec) +func (exec *execCtx) executeLocal(ctx context.Context) { + ids, err := exec.svc.localStorage.search(ctx, exec) if err != nil { exec.status = statusUndefined exec.err = err - exec.log.Debug("local operation failed", + exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()), ) diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go index 325b42a54..7a7cbfc5b 100644 --- a/pkg/services/object/search/search.go +++ b/pkg/services/object/search/search.go @@ -3,6 +3,7 @@ package searchsvc import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "go.uber.org/zap" ) @@ -23,10 +24,10 @@ func (s *Service) Search(ctx context.Context, prm Prm) error { } func (exec *execCtx) execute(ctx context.Context) { - exec.log.Debug("serving request...") + exec.log.Debug(logs.ServingRequest) // perform local operation - exec.executeLocal() + exec.executeLocal(ctx) exec.analyzeStatus(ctx, true) } @@ -35,11 +36,11 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) { // analyze local result switch exec.status { default: - exec.log.Debug("operation finished with error", + exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", exec.err.Error()), ) case statusOK: - exec.log.Debug("operation finished successfully") + exec.log.Debug(logs.OperationFinishedSuccessfully) } if execCnr { diff --git 
a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go index e95970955..75059103f 100644 --- a/pkg/services/object/search/search_test.go +++ b/pkg/services/object/search/search_test.go @@ -107,7 +107,7 @@ func (c *testClientCache) get(info clientcore.NodeInfo) (searchClient, error) { return v, nil } -func (s *testStorage) search(exec *execCtx) ([]oid.ID, error) { +func (s *testStorage) search(_ context.Context, exec *execCtx) ([]oid.ID, error) { v, ok := s.items[exec.containerID().EncodeToString()] if !ok { return nil, nil diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go index b858e2219..708979d79 100644 --- a/pkg/services/object/search/service.go +++ b/pkg/services/object/search/service.go @@ -37,7 +37,7 @@ type cfg struct { log *logger.Logger localStorage interface { - search(*execCtx) ([]oid.ID, error) + search(context.Context, *execCtx) ([]oid.ID, error) } clientConstructor interface { diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go index 49f3e5efd..b5b351a3b 100644 --- a/pkg/services/object/search/util.go +++ b/pkg/services/object/search/util.go @@ -117,12 +117,12 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c return res.IDList(), nil } -func (e *storageEngineWrapper) search(exec *execCtx) ([]oid.ID, error) { +func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) { var selectPrm engine.SelectPrm selectPrm.WithFilters(exec.searchFilters()) selectPrm.WithContainerID(exec.containerID()) - r, err := e.storage.Select(selectPrm) + r, err := e.storage.Select(ctx, selectPrm) if err != nil { return nil, err } diff --git a/pkg/services/object/search/v2/request_forwarder.go b/pkg/services/object/search/v2/request_forwarder.go index 8023f2f0f..d8719986f 100644 --- a/pkg/services/object/search/v2/request_forwarder.go +++ b/pkg/services/object/search/v2/request_forwarder.go @@ -46,7 +46,7 
@@ func (f *requestForwarder) forwardRequest(ctx context.Context, addr network.Addr } var searchStream *rpc.SearchResponseReader - err = c.RawForAddress(addr, func(cli *rpcclient.Client) error { + err = c.RawForAddress(ctx, addr, func(cli *rpcclient.Client) error { searchStream, err = rpc.SearchObjects(cli, f.Request, rpcclient.WithContext(ctx)) return err }) diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index beda45c0c..92beedaa7 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -1,6 +1,7 @@ package util import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" @@ -8,7 +9,7 @@ import ( // LogServiceError writes error message of object service to provided logger. func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) { - l.Error("object service error", + l.Error(logs.UtilObjectServiceError, zap.String("node", network.StringifyGroup(node)), zap.String("request", req), zap.String("error", err.Error()), @@ -17,7 +18,7 @@ func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, er // LogWorkerPoolError writes debug error message of object worker pool to provided logger. 
func LogWorkerPoolError(l *logger.Logger, req string, err error) { - l.Error("could not push task to worker pool", + l.Error(logs.UtilCouldNotPushTaskToWorkerPool, zap.String("request", req), zap.String("error", err.Error()), ) diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go index 01d607020..787c04421 100644 --- a/pkg/services/object_manager/placement/netmap.go +++ b/pkg/services/object_manager/placement/netmap.go @@ -47,7 +47,7 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder { } } -func (s *netMapSrc) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) { +func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) { return s.nm, nil } diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index 75d5fbfd1..e46240a86 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -38,6 +38,7 @@ type Traverser struct { type cfg struct { trackCopies bool + copyNumbers []uint32 flatSuccess *uint32 @@ -84,19 +85,23 @@ func NewTraverser(opts ...Option) (*Traverser, error) { return nil, fmt.Errorf("could not build placement: %w", err) } + // backward compatibility for scalar `copies_number` + if len(cfg.copyNumbers) == 1 { + cfg.flatSuccess = &cfg.copyNumbers[0] + } + var rem []int if cfg.flatSuccess != nil { ns = flatNodes(ns) rem = []int{int(*cfg.flatSuccess)} } else { - replNum := cfg.policy.NumberOfReplicas() - rem = make([]int, 0, replNum) + rem = defaultCopiesVector(cfg.policy) - for i := 0; i < replNum; i++ { - if cfg.trackCopies { - rem = append(rem, int(cfg.policy.ReplicaNumberByIndex(i))) - } else { - rem = append(rem, -1) + for i := range rem { + if !cfg.trackCopies { + rem[i] = -1 + } else if len(cfg.copyNumbers) > i { + rem[i] = int(cfg.copyNumbers[i]) } } } @@ -108,6 +113,17 @@ func NewTraverser(opts ...Option) (*Traverser, error) { }, nil } +func 
defaultCopiesVector(policy netmap.PlacementPolicy) []int { + replNum := policy.NumberOfReplicas() + copyVector := make([]int, 0, replNum) + + for i := 0; i < replNum; i++ { + copyVector = append(copyVector, int(policy.ReplicaNumberByIndex(i))) + } + + return copyVector +} + func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo { sz := 0 for i := range ns { @@ -265,3 +281,9 @@ func WithoutSuccessTracking() Option { c.trackCopies = false } } + +func WithCopyNumbers(v []uint32) Option { + return func(c *cfg) { + c.copyNumbers = v + } +} diff --git a/pkg/services/object_manager/storagegroup/collect.go b/pkg/services/object_manager/storagegroup/collect.go deleted file mode 100644 index d9578dea3..000000000 --- a/pkg/services/object_manager/storagegroup/collect.go +++ /dev/null @@ -1,67 +0,0 @@ -package storagegroup - -import ( - objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup" - "git.frostfs.info/TrueCloudLab/tzhash/tz" -) - -// CollectMembers creates new storage group structure and fills it -// with information about members collected via HeadReceiver. -// -// Resulting storage group consists of physically stored objects only. 
-func CollectMembers(r objutil.HeadReceiver, cnr cid.ID, members []oid.ID, calcHomoHash bool) (*storagegroup.StorageGroup, error) { - var ( - sumPhySize uint64 - phyMembers []oid.ID - phyHashes [][]byte - addr oid.Address - sg storagegroup.StorageGroup - ) - - addr.SetContainer(cnr) - - for i := range members { - addr.SetObject(members[i]) - - if err := objutil.IterateAllSplitLeaves(r, addr, func(leaf *object.Object) { - id, ok := leaf.ID() - if !ok { - return - } - - phyMembers = append(phyMembers, id) - sumPhySize += leaf.PayloadSize() - cs, _ := leaf.PayloadHomomorphicHash() - - if calcHomoHash { - phyHashes = append(phyHashes, cs.Value()) - } - }); err != nil { - return nil, err - } - } - - sg.SetMembers(phyMembers) - sg.SetValidationDataSize(sumPhySize) - - if calcHomoHash { - sumHash, err := tz.Concat(phyHashes) - if err != nil { - return nil, err - } - - var cs checksum.Checksum - tzHash := [64]byte{} - copy(tzHash[:], sumHash) - cs.SetTillichZemor(tzHash) - - sg.SetValidationDataHash(cs) - } - - return &sg, nil -} diff --git a/pkg/services/object_manager/storagegroup/search.go b/pkg/services/object_manager/storagegroup/search.go deleted file mode 100644 index 39019aa6d..000000000 --- a/pkg/services/object_manager/storagegroup/search.go +++ /dev/null @@ -1,14 +0,0 @@ -package storagegroup - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -// SearchQuery returns search query to filter -// objects with storage group content. 
-func SearchQuery() object.SearchFilters { - fs := object.SearchFilters{} - fs.AddTypeFilter(object.MatchStringEqual, object.TypeStorageGroup) - - return fs -} diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index 4097f22bf..46fcc9840 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -5,6 +5,7 @@ import ( "strconv" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -57,7 +58,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr ts, err := g.tsSource.Tombstone(ctx, a, epoch) if err != nil { log.Warn( - "tombstone getter: could not get the tombstone the source", + logs.TombstoneCouldNotGetTheTombstoneTheSource, zap.Error(err), ) } else { diff --git a/pkg/services/object_manager/transformer/fmt.go b/pkg/services/object_manager/transformer/fmt.go deleted file mode 100644 index 462cc7474..000000000 --- a/pkg/services/object_manager/transformer/fmt.go +++ /dev/null @@ -1,114 +0,0 @@ -package transformer - -import ( - "context" - "crypto/ecdsa" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" -) - -type formatter struct { - prm *FormatterParams - - obj *object.Object - - sz uint64 -} - -// FormatterParams groups NewFormatTarget parameters. 
-type FormatterParams struct { - Key *ecdsa.PrivateKey - - NextTarget ObjectTarget - - SessionToken *session.Object - - NetworkState netmap.State -} - -// NewFormatTarget returns ObjectTarget instance that finalizes object structure -// and writes it to the next target. -// -// Chunks must be written before the WriteHeader call. -// -// Object changes: -// - sets version to current SDK version; -// - sets payload size to the total length of all written chunks; -// - sets session token; -// - sets number of creation epoch; -// - calculates and sets verification fields (ID, Signature). -func NewFormatTarget(p *FormatterParams) ObjectTarget { - return &formatter{ - prm: p, - } -} - -func (f *formatter) WriteHeader(obj *object.Object) error { - f.obj = obj - - return nil -} - -func (f *formatter) Write(ctx context.Context, p []byte) (n int, err error) { - n, err = f.prm.NextTarget.Write(ctx, p) - - f.sz += uint64(n) - - return -} - -func (f *formatter) Close(ctx context.Context) (*AccessIdentifiers, error) { - curEpoch := f.prm.NetworkState.CurrentEpoch() - ver := version.Current() - - f.obj.SetVersion(&ver) - f.obj.SetPayloadSize(f.sz) - f.obj.SetSessionToken(f.prm.SessionToken) - f.obj.SetCreationEpoch(curEpoch) - - var ( - parID *oid.ID - parHdr *object.Object - ) - - if par := f.obj.Parent(); par != nil && par.Signature() == nil { - rawPar := object.NewFromV2(par.ToV2()) - - rawPar.SetSessionToken(f.prm.SessionToken) - rawPar.SetCreationEpoch(curEpoch) - - if err := object.SetIDWithSignature(*f.prm.Key, rawPar); err != nil { - return nil, fmt.Errorf("could not finalize parent object: %w", err) - } - - id, _ := rawPar.ID() - parID = &id - parHdr = rawPar - - f.obj.SetParent(parHdr) - } - - if err := object.SetIDWithSignature(*f.prm.Key, f.obj); err != nil { - return nil, fmt.Errorf("could not finalize object: %w", err) - } - - if err := f.prm.NextTarget.WriteHeader(f.obj); err != nil { - return nil, fmt.Errorf("could not write header to next target: %w", err) - } - 
- if _, err := f.prm.NextTarget.Close(ctx); err != nil { - return nil, fmt.Errorf("could not close next target: %w", err) - } - - id, _ := f.obj.ID() - - return new(AccessIdentifiers). - WithSelfID(id). - WithParentID(parID). - WithParent(parHdr), nil -} diff --git a/pkg/services/object_manager/transformer/transformer.go b/pkg/services/object_manager/transformer/transformer.go deleted file mode 100644 index 199f5d0c1..000000000 --- a/pkg/services/object_manager/transformer/transformer.go +++ /dev/null @@ -1,294 +0,0 @@ -package transformer - -import ( - "context" - "crypto/sha256" - "fmt" - "hash" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/tzhash/tz" -) - -type payloadSizeLimiter struct { - maxSize, written uint64 - - withoutHomomorphicHash bool - - targetInit func() ObjectTarget - - target ObjectTarget - - current, parent *object.Object - - currentHashers, parentHashers []*payloadChecksumHasher - - previous []oid.ID - - chunkWriter writer - - splitID *object.SplitID - - parAttrs []object.Attribute -} - -type payloadChecksumHasher struct { - hasher hash.Hash - - checksumWriter func([]byte) -} - -// NewPayloadSizeLimiter returns ObjectTarget instance that restricts payload length -// of the writing object and writes generated objects to targets from initializer. -// -// Calculates and adds homomorphic hash to resulting objects only if withoutHomomorphicHash -// is false. -// -// Objects w/ payload size less or equal than max size remain untouched. 
-func NewPayloadSizeLimiter(maxSize uint64, withoutHomomorphicHash bool, targetInit TargetInitializer) ObjectTarget { - return &payloadSizeLimiter{ - maxSize: maxSize, - withoutHomomorphicHash: withoutHomomorphicHash, - targetInit: targetInit, - splitID: object.NewSplitID(), - } -} - -func (s *payloadSizeLimiter) WriteHeader(hdr *object.Object) error { - s.current = fromObject(hdr) - - s.initialize() - - return nil -} - -func (s *payloadSizeLimiter) Write(ctx context.Context, p []byte) (int, error) { - if err := s.writeChunk(ctx, p); err != nil { - return 0, err - } - - return len(p), nil -} - -func (s *payloadSizeLimiter) Close(ctx context.Context) (*AccessIdentifiers, error) { - return s.release(ctx, true) -} - -func (s *payloadSizeLimiter) initialize() { - // if it is an object after the 1st - if ln := len(s.previous); ln > 0 { - // initialize parent object once (after 1st object) - if ln == 1 { - s.detachParent() - } - - // set previous object to the last previous identifier - s.current.SetPreviousID(s.previous[ln-1]) - } - - s.initializeCurrent() -} - -func fromObject(obj *object.Object) *object.Object { - cnr, _ := obj.ContainerID() - - res := object.New() - res.SetContainerID(cnr) - res.SetOwnerID(obj.OwnerID()) - res.SetAttributes(obj.Attributes()...) - res.SetType(obj.Type()) - - // obj.SetSplitID creates splitHeader but we don't need to do it in case - // of small objects, so we should make nil check. 
- if obj.SplitID() != nil { - res.SetSplitID(obj.SplitID()) - } - - return res -} - -func (s *payloadSizeLimiter) initializeCurrent() { - // initialize current object target - s.target = s.targetInit() - - // create payload hashers - s.currentHashers = payloadHashersForObject(s.current, s.withoutHomomorphicHash) - - // compose multi-writer from target and all payload hashers - ws := make([]writer, 0, 1+len(s.currentHashers)+len(s.parentHashers)) - - ws = append(ws, s.target) - - for i := range s.currentHashers { - ws = append(ws, newWriter(s.currentHashers[i].hasher)) - } - - for i := range s.parentHashers { - ws = append(ws, newWriter(s.parentHashers[i].hasher)) - } - - s.chunkWriter = newMultiWriter(ws...) -} - -func payloadHashersForObject(obj *object.Object, withoutHomomorphicHash bool) []*payloadChecksumHasher { - hashers := make([]*payloadChecksumHasher, 0, 2) - - hashers = append(hashers, &payloadChecksumHasher{ - hasher: sha256.New(), - checksumWriter: func(binChecksum []byte) { - if ln := len(binChecksum); ln != sha256.Size { - panic(fmt.Sprintf("wrong checksum length: expected %d, has %d", sha256.Size, ln)) - } - - csSHA := [sha256.Size]byte{} - copy(csSHA[:], binChecksum) - - var cs checksum.Checksum - cs.SetSHA256(csSHA) - - obj.SetPayloadChecksum(cs) - }, - }) - - if !withoutHomomorphicHash { - hashers = append(hashers, &payloadChecksumHasher{ - hasher: tz.New(), - checksumWriter: func(binChecksum []byte) { - if ln := len(binChecksum); ln != tz.Size { - panic(fmt.Sprintf("wrong checksum length: expected %d, has %d", tz.Size, ln)) - } - - csTZ := [tz.Size]byte{} - copy(csTZ[:], binChecksum) - - var cs checksum.Checksum - cs.SetTillichZemor(csTZ) - - obj.SetPayloadHomomorphicHash(cs) - }, - }) - } - - return hashers -} - -func (s *payloadSizeLimiter) release(ctx context.Context, finalize bool) (*AccessIdentifiers, error) { - // Arg finalize is true only when called from Close method. 
- // We finalize parent and generate linking objects only if it is more - // than 1 object in split-chain. - withParent := finalize && len(s.previous) > 0 - - if withParent { - writeHashes(s.parentHashers) - s.parent.SetPayloadSize(s.written) - s.current.SetParent(s.parent) - } - - // release current object - writeHashes(s.currentHashers) - - // release current, get its id - if err := s.target.WriteHeader(s.current); err != nil { - return nil, fmt.Errorf("could not write header: %w", err) - } - - ids, err := s.target.Close(ctx) - if err != nil { - return nil, fmt.Errorf("could not close target: %w", err) - } - - // save identifier of the released object - s.previous = append(s.previous, ids.SelfID()) - - if withParent { - // generate and release linking object - s.initializeLinking(ids.Parent()) - s.initializeCurrent() - - if _, err := s.release(ctx, false); err != nil { - return nil, fmt.Errorf("could not release linking object: %w", err) - } - } - - return ids, nil -} - -func writeHashes(hashers []*payloadChecksumHasher) { - for i := range hashers { - hashers[i].checksumWriter(hashers[i].hasher.Sum(nil)) - } -} - -func (s *payloadSizeLimiter) initializeLinking(parHdr *object.Object) { - s.current = fromObject(s.current) - s.current.SetParent(parHdr) - s.current.SetChildren(s.previous...) - s.current.SetSplitID(s.splitID) -} - -func (s *payloadSizeLimiter) writeChunk(ctx context.Context, chunk []byte) error { - // statement is true if the previous write of bytes reached exactly the boundary. 
- if s.written > 0 && s.written%s.maxSize == 0 { - if s.written == s.maxSize { - s.prepareFirstChild() - } - - // we need to release current object - if _, err := s.release(ctx, false); err != nil { - return fmt.Errorf("could not release object: %w", err) - } - - // initialize another object - s.initialize() - } - - var ( - ln = uint64(len(chunk)) - cut = ln - leftToEdge = s.maxSize - s.written%s.maxSize - ) - - // write bytes no further than the boundary of the current object - if ln > leftToEdge { - cut = leftToEdge - } - - if _, err := s.chunkWriter.Write(ctx, chunk[:cut]); err != nil { - return fmt.Errorf("could not write chunk to target: %w", err) - } - - // increase written bytes counter - s.written += cut - - // if there are more bytes in buffer we call method again to start filling another object - if ln > leftToEdge { - return s.writeChunk(ctx, chunk[cut:]) - } - - return nil -} - -func (s *payloadSizeLimiter) prepareFirstChild() { - // initialize split header with split ID on first object in chain - s.current.InitRelations() - s.current.SetSplitID(s.splitID) - - // cut source attributes - s.parAttrs = s.current.Attributes() - s.current.SetAttributes() - - // attributes will be added to parent in detachParent -} - -func (s *payloadSizeLimiter) detachParent() { - s.parent = s.current - s.current = fromObject(s.parent) - s.parent.ResetRelations() - s.parent.SetSignature(nil) - s.parentHashers = s.currentHashers - - // return source attributes - s.parent.SetAttributes(s.parAttrs...) 
-} diff --git a/pkg/services/object_manager/transformer/types.go b/pkg/services/object_manager/transformer/types.go deleted file mode 100644 index 3e6e2feff..000000000 --- a/pkg/services/object_manager/transformer/types.go +++ /dev/null @@ -1,111 +0,0 @@ -package transformer - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// AccessIdentifiers represents group of the object identifiers -// that are returned after writing the object. -// Consists of the ID of the stored object and the ID of the parent object. -type AccessIdentifiers struct { - par *oid.ID - - self oid.ID - - parHdr *object.Object -} - -// ObjectTarget is an interface of the object writer. -type ObjectTarget interface { - // WriteHeader writes object header w/ payload part. - // The payload of the object may be incomplete. - // - // Must be called exactly once. Control remains with the caller. - // Missing a call or re-calling can lead to undefined behavior - // that depends on the implementation. - // - // Must not be called after Close call. - WriteHeader(*object.Object) error - - // Write writes object payload chunk. - // - // Can be called multiple times. - // - // Must not be called after Close call. - Write(ctx context.Context, p []byte) (n int, err error) - - // Close is used to finish object writing. - // - // Close must return access identifiers of the object - // that has been written. - // - // Must be called no more than once. Control remains with the caller. - // Re-calling can lead to undefined behavior - // that depends on the implementation. - Close(ctx context.Context) (*AccessIdentifiers, error) -} - -// TargetInitializer represents ObjectTarget constructor. -type TargetInitializer func() ObjectTarget - -// SelfID returns identifier of the written object. 
-func (a AccessIdentifiers) SelfID() oid.ID { - return a.self -} - -// WithSelfID returns AccessIdentifiers with passed self identifier. -func (a *AccessIdentifiers) WithSelfID(v oid.ID) *AccessIdentifiers { - res := a - if res == nil { - res = new(AccessIdentifiers) - } - - res.self = v - - return res -} - -// ParentID return identifier of the parent of the written object. -func (a *AccessIdentifiers) ParentID() *oid.ID { - if a != nil { - return a.par - } - - return nil -} - -// WithParentID returns AccessIdentifiers with passed parent identifier. -func (a *AccessIdentifiers) WithParentID(v *oid.ID) *AccessIdentifiers { - res := a - if res == nil { - res = new(AccessIdentifiers) - } - - res.par = v - - return res -} - -// Parent return identifier of the parent of the written object. -func (a *AccessIdentifiers) Parent() *object.Object { - if a != nil { - return a.parHdr - } - - return nil -} - -// WithParent returns AccessIdentifiers with passed parent identifier. -func (a *AccessIdentifiers) WithParent(v *object.Object) *AccessIdentifiers { - res := a - if res == nil { - res = new(AccessIdentifiers) - } - - res.parHdr = v - - return res -} diff --git a/pkg/services/object_manager/transformer/writer.go b/pkg/services/object_manager/transformer/writer.go deleted file mode 100644 index 27aed16ff..000000000 --- a/pkg/services/object_manager/transformer/writer.go +++ /dev/null @@ -1,52 +0,0 @@ -package transformer - -import ( - "context" - "io" -) - -type writer interface { - Write(ctx context.Context, p []byte) (n int, err error) -} - -type multiWriter struct { - writers []writer -} - -func (t *multiWriter) Write(ctx context.Context, p []byte) (n int, err error) { - for _, w := range t.writers { - n, err = w.Write(ctx, p) - if err != nil { - return - } - if n != len(p) { - err = io.ErrShortWrite - return - } - } - return len(p), nil -} - -func newMultiWriter(writers ...writer) writer { - allWriters := make([]writer, 0, len(writers)) - for _, w := range writers { - 
if mw, ok := w.(*multiWriter); ok { - allWriters = append(allWriters, mw.writers...) - } else { - allWriters = append(allWriters, w) - } - } - return &multiWriter{allWriters} -} - -type writerWrapper struct { - Writer io.Writer -} - -func (w *writerWrapper) Write(_ context.Context, p []byte) (n int, err error) { - return w.Writer.Write(p) -} - -func newWriter(w io.Writer) writer { - return &writerWrapper{Writer: w} -} diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index 9cdc4d813..e91b8871b 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -4,6 +4,7 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" @@ -73,7 +74,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add cnr, err := p.cnrSrc.Get(idCnr) if err != nil { - p.log.Error("could not get container", + p.log.Error(logs.PolicerCouldNotGetContainer, zap.Stringer("cid", idCnr), zap.String("error", err.Error()), ) @@ -84,7 +85,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add _, err := p.jobQueue.localStorage.Inhume(ctx, prm) if err != nil { - p.log.Error("could not inhume object with missing container", + p.log.Error(logs.PolicerCouldNotInhumeObjectWithMissingContainer, zap.Stringer("cid", idCnr), zap.Stringer("oid", idObj), zap.String("error", err.Error())) @@ -98,7 +99,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy) if err != nil { - p.log.Error("could not build placement vector for object", + p.log.Error(logs.PolicerCouldNotBuildPlacementVectorForObject, zap.Stringer("cid", idCnr), zap.String("error", err.Error()), ) @@ -127,7 +128,7 
@@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add } if !c.needLocalCopy && c.removeLocalCopy { - p.log.Info("redundant local object copy detected", + p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", addr), ) @@ -199,7 +200,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi if isClientErrMaintenance(err) { shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies) } else if err != nil { - p.log.Error("receive object header to check policy compliance", + p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", addr), zap.String("error", err.Error()), ) @@ -228,7 +229,7 @@ func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes *nodeCach shortage-- uncheckedCopies++ - p.log.Debug("consider node under maintenance as OK", + p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(node)), ) return shortage, uncheckedCopies @@ -237,7 +238,7 @@ func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes *nodeCach func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements, nodes []netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) { if shortage > 0 { - p.log.Debug("shortage of object copies detected", + p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", addr), zap.Uint32("shortage", shortage), ) @@ -251,7 +252,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address } else if uncheckedCopies > 0 { // If we have more copies than needed, but some of them are from the maintenance nodes, // save the local copy. 
- p.log.Debug("some of the copies are stored on nodes under maintenance, save local copy", + p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance, zap.Int("count", uncheckedCopies)) } else if uncheckedCopies == 0 { // Safe to remove: checked all copies, shortage == 0. diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go index 687216407..4a40f00ba 100644 --- a/pkg/services/policer/process.go +++ b/pkg/services/policer/process.go @@ -5,6 +5,7 @@ import ( "errors" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" "go.uber.org/zap" @@ -12,7 +13,7 @@ import ( func (p *Policer) Run(ctx context.Context) { defer func() { - p.log.Info("routine stopped") + p.log.Info(logs.PolicerRoutineStopped) }() go p.poolCapacityWorker(ctx) @@ -39,7 +40,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) { time.Sleep(time.Second) // finished whole cycle, sleep a bit continue } - p.log.Warn("failure at object select for replication", zap.Error(err)) + p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err)) } for i := range addrs { @@ -68,7 +69,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) { p.objsInWork.remove(addr.Address) }) if err != nil { - p.log.Warn("pool submission", zap.Error(err)) + p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err)) } } } @@ -91,7 +92,7 @@ func (p *Policer) poolCapacityWorker(ctx context.Context) { if p.taskPool.Cap() != newCapacity { p.taskPool.Tune(newCapacity) - p.log.Debug("tune replication capacity", + p.log.Debug(logs.PolicerTuneReplicationCapacity, zap.Float64("system_load", frostfsSysLoad), zap.Int("new_capacity", newCapacity)) } diff --git a/pkg/services/replicator/metrics.go b/pkg/services/replicator/metrics.go new file mode 100644 index 000000000..3fc062926 --- /dev/null +++ 
b/pkg/services/replicator/metrics.go @@ -0,0 +1,8 @@ +package replicator + +type MetricsRegister interface { + IncInFlightRequest() + DecInFlightRequest() + IncProcessedObjects() + AddPayloadSize(size int64) +} diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go index 53df81b77..0f82ff232 100644 --- a/pkg/services/replicator/process.go +++ b/pkg/services/replicator/process.go @@ -3,6 +3,7 @@ package replicator import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -19,8 +20,10 @@ type TaskResult interface { // HandleTask executes replication task inside invoking goroutine. // Passes all the nodes that accepted the replication to the TaskResult. func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult) { + p.metrics.IncInFlightRequest() + defer p.metrics.DecInFlightRequest() defer func() { - p.log.Debug("finish work", + p.log.Debug(logs.ReplicatorFinishWork, zap.Uint32("amount of unfinished replicas", task.quantity), ) }() @@ -29,7 +32,7 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult) var err error task.obj, err = engine.Get(ctx, p.localStorage, task.addr) if err != nil { - p.log.Error("could not get object from local storage", + p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage, zap.Stringer("object", task.addr), zap.Error(err)) @@ -59,15 +62,18 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult) cancel() if err != nil { - log.Error("could not replicate object", + log.Error(logs.ReplicatorCouldNotReplicateObject, zap.String("error", err.Error()), ) } else { - log.Debug("object successfully replicated") + log.Debug(logs.ReplicatorObjectSuccessfullyReplicated) task.quantity-- 
res.SubmitSuccessfulReplication(task.nodes[i]) + + p.metrics.IncProcessedObjects() + p.metrics.AddPayloadSize(int64(task.obj.PayloadSize())) } } } diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go index 493982100..bb817cb32 100644 --- a/pkg/services/replicator/replicator.go +++ b/pkg/services/replicator/replicator.go @@ -26,6 +26,8 @@ type cfg struct { remoteSender *putsvc.RemoteSender localStorage *engine.StorageEngine + + metrics MetricsRegister } func defaultCfg() *cfg { @@ -74,3 +76,9 @@ func WithLocalStorage(v *engine.StorageEngine) Option { c.localStorage = v } } + +func WithMetrics(v MetricsRegister) Option { + return func(c *cfg) { + c.metrics = v + } +} diff --git a/pkg/services/reputation/common/deps.go b/pkg/services/reputation/common/deps.go deleted file mode 100644 index 3ea5aa88e..000000000 --- a/pkg/services/reputation/common/deps.go +++ /dev/null @@ -1,78 +0,0 @@ -package common - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -type EpochProvider interface { - // Must return epoch number to select the values. - Epoch() uint64 -} - -// Writer describes the interface for storing reputation.Trust values. -// -// This interface is provided by both local storage -// of values and remote (wrappers over the RPC). -type Writer interface { - // Write performs a write operation of reputation.Trust value - // and returns any error encountered. - // - // All values after the Close call must be flushed to the - // physical target. Implementations can cache values before - // Close operation. - // - // Write must not be called after Close. - Write(context.Context, reputation.Trust) error - - // Close exits with method-providing Writer. - // - // All cached values must be flushed before - // the Close's return. - // - // Methods must not be called after Close. 
- Close(context.Context) error -} - -// WriterProvider is a group of methods provided -// by entity which generates keepers of -// reputation.Trust values. -type WriterProvider interface { - // InitWriter should return an initialized Writer. - // - // Initialization problems are reported via error. - // If no error was returned, then the Writer must not be nil. - // - // Implementations can have different logic for different - // contexts, so specific ones may document their own behavior. - InitWriter(EpochProvider) (Writer, error) -} - -// ManagerBuilder defines an interface for providing a list -// of Managers for specific epoch. Implementation depends on trust value. -type ManagerBuilder interface { - // BuildManagers must compose list of managers. It depends on - // particular epoch and PeerID of the current route point. - BuildManagers(epoch uint64, p apireputation.PeerID) ([]ServerInfo, error) -} - -// ServerInfo describes a set of -// characteristics of a point in a route. -type ServerInfo interface { - // PublicKey returns public key of the node - // from the route in a binary representation. - PublicKey() []byte - - // Iterates over network addresses of the node - // in the route. Breaks iterating on true return - // of the handler. - IterateAddresses(func(string) bool) - - // Returns number of server's network addresses. - NumberOfAddresses() int - - // ExternalAddresses returns external addresses of a node. 
- ExternalAddresses() []string -} diff --git a/pkg/services/reputation/common/managers.go b/pkg/services/reputation/common/managers.go deleted file mode 100644 index ef11b8122..000000000 --- a/pkg/services/reputation/common/managers.go +++ /dev/null @@ -1,132 +0,0 @@ -package common - -import ( - "fmt" - - netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - apiNetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "git.frostfs.info/TrueCloudLab/hrw" - "go.uber.org/zap" -) - -// managerBuilder is implementation of reputation ManagerBuilder interface. -// It sorts nodes in NetMap with HRW algorithms and -// takes the next node after the current one as the only manager. -type managerBuilder struct { - log *logger.Logger - nmSrc netmapcore.Source - opts *mngOptions -} - -// ManagersPrm groups the required parameters of the managerBuilder's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type ManagersPrm struct { - NetMapSource netmapcore.Source -} - -// NewManagerBuilder creates a new instance of the managerBuilder. -// -// Panics if at least one value of the parameters is invalid. -// -// The created managerBuilder does not require additional -// initialization and is completely ready for work. -func NewManagerBuilder(prm ManagersPrm, opts ...MngOption) ManagerBuilder { - switch { - case prm.NetMapSource == nil: - panic(fmt.Sprintf("invalid NetMapSource (%T):%v", prm.NetMapSource, prm.NetMapSource)) - } - - o := defaultMngOpts() - - for i := range opts { - opts[i](o) - } - - return &managerBuilder{ - log: o.log, - nmSrc: prm.NetMapSource, - opts: o, - } -} - -// implements Server on apiNetmap.NodeInfo. 
-type nodeServer apiNetmap.NodeInfo - -func (x nodeServer) PublicKey() []byte { - return (apiNetmap.NodeInfo)(x).PublicKey() -} - -func (x nodeServer) IterateAddresses(f func(string) bool) { - (apiNetmap.NodeInfo)(x).IterateNetworkEndpoints(f) -} - -func (x nodeServer) NumberOfAddresses() int { - return (apiNetmap.NodeInfo)(x).NumberOfNetworkEndpoints() -} - -func (x nodeServer) ExternalAddresses() []string { - return (apiNetmap.NodeInfo)(x).ExternalAddresses() -} - -// BuildManagers sorts nodes in NetMap with HRW algorithms and -// takes the next node after the current one as the only manager. -func (mb *managerBuilder) BuildManagers(epoch uint64, p apireputation.PeerID) ([]ServerInfo, error) { - mb.log.Debug("start building managers", - zap.Uint64("epoch", epoch), - zap.Stringer("peer", p), - ) - - nm, err := mb.nmSrc.GetNetMapByEpoch(epoch) - if err != nil { - return nil, err - } - - nmNodes := nm.Nodes() - - // make a copy to keep order consistency of the origin netmap after sorting - nodes := make([]apiNetmap.NodeInfo, len(nmNodes)) - - copy(nodes, nmNodes) - - hrw.SortHasherSliceByValue(nodes, epoch) - - for i := range nodes { - if apireputation.ComparePeerKey(p, nodes[i].PublicKey()) { - managerIndex := i + 1 - - if managerIndex == len(nodes) { - managerIndex = 0 - } - - return []ServerInfo{nodeServer(nodes[managerIndex])}, nil - } - } - - return nil, nil -} - -type mngOptions struct { - log *logger.Logger -} - -type MngOption func(*mngOptions) - -func defaultMngOpts() *mngOptions { - return &mngOptions{ - log: &logger.Logger{Logger: zap.L()}, - } -} - -// WithLogger returns MngOption to specify logging component. 
-func WithLogger(l *logger.Logger) MngOption { - return func(o *mngOptions) { - if l != nil { - o.log = l - } - } -} diff --git a/pkg/services/reputation/common/router/calls.go b/pkg/services/reputation/common/router/calls.go deleted file mode 100644 index a177f6a2b..000000000 --- a/pkg/services/reputation/common/router/calls.go +++ /dev/null @@ -1,138 +0,0 @@ -package router - -import ( - "context" - "encoding/hex" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "go.uber.org/zap" -) - -// RouteInfo wraps epoch provider with additional passed -// route data. It is only used inside Router and is -// not passed in any external methods. -type RouteInfo struct { - common.EpochProvider - - passedRoute []common.ServerInfo -} - -// NewRouteInfo wraps the main context of value passing with its traversal route and epoch. -func NewRouteInfo(ep common.EpochProvider, passed []common.ServerInfo) *RouteInfo { - return &RouteInfo{ - EpochProvider: ep, - passedRoute: passed, - } -} - -type trustWriter struct { - router *Router - - routeInfo *RouteInfo - - routeMtx sync.RWMutex - mServers map[string]common.Writer -} - -// InitWriter initializes and returns Writer that sends each value to its next route point. -// -// If ep was created by NewRouteInfo, then the traversed route is taken into account, -// and the value will be sent to its continuation. Otherwise, the route will be laid -// from scratch and the value will be sent to its primary point. -// -// After building a list of remote points of the next leg of the route, the value is sent -// sequentially to all of them. If any transmissions (even all) fail, an error will not -// be returned. -// -// Close of the composed Writer calls Close method on each internal Writer generated in -// runtime and never returns an error. -// -// Always returns nil error. 
-func (r *Router) InitWriter(ep common.EpochProvider) (common.Writer, error) { - var ( - routeInfo *RouteInfo - ok bool - ) - - if routeInfo, ok = ep.(*RouteInfo); !ok { - routeInfo = &RouteInfo{ - EpochProvider: ep, - passedRoute: []common.ServerInfo{r.localSrvInfo}, - } - } - - return &trustWriter{ - router: r, - routeInfo: routeInfo, - mServers: make(map[string]common.Writer), - }, nil -} - -func (w *trustWriter) Write(ctx context.Context, t reputation.Trust) error { - w.routeMtx.Lock() - defer w.routeMtx.Unlock() - - route, err := w.router.routeBuilder.NextStage(w.routeInfo.Epoch(), t, w.routeInfo.passedRoute) - if err != nil { - return err - } else if len(route) == 0 { - route = []common.ServerInfo{nil} - } - - for _, remoteInfo := range route { - var key string - - if remoteInfo != nil { - key = hex.EncodeToString(remoteInfo.PublicKey()) - } - - remoteWriter, ok := w.mServers[key] - if !ok { - provider, err := w.router.remoteProvider.InitRemote(remoteInfo) - if err != nil { - w.router.log.Debug("could not initialize writer provider", - zap.String("error", err.Error()), - ) - - continue - } - - // init writer with original context wrapped in routeContext - remoteWriter, err = provider.InitWriter(w.routeInfo.EpochProvider) - if err != nil { - w.router.log.Debug("could not initialize writer", - zap.String("error", err.Error()), - ) - - continue - } - - w.mServers[key] = remoteWriter - } - - err := remoteWriter.Write(ctx, t) - if err != nil { - w.router.log.Debug("could not write the value", - zap.String("error", err.Error()), - ) - } - } - - return nil -} - -func (w *trustWriter) Close(ctx context.Context) error { - for key, wRemote := range w.mServers { - err := wRemote.Close(ctx) - if err != nil { - w.router.log.Debug("could not close remote server writer", - zap.String("key", key), - zap.String("error", err.Error()), - ) - } - } - - return nil -} diff --git a/pkg/services/reputation/common/router/deps.go b/pkg/services/reputation/common/router/deps.go deleted 
file mode 100644 index 36aecb59f..000000000 --- a/pkg/services/reputation/common/router/deps.go +++ /dev/null @@ -1,28 +0,0 @@ -package router - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" -) - -// Builder groups methods to route values in the network. -type Builder interface { - // NextStage must return next group of route points - // for passed epoch and trust values. - // Implementation must take into account already passed route points. - // - // Empty passed list means being at the starting point of the route. - // - // Must return empty list and no error if the endpoint of the route is reached. - NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) -} - -// RemoteWriterProvider describes the component -// for sending values to a fixed route point. -type RemoteWriterProvider interface { - // InitRemote must return WriterProvider to the route point - // corresponding to info. - // - // Nil info matches the end of the route. - InitRemote(info common.ServerInfo) (common.WriterProvider, error) -} diff --git a/pkg/services/reputation/common/router/opts.go b/pkg/services/reputation/common/router/opts.go deleted file mode 100644 index 1b3454412..000000000 --- a/pkg/services/reputation/common/router/opts.go +++ /dev/null @@ -1,28 +0,0 @@ -package router - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// Option sets an optional parameter of Router. -type Option func(*options) - -type options struct { - log *logger.Logger -} - -func defaultOpts() *options { - return &options{ - log: &logger.Logger{Logger: zap.L()}, - } -} - -// WithLogger returns Option to specify logging component. 
-func WithLogger(l *logger.Logger) Option { - return func(o *options) { - if l != nil { - o.log = l - } - } -} diff --git a/pkg/services/reputation/common/router/router.go b/pkg/services/reputation/common/router/router.go deleted file mode 100644 index b80f6ce52..000000000 --- a/pkg/services/reputation/common/router/router.go +++ /dev/null @@ -1,81 +0,0 @@ -package router - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// Prm groups the required parameters of the Router's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - // Characteristics of the local node's server. - // - // Must not be nil. - LocalServerInfo common.ServerInfo - - // Component for sending values to a fixed route point. - // - // Must not be nil. - RemoteWriterProvider RemoteWriterProvider - - // Route planner. - // - // Must not be nil. - Builder Builder -} - -// Router represents component responsible for routing -// local trust values over the network. -// -// For each fixed pair (node peer, epoch) there is a -// single value route on the network. Router provides the -// interface for writing values to the next point of the route. -// -// For correct operation, Router must be created using -// the constructor (New) based on the required parameters -// and optional components. After successful creation, -// the Router is immediately ready to work through API. 
-type Router struct { - log *logger.Logger - - remoteProvider RemoteWriterProvider - - routeBuilder Builder - - localSrvInfo common.ServerInfo -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -func New(prm Prm, opts ...Option) *Router { - switch { - case prm.RemoteWriterProvider == nil: - panicOnPrmValue("RemoteWriterProvider", prm.RemoteWriterProvider) - case prm.Builder == nil: - panicOnPrmValue("Builder", prm.Builder) - case prm.LocalServerInfo == nil: - panicOnPrmValue("LocalServerInfo", prm.LocalServerInfo) - } - - o := defaultOpts() - - for i := range opts { - opts[i](o) - } - - return &Router{ - log: o.log, - remoteProvider: prm.RemoteWriterProvider, - routeBuilder: prm.Builder, - localSrvInfo: prm.LocalServerInfo, - } -} diff --git a/pkg/services/reputation/common/router/util.go b/pkg/services/reputation/common/router/util.go deleted file mode 100644 index aa3190d2b..000000000 --- a/pkg/services/reputation/common/router/util.go +++ /dev/null @@ -1,40 +0,0 @@ -package router - -import ( - "bytes" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" -) - -var errWrongRoute = errors.New("wrong route") - -// CheckRoute checks if the route is a route correctly constructed by the builder for value a. -// -// Returns nil if route is correct, otherwise an error clarifying the inconsistency. 
-func CheckRoute(builder Builder, epoch uint64, t reputation.Trust, route []common.ServerInfo) error { - for i := 1; i < len(route); i++ { - servers, err := builder.NextStage(epoch, t, route[:i]) - if err != nil { - return err - } else if len(servers) == 0 { - break - } - - found := false - - for j := range servers { - if bytes.Equal(servers[j].PublicKey(), route[i].PublicKey()) { - found = true - break - } - } - - if !found { - return errWrongRoute - } - } - - return nil -} diff --git a/pkg/services/reputation/eigentrust/calculator/calculator.go b/pkg/services/reputation/eigentrust/calculator/calculator.go deleted file mode 100644 index bfa274fea..000000000 --- a/pkg/services/reputation/eigentrust/calculator/calculator.go +++ /dev/null @@ -1,90 +0,0 @@ -package eigentrustcalc - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" -) - -// Prm groups the required parameters of the Calculator's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - // Alpha parameter from origin EigenTrust algorithm - // http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Ch.5.1. - // - // Must be in range (0, 1). - AlphaProvider AlphaProvider - - // Source of initial node trust values - // - // Must not be nil. - InitialTrustSource InitialTrustSource - - DaughterTrustSource DaughterTrustIteratorProvider - - IntermediateValueTarget common.WriterProvider - - FinalResultTarget IntermediateWriterProvider - - WorkerPool util.WorkerPool -} - -// Calculator is a processor of a single iteration of EigenTrust algorithm. 
-// -// For correct operation, the Calculator must be created -// using the constructor (New) based on the required parameters -// and optional components. After successful creation, -// the Calculator is immediately ready to work through -// API of external control of calculations and data transfer. -type Calculator struct { - alpha, beta reputation.TrustValue // beta = 1 - alpha - - prm Prm - - opts *options -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -// New creates a new instance of the Calculator. -// -// Panics if at least one value of the parameters is invalid. -// -// The created Calculator does not require additional -// initialization and is completely ready for work. -func New(prm Prm, opts ...Option) *Calculator { - switch { - case prm.AlphaProvider == nil: - panicOnPrmValue("AlphaProvider", prm.AlphaProvider) - case prm.InitialTrustSource == nil: - panicOnPrmValue("InitialTrustSource", prm.InitialTrustSource) - case prm.DaughterTrustSource == nil: - panicOnPrmValue("DaughterTrustSource", prm.DaughterTrustSource) - case prm.IntermediateValueTarget == nil: - panicOnPrmValue("IntermediateValueTarget", prm.IntermediateValueTarget) - case prm.FinalResultTarget == nil: - panicOnPrmValue("FinalResultTarget", prm.FinalResultTarget) - case prm.WorkerPool == nil: - panicOnPrmValue("WorkerPool", prm.WorkerPool) - } - - o := defaultOpts() - - for _, opt := range opts { - opt(o) - } - - return &Calculator{ - prm: prm, - opts: o, - } -} diff --git a/pkg/services/reputation/eigentrust/calculator/calls.go b/pkg/services/reputation/eigentrust/calculator/calls.go deleted file mode 100644 index a8e5cf1da..000000000 --- a/pkg/services/reputation/eigentrust/calculator/calls.go +++ /dev/null @@ -1,294 +0,0 @@ -package eigentrustcalc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" - "go.uber.org/zap" -) - -type CalculatePrm struct { - last bool - - ei eigentrust.EpochIteration -} - -func (p *CalculatePrm) SetLast(last bool) { - p.last = last -} - -func (p *CalculatePrm) SetEpochIteration(ei eigentrust.EpochIteration) { - p.ei = ei -} - -func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) { - alpha, err := c.prm.AlphaProvider.EigenTrustAlpha() - if err != nil { - c.opts.log.Debug( - "failed to get alpha param", - zap.Error(err), - ) - return - } - - c.alpha = reputation.TrustValueFromFloat64(alpha) - c.beta = reputation.TrustValueFromFloat64(1 - alpha) - - epochIteration := prm.ei - - iter := epochIteration.I() - - log := c.opts.log.With( - zap.Uint64("epoch", epochIteration.Epoch()), - zap.Uint32("iteration", iter), - ) - - if iter == 0 { - c.sendInitialValues(ctx, epochIteration) - return - } - - // decrement iteration number to select the values collected - // on the previous stage - epochIteration.SetI(iter - 1) - - consumersIter, err := c.prm.DaughterTrustSource.InitConsumersIterator(epochIteration) - if err != nil { - log.Debug("consumers trust iterator's init failure", - zap.String("error", err.Error()), - ) - - return - } - - // continue with initial iteration number - epochIteration.SetI(iter) - - err = consumersIter.Iterate(func(daughter apireputation.PeerID, iter TrustIterator) error { - err := c.prm.WorkerPool.Submit(func() { - c.iterateDaughter(ctx, iterDaughterPrm{ - lastIter: prm.last, - ei: epochIteration, - id: daughter, - consumersIter: iter, - }) - }) - if err != nil { - log.Debug("worker pool submit failure", - zap.String("error", err.Error()), - ) - } - - // don't stop trying - return nil - }) - if err != nil { - log.Debug("iterate daughter's consumers failed", - zap.String("error", err.Error()), - ) - } -} - -type iterDaughterPrm struct { - lastIter 
bool - - ei EpochIterationInfo - - id apireputation.PeerID - - consumersIter TrustIterator -} - -func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) { - initTrust, err := c.prm.InitialTrustSource.InitialTrust(p.id) - if err != nil { - c.opts.log.Debug("get initial trust failure", - zap.Stringer("daughter", p.id), - zap.String("error", err.Error()), - ) - - return - } - - daughterIter, err := c.prm.DaughterTrustSource.InitDaughterIterator(p.ei, p.id) - if err != nil { - c.opts.log.Debug("daughter trust iterator's init failure", - zap.String("error", err.Error()), - ) - - return - } - - sum := reputation.TrustZero - - err = p.consumersIter.Iterate(func(trust reputation.Trust) error { - if !p.lastIter { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - } - - sum.Add(trust.Value()) - return nil - }) - if err != nil { - c.opts.log.Debug("iterate over daughter's trusts failure", - zap.String("error", err.Error()), - ) - - return - } - - // Alpha * Pd - initTrust.Mul(c.alpha) - - sum.Mul(c.beta) - sum.Add(initTrust) - - var intermediateTrust eigentrust.IterationTrust - - intermediateTrust.SetEpoch(p.ei.Epoch()) - intermediateTrust.SetPeer(p.id) - intermediateTrust.SetI(p.ei.I()) - - if p.lastIter { - c.processLastIteration(p, intermediateTrust, sum) - } else { - c.processIntermediateIteration(ctx, p, daughterIter, sum) - } -} - -func (c *Calculator) processLastIteration(p iterDaughterPrm, intermediateTrust eigentrust.IterationTrust, sum reputation.TrustValue) { - finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ei) - if err != nil { - c.opts.log.Debug("init writer failure", - zap.String("error", err.Error()), - ) - - return - } - - intermediateTrust.SetValue(sum) - - err = finalWriter.WriteIntermediateTrust(intermediateTrust) - if err != nil { - c.opts.log.Debug("write final result failure", - zap.String("error", err.Error()), - ) - - return - } -} - -func (c *Calculator) processIntermediateIteration(ctx 
context.Context, p iterDaughterPrm, daughterIter TrustIterator, sum reputation.TrustValue) { - intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ei) - if err != nil { - c.opts.log.Debug("init writer failure", - zap.String("error", err.Error()), - ) - - return - } - - err = daughterIter.Iterate(func(trust reputation.Trust) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - val := trust.Value() - val.Mul(sum) - - trust.SetValue(val) - - err := intermediateWriter.Write(ctx, trust) - if err != nil { - c.opts.log.Debug("write value failure", - zap.String("error", err.Error()), - ) - } - - return nil - }) - if err != nil { - c.opts.log.Debug("iterate daughter trusts failure", - zap.String("error", err.Error()), - ) - } - - err = intermediateWriter.Close(ctx) - if err != nil { - c.opts.log.Error( - "could not close writer", - zap.String("error", err.Error()), - ) - } -} - -func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochIterationInfo) { - daughterIter, err := c.prm.DaughterTrustSource.InitAllDaughtersIterator(epochInfo) - if err != nil { - c.opts.log.Debug("all daughters trust iterator's init failure", - zap.String("error", err.Error()), - ) - - return - } - - intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(epochInfo) - if err != nil { - c.opts.log.Debug("init writer failure", - zap.String("error", err.Error()), - ) - - return - } - - err = daughterIter.Iterate(func(daughter apireputation.PeerID, iterator TrustIterator) error { - return iterator.Iterate(func(trust reputation.Trust) error { - trusted := trust.Peer() - - initTrust, err := c.prm.InitialTrustSource.InitialTrust(trusted) - if err != nil { - c.opts.log.Debug("get initial trust failure", - zap.Stringer("peer", trusted), - zap.String("error", err.Error()), - ) - - // don't stop on single failure - return nil - } - - initTrust.Mul(trust.Value()) - trust.SetValue(initTrust) - - err = intermediateWriter.Write(ctx, trust) - 
if err != nil { - c.opts.log.Debug("write value failure", - zap.String("error", err.Error()), - ) - - // don't stop on single failure - } - - return nil - }) - }) - if err != nil { - c.opts.log.Debug("iterate over all daughters failure", - zap.String("error", err.Error()), - ) - } - - err = intermediateWriter.Close(ctx) - if err != nil { - c.opts.log.Debug("could not close writer", - zap.String("error", err.Error()), - ) - } -} diff --git a/pkg/services/reputation/eigentrust/calculator/deps.go b/pkg/services/reputation/eigentrust/calculator/deps.go deleted file mode 100644 index a22d1df76..000000000 --- a/pkg/services/reputation/eigentrust/calculator/deps.go +++ /dev/null @@ -1,74 +0,0 @@ -package eigentrustcalc - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -type EpochIterationInfo interface { - // Must return epoch number to select the values - // for global trust calculation. - Epoch() uint64 - - // Must return the sequence number of the iteration. - I() uint32 -} - -// InitialTrustSource must provide initial(non-calculated) -// trusts to current node's daughter. Realization may depends -// on daughter. -type InitialTrustSource interface { - InitialTrust(apireputation.PeerID) (reputation.TrustValue, error) -} - -// TrustIterator must iterate over all retrieved(or calculated) trusts -// and call passed TrustHandler on them. -type TrustIterator interface { - Iterate(reputation.TrustHandler) error -} - -type PeerTrustsHandler func(apireputation.PeerID, TrustIterator) error - -// PeerTrustsIterator must iterate over all nodes(PeerIDs) and provide -// TrustIterator for iteration over node's Trusts to others peers. 
-type PeerTrustsIterator interface { - Iterate(PeerTrustsHandler) error -} - -type DaughterTrustIteratorProvider interface { - // InitDaughterIterator must init TrustIterator - // that iterates over received local trusts from - // daughter p for epochInfo.Epoch() epoch. - InitDaughterIterator(epochInfo EpochIterationInfo, p apireputation.PeerID) (TrustIterator, error) - // InitAllDaughtersIterator must init PeerTrustsIterator - // that must iterate over all daughters of the current - // node(manager) and all trusts received from them for - // epochInfo.Epoch() epoch. - InitAllDaughtersIterator(epochInfo EpochIterationInfo) (PeerTrustsIterator, error) - // InitConsumersIterator must init PeerTrustsIterator - // that must iterate over all daughters of the current - // node(manager) and their consumers' trusts received - // from other managers for epochInfo.Epoch() epoch and - // epochInfo.I() iteration. - InitConsumersIterator(EpochIterationInfo) (PeerTrustsIterator, error) -} - -// IntermediateWriter must write intermediate result to contract. -// It may depends on realization either trust is sent directly to contract -// or via redirecting to other node. -type IntermediateWriter interface { - WriteIntermediateTrust(eigentrust.IterationTrust) error -} - -// IntermediateWriterProvider must provide ready-to-work -// IntermediateWriter. -type IntermediateWriterProvider interface { - InitIntermediateWriter(EpochIterationInfo) (IntermediateWriter, error) -} - -// AlphaProvider must provide information about required -// alpha parameter for eigen trust algorithm. 
-type AlphaProvider interface { - EigenTrustAlpha() (float64, error) -} diff --git a/pkg/services/reputation/eigentrust/calculator/opts.go b/pkg/services/reputation/eigentrust/calculator/opts.go deleted file mode 100644 index e1e572361..000000000 --- a/pkg/services/reputation/eigentrust/calculator/opts.go +++ /dev/null @@ -1,30 +0,0 @@ -package eigentrustcalc - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// Option sets an optional parameter of Controller. -type Option func(*options) - -type options struct { - log *logger.Logger -} - -func defaultOpts() *options { - return &options{ - log: &logger.Logger{Logger: zap.L()}, - } -} - -// WithLogger returns option to specify logging component. -// -// Ignores nil values. -func WithLogger(l *logger.Logger) Option { - return func(o *options) { - if l != nil { - o.log = l - } - } -} diff --git a/pkg/services/reputation/eigentrust/controller/calls.go b/pkg/services/reputation/eigentrust/controller/calls.go deleted file mode 100644 index 1753a430b..000000000 --- a/pkg/services/reputation/eigentrust/controller/calls.go +++ /dev/null @@ -1,72 +0,0 @@ -package eigentrustctrl - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" - "go.uber.org/zap" -) - -// ContinuePrm groups the required parameters of Continue operation. -type ContinuePrm struct { - Epoch uint64 -} - -type iterContext struct { - eigentrust.EpochIteration - - iterationNumber uint32 - last bool -} - -func (x iterContext) Last() bool { - return x.last -} - -// Continue moves the global reputation calculator to the next iteration. 
-func (c *Controller) Continue(ctx context.Context, prm ContinuePrm) { - c.mtx.Lock() - - { - iterCtx, ok := c.mCtx[prm.Epoch] - if !ok { - iterCtx = new(iterContext) - c.mCtx[prm.Epoch] = iterCtx - - iterCtx.EpochIteration.SetEpoch(prm.Epoch) - - iterations, err := c.prm.IterationsProvider.EigenTrustIterations() - if err != nil { - c.opts.log.Error("could not get EigenTrust iteration number", - zap.Error(err), - ) - } else { - iterCtx.iterationNumber = uint32(iterations) - } - } - - iterCtx.last = iterCtx.I() == iterCtx.iterationNumber-1 - - err := c.prm.WorkerPool.Submit(func() { - c.prm.DaughtersTrustCalculator.Calculate(ctx, iterCtx) - - // iteration++ - iterCtx.Increment() - }) - if err != nil { - c.opts.log.Debug("iteration submit failure", - zap.String("error", err.Error()), - ) - } - - if iterCtx.last { - // will only live while the application is alive. - // during normal operation of the system. Also, such information - // number as already processed, but in any case it grows up - // In this case and worker pool failure we can mark epoch - delete(c.mCtx, prm.Epoch) - } - } - - c.mtx.Unlock() -} diff --git a/pkg/services/reputation/eigentrust/controller/controller.go b/pkg/services/reputation/eigentrust/controller/controller.go deleted file mode 100644 index a6d0d4a82..000000000 --- a/pkg/services/reputation/eigentrust/controller/controller.go +++ /dev/null @@ -1,86 +0,0 @@ -package eigentrustctrl - -import ( - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" -) - -// Prm groups the required parameters of the Controller's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - // Component of computing iteration of EigenTrust algorithm. - // - // Must not be nil. 
- DaughtersTrustCalculator DaughtersTrustCalculator - - // IterationsProvider provides information about numbers - // of iterations for algorithm. - IterationsProvider IterationsProvider - - // Routine execution pool for single epoch iteration. - WorkerPool util.WorkerPool -} - -// Controller represents EigenTrust algorithm transient controller. -// -// Controller's main goal is to separate the two main stages of -// the calculation: -// 1. reporting local values to manager nodes -// 2. calculating global trusts of child nodes -// -// Calculation stages are controlled based on external signals -// that come from the application through the Controller's API. -// -// For correct operation, the controller must be created -// using the constructor (New) based on the required parameters -// and optional components. After successful creation, -// the constructor is immediately ready to work through -// API of external control of calculations and data transfer. -type Controller struct { - prm Prm - - opts *options - - mtx sync.Mutex - mCtx map[uint64]*iterContext -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -// New creates a new instance of the Controller. -// -// Panics if at least one value of the parameters is invalid. -// -// The created Controller does not require additional -// initialization and is completely ready for work. 
-func New(prm Prm, opts ...Option) *Controller { - switch { - case prm.IterationsProvider == nil: - panicOnPrmValue("IterationNumber", prm.IterationsProvider) - case prm.WorkerPool == nil: - panicOnPrmValue("WorkerPool", prm.WorkerPool) - case prm.DaughtersTrustCalculator == nil: - panicOnPrmValue("DaughtersTrustCalculator", prm.DaughtersTrustCalculator) - } - - o := defaultOpts() - - for _, opt := range opts { - opt(o) - } - - return &Controller{ - prm: prm, - opts: o, - mCtx: make(map[uint64]*iterContext), - } -} diff --git a/pkg/services/reputation/eigentrust/controller/deps.go b/pkg/services/reputation/eigentrust/controller/deps.go deleted file mode 100644 index c068f7cc4..000000000 --- a/pkg/services/reputation/eigentrust/controller/deps.go +++ /dev/null @@ -1,37 +0,0 @@ -package eigentrustctrl - -import "context" - -// IterationContext is a context of the i-th -// stage of iterative EigenTrust algorithm. -type IterationContext interface { - // Must return epoch number to select the values - // for global trust calculation. - Epoch() uint64 - - // Must return the sequence number of the iteration. - I() uint32 - - // Must return true if I() is the last iteration. - Last() bool -} - -// DaughtersTrustCalculator is an interface of entity -// responsible for calculating the global trust of -// daughter nodes in terms of EigenTrust algorithm. -type DaughtersTrustCalculator interface { - // Must perform the iteration step of the loop - // for computing the global trust of all daughter - // nodes and sending intermediate values - // according to EigenTrust description - // http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Ch.5.1. - // - // Execution should be interrupted if ctx.Last(). - Calculate(ctx context.Context, iter IterationContext) -} - -// IterationsProvider must provide information about numbers -// of iterations for algorithm. 
-type IterationsProvider interface { - EigenTrustIterations() (uint64, error) -} diff --git a/pkg/services/reputation/eigentrust/controller/opts.go b/pkg/services/reputation/eigentrust/controller/opts.go deleted file mode 100644 index 16bc61c2f..000000000 --- a/pkg/services/reputation/eigentrust/controller/opts.go +++ /dev/null @@ -1,30 +0,0 @@ -package eigentrustctrl - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// Option sets an optional parameter of Controller. -type Option func(*options) - -type options struct { - log *logger.Logger -} - -func defaultOpts() *options { - return &options{ - log: &logger.Logger{Logger: zap.L()}, - } -} - -// WithLogger returns option to specify logging component. -// -// Ignores nil values. -func WithLogger(l *logger.Logger) Option { - return func(o *options) { - if l != nil { - o.log = l - } - } -} diff --git a/pkg/services/reputation/eigentrust/iteration.go b/pkg/services/reputation/eigentrust/iteration.go deleted file mode 100644 index e4793f044..000000000 --- a/pkg/services/reputation/eigentrust/iteration.go +++ /dev/null @@ -1,44 +0,0 @@ -package eigentrust - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" -) - -type EpochIteration struct { - e uint64 - i uint32 -} - -func (x EpochIteration) Epoch() uint64 { - return x.e -} - -func (x *EpochIteration) SetEpoch(e uint64) { - x.e = e -} - -func (x EpochIteration) I() uint32 { - return x.i -} - -func (x *EpochIteration) SetI(i uint32) { - x.i = i -} - -func (x *EpochIteration) Increment() { - x.i++ -} - -type IterationTrust struct { - EpochIteration - reputation.Trust -} - -func NewEpochIteration(epoch uint64, iter uint32) *EpochIteration { - ei := EpochIteration{} - - ei.SetI(iter) - ei.SetEpoch(epoch) - - return &ei -} diff --git a/pkg/services/reputation/eigentrust/routes/builder.go b/pkg/services/reputation/eigentrust/routes/builder.go deleted file mode 100644 index ddd5a2ae0..000000000 
--- a/pkg/services/reputation/eigentrust/routes/builder.go +++ /dev/null @@ -1,59 +0,0 @@ -package routes - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// Prm groups the required parameters of the Builder's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - // Manager builder for current node. - // - // Must not be nil. - ManagerBuilder common.ManagerBuilder - - Log *logger.Logger -} - -// Builder represents component that routes node to its managers. -// -// For correct operation, Builder must be created using -// the constructor (New) based on the required parameters -// and optional components. After successful creation, -// the Builder is immediately ready to work through API. -type Builder struct { - managerBuilder common.ManagerBuilder - log *logger.Logger -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -// New creates a new instance of the Builder. -// -// Panics if at least one value of the parameters is invalid. -// -// The created Builder does not require additional -// initialization and is completely ready for work. 
-func New(prm Prm) *Builder { - switch { - case prm.ManagerBuilder == nil: - panicOnPrmValue("ManagerBuilder", prm.ManagerBuilder) - case prm.Log == nil: - panicOnPrmValue("Logger", prm.Log) - } - - return &Builder{ - managerBuilder: prm.ManagerBuilder, - log: prm.Log, - } -} diff --git a/pkg/services/reputation/eigentrust/routes/calls.go b/pkg/services/reputation/eigentrust/routes/calls.go deleted file mode 100644 index c4d9688a9..000000000 --- a/pkg/services/reputation/eigentrust/routes/calls.go +++ /dev/null @@ -1,32 +0,0 @@ -package routes - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "go.uber.org/zap" -) - -// NextStage builds Manager list for trusted node and returns it directly. -// -// If passed route has more than one point, then endpoint of the route is reached. -func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) { - passedLen := len(passed) - - b.log.Debug("building next stage for trust route", - zap.Uint64("epoch", epoch), - zap.Int("passed_length", passedLen), - ) - - if passedLen > 1 { - return nil, nil - } - - route, err := b.managerBuilder.BuildManagers(epoch, t.Peer()) - if err != nil { - return nil, fmt.Errorf("could not build managers for epoch: %d: %w", epoch, err) - } - - return route, nil -} diff --git a/pkg/services/reputation/eigentrust/storage/consumers/calls.go b/pkg/services/reputation/eigentrust/storage/consumers/calls.go deleted file mode 100644 index 55a4d6f3d..000000000 --- a/pkg/services/reputation/eigentrust/storage/consumers/calls.go +++ /dev/null @@ -1,201 +0,0 @@ -package consumerstorage - -import ( - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust" - eigentrustcalc 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -// Put saves intermediate trust of the consumer to daughter peer. -func (x *Storage) Put(trust eigentrust.IterationTrust) { - var s *iterationConsumersStorage - - x.mtx.Lock() - - { - epoch := trust.Epoch() - - s = x.mItems[epoch] - if s == nil { - s = &iterationConsumersStorage{ - mItems: make(map[uint32]*ConsumersStorage, 1), - } - - x.mItems[epoch] = s - } - } - - x.mtx.Unlock() - - s.put(trust) -} - -// Consumers returns the storage of trusts of the consumers of the daughter peers -// for particular iteration of EigenTrust calculation for particular epoch. -// -// Returns false if there is no data for the epoch and iter. -func (x *Storage) Consumers(epoch uint64, iter uint32) (*ConsumersStorage, bool) { - var ( - s *iterationConsumersStorage - ok bool - ) - - x.mtx.Lock() - - { - s, ok = x.mItems[epoch] - } - - x.mtx.Unlock() - - if !ok { - return nil, false - } - - return s.consumers(iter) -} - -// maps iteration numbers of EigenTrust algorithm to repositories -// of the trusts of the consumers of the daughter peers. -type iterationConsumersStorage struct { - mtx sync.RWMutex - - mItems map[uint32]*ConsumersStorage -} - -func (x *iterationConsumersStorage) put(trust eigentrust.IterationTrust) { - var s *ConsumersStorage - - x.mtx.Lock() - - { - iter := trust.I() - - s = x.mItems[iter] - if s == nil { - s = &ConsumersStorage{ - mItems: make(map[string]*ConsumersTrusts, 1), - } - - x.mItems[iter] = s - } - } - - x.mtx.Unlock() - - s.put(trust) -} - -func (x *iterationConsumersStorage) consumers(iter uint32) (s *ConsumersStorage, ok bool) { - x.mtx.Lock() - - { - s, ok = x.mItems[iter] - } - - x.mtx.Unlock() - - return -} - -// ConsumersStorage represents in-memory storage of intermediate trusts -// of the peer consumers. 
-// -// Maps daughter peers to repositories of the trusts of their consumers. -type ConsumersStorage struct { - mtx sync.RWMutex - - mItems map[string]*ConsumersTrusts -} - -func (x *ConsumersStorage) put(trust eigentrust.IterationTrust) { - var s *ConsumersTrusts - - x.mtx.Lock() - - { - daughter := trust.Peer().EncodeToString() - - s = x.mItems[daughter] - if s == nil { - s = &ConsumersTrusts{ - mItems: make(map[string]reputation.Trust, 1), - } - - x.mItems[daughter] = s - } - } - - x.mtx.Unlock() - - s.put(trust) -} - -// Iterate passes IDs of the daughter peers with the trusts of their consumers to h. -// -// Returns errors from h directly. -func (x *ConsumersStorage) Iterate(h eigentrustcalc.PeerTrustsHandler) (err error) { - x.mtx.RLock() - - { - for strTrusted, trusts := range x.mItems { - var trusted apireputation.PeerID - - if strTrusted != "" { - err = trusted.DecodeString(strTrusted) - if err != nil { - panic(fmt.Sprintf("decode peer ID string %s: %v", strTrusted, err)) - } - } - - if err = h(trusted, trusts); err != nil { - break - } - } - } - - x.mtx.RUnlock() - - return -} - -// ConsumersTrusts represents in-memory storage of the trusts -// of the consumer peers to some other peer. -type ConsumersTrusts struct { - mtx sync.RWMutex - - mItems map[string]reputation.Trust -} - -func (x *ConsumersTrusts) put(trust eigentrust.IterationTrust) { - x.mtx.Lock() - - { - x.mItems[trust.TrustingPeer().EncodeToString()] = trust.Trust - } - - x.mtx.Unlock() -} - -// Iterate passes all stored trusts to h. -// -// Returns errors from h directly. 
-func (x *ConsumersTrusts) Iterate(h reputation.TrustHandler) (err error) { - x.mtx.RLock() - - { - for _, trust := range x.mItems { - if err = h(trust); err != nil { - break - } - } - } - - x.mtx.RUnlock() - - return -} diff --git a/pkg/services/reputation/eigentrust/storage/consumers/storage.go b/pkg/services/reputation/eigentrust/storage/consumers/storage.go deleted file mode 100644 index ee811d84b..000000000 --- a/pkg/services/reputation/eigentrust/storage/consumers/storage.go +++ /dev/null @@ -1,40 +0,0 @@ -package consumerstorage - -import ( - "sync" -) - -// Prm groups the required parameters of the Storage's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -// -// The component is not parameterizable at the moment. -type Prm struct{} - -// Storage represents in-memory storage of the trusts -// of the consumer peers. -// -// It maps epoch numbers to the repositories of intermediate -// trusts of the consumers of the daughter peers. -// -// For correct operation, Storage must be created -// using the constructor (New) based on the required parameters -// and optional components. After successful creation, -// Storage is immediately ready to work through API. -type Storage struct { - mtx sync.RWMutex - - mItems map[uint64]*iterationConsumersStorage -} - -// New creates a new instance of the Storage. -// -// The created Storage does not require additional -// initialization and is completely ready for work. 
-func New(_ Prm) *Storage { - return &Storage{ - mItems: make(map[uint64]*iterationConsumersStorage), - } -} diff --git a/pkg/services/reputation/eigentrust/storage/daughters/calls.go b/pkg/services/reputation/eigentrust/storage/daughters/calls.go deleted file mode 100644 index eb229365e..000000000 --- a/pkg/services/reputation/eigentrust/storage/daughters/calls.go +++ /dev/null @@ -1,177 +0,0 @@ -package daughters - -import ( - "fmt" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -// Put saves daughter peer's trust to its provider for the epoch. -func (x *Storage) Put(epoch uint64, trust reputation.Trust) { - var s *DaughterStorage - - x.mtx.Lock() - - { - s = x.mItems[epoch] - if s == nil { - s = &DaughterStorage{ - mItems: make(map[string]*DaughterTrusts, 1), - } - - x.mItems[epoch] = s - } - } - - x.mtx.Unlock() - - s.put(trust) -} - -// DaughterTrusts returns daughter trusts for the epoch. -// -// Returns false if there is no data for the epoch and daughter. -func (x *Storage) DaughterTrusts(epoch uint64, daughter apireputation.PeerID) (*DaughterTrusts, bool) { - var ( - s *DaughterStorage - ok bool - ) - - x.mtx.RLock() - - { - s, ok = x.mItems[epoch] - } - - x.mtx.RUnlock() - - if !ok { - return nil, false - } - - return s.daughterTrusts(daughter) -} - -// AllDaughterTrusts returns daughter iterator for the epoch. -// -// Returns false if there is no data for the epoch and daughter. -func (x *Storage) AllDaughterTrusts(epoch uint64) (*DaughterStorage, bool) { - x.mtx.RLock() - defer x.mtx.RUnlock() - - s, ok := x.mItems[epoch] - - return s, ok -} - -// DaughterStorage maps IDs of daughter peers to repositories of the local trusts to their providers. 
-type DaughterStorage struct { - mtx sync.RWMutex - - mItems map[string]*DaughterTrusts -} - -// Iterate passes IDs of the daughter peers with their trusts to h. -// -// Returns errors from h directly. -func (x *DaughterStorage) Iterate(h eigentrustcalc.PeerTrustsHandler) (err error) { - x.mtx.RLock() - - { - for strDaughter, daughterTrusts := range x.mItems { - var daughter apireputation.PeerID - - if strDaughter != "" { - err = daughter.DecodeString(strDaughter) - if err != nil { - panic(fmt.Sprintf("decode peer ID string %s: %v", strDaughter, err)) - } - } - - if err = h(daughter, daughterTrusts); err != nil { - break - } - } - } - - x.mtx.RUnlock() - - return -} - -func (x *DaughterStorage) put(trust reputation.Trust) { - var dt *DaughterTrusts - - x.mtx.Lock() - - { - trusting := trust.TrustingPeer().EncodeToString() - - dt = x.mItems[trusting] - if dt == nil { - dt = &DaughterTrusts{ - mItems: make(map[string]reputation.Trust, 1), - } - - x.mItems[trusting] = dt - } - } - - x.mtx.Unlock() - - dt.put(trust) -} - -func (x *DaughterStorage) daughterTrusts(id apireputation.PeerID) (dt *DaughterTrusts, ok bool) { - x.mtx.RLock() - - { - dt, ok = x.mItems[id.EncodeToString()] - } - - x.mtx.RUnlock() - - return -} - -// DaughterTrusts represents in-memory storage of local trusts -// of the daughter peer to its providers. -// -// Maps IDs of daughter's providers to the local trusts to them. -type DaughterTrusts struct { - mtx sync.RWMutex - - mItems map[string]reputation.Trust -} - -func (x *DaughterTrusts) put(trust reputation.Trust) { - x.mtx.Lock() - - { - x.mItems[trust.Peer().EncodeToString()] = trust - } - - x.mtx.Unlock() -} - -// Iterate passes all stored trusts to h. -// -// Returns errors from h directly. 
-func (x *DaughterTrusts) Iterate(h reputation.TrustHandler) (err error) { - x.mtx.RLock() - - { - for _, trust := range x.mItems { - if err = h(trust); err != nil { - break - } - } - } - - x.mtx.RUnlock() - - return -} diff --git a/pkg/services/reputation/eigentrust/storage/daughters/storage.go b/pkg/services/reputation/eigentrust/storage/daughters/storage.go deleted file mode 100644 index 26399fce4..000000000 --- a/pkg/services/reputation/eigentrust/storage/daughters/storage.go +++ /dev/null @@ -1,38 +0,0 @@ -package daughters - -import "sync" - -// Prm groups the required parameters of the Storage's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -// -// The component is not parameterizable at the moment. -type Prm struct{} - -// Storage represents in-memory storage of local trust -// values of the daughter peers. -// -// It maps epoch numbers to the repositories of local trusts -// of the daughter peers. -// -// For correct operation, Storage must be created -// using the constructor (New) based on the required parameters -// and optional components. After successful creation, -// Storage is immediately ready to work through API. -type Storage struct { - mtx sync.RWMutex - - mItems map[uint64]*DaughterStorage -} - -// New creates a new instance of the Storage. -// -// The created Storage does not require additional -// initialization and is completely ready for work. 
-func New(_ Prm) *Storage { - return &Storage{ - mItems: make(map[uint64]*DaughterStorage), - } -} diff --git a/pkg/services/reputation/local/controller/calls.go b/pkg/services/reputation/local/controller/calls.go deleted file mode 100644 index 80fa772d6..000000000 --- a/pkg/services/reputation/local/controller/calls.go +++ /dev/null @@ -1,192 +0,0 @@ -package trustcontroller - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// ReportPrm groups the required parameters of the Controller.Report method. -type ReportPrm struct { - epoch uint64 -} - -// SetEpoch sets epoch number to select reputation values. -func (p *ReportPrm) SetEpoch(e uint64) { - p.epoch = e -} - -// Report reports local reputation values. -// -// Single Report operation overtakes all data from LocalTrustSource -// to LocalTrustTarget (Controller's parameters). -// -// Each call acquires a report context for an Epoch parameter. -// At the very end of the operation, the context is released. 
-func (c *Controller) Report(ctx context.Context, prm ReportPrm) { - // acquire report - rCtx, reporter := c.acquireReporter(ctx, prm.epoch) - if reporter == nil { - return - } - - // report local trust values - reporter.report(rCtx) - - // finally stop and free the report - c.freeReport(prm.epoch, reporter.log) -} - -type reporter struct { - epoch uint64 - - ctrl *Controller - - log *logger.Logger - - ep common.EpochProvider -} - -type epochProvider struct { - epoch uint64 -} - -func (c epochProvider) Epoch() uint64 { - return c.epoch -} - -func (c *Controller) acquireReporter(ctx context.Context, epoch uint64) (context.Context, *reporter) { - started := true - - c.mtx.Lock() - { - if cancel := c.mCtx[epoch]; cancel == nil { - ctx, cancel = context.WithCancel(ctx) - c.mCtx[epoch] = cancel - started = false - } - } - c.mtx.Unlock() - - log := &logger.Logger{Logger: c.opts.log.With( - zap.Uint64("epoch", epoch), - )} - - if started { - log.Debug("report is already started") - return ctx, nil - } - - return ctx, &reporter{ - epoch: epoch, - ctrl: c, - log: log, - ep: &epochProvider{ - epoch: epoch, - }, - } -} - -func (c *reporter) report(ctx context.Context) { - c.log.Debug("starting to report local trust values") - - // initialize iterator over locally collected values - iterator, err := c.ctrl.prm.LocalTrustSource.InitIterator(c.ep) - if err != nil { - c.log.Debug("could not initialize iterator over local trust values", - zap.String("error", err.Error()), - ) - - return - } - - // initialize target of local trust values - targetWriter, err := c.ctrl.prm.LocalTrustTarget.InitWriter(c.ep) - if err != nil { - c.log.Debug("could not initialize local trust target", - zap.String("error", err.Error()), - ) - - return - } - - // iterate over all values and write them to the target - err = iterator.Iterate( - func(t reputation.Trust) error { - // check if context is done - if err := ctx.Err(); err != nil { - return err - } - - return targetWriter.Write(ctx, t) - }, - ) - 
if err != nil && !errors.Is(err, context.Canceled) { - c.log.Debug("iterator over local trust failed", - zap.String("error", err.Error()), - ) - - return - } - - // finish writing - err = targetWriter.Close(ctx) - if err != nil { - c.log.Debug("could not finish writing local trust values", - zap.String("error", err.Error()), - ) - - return - } - - c.log.Debug("reporting successfully finished") -} - -func (c *Controller) freeReport(epoch uint64, log *logger.Logger) { - var stopped bool - - c.mtx.Lock() - - { - var cancel context.CancelFunc - - cancel, stopped = c.mCtx[epoch] - - if stopped { - cancel() - delete(c.mCtx, epoch) - } - } - - c.mtx.Unlock() - - if stopped { - log.Debug("reporting successfully interrupted") - } else { - log.Debug("reporting is not started or already interrupted") - } -} - -// StopPrm groups the required parameters of the Controller.Stop method. -type StopPrm struct { - epoch uint64 -} - -// SetEpoch sets epoch number the processing of the values of which must be interrupted. -func (p *StopPrm) SetEpoch(e uint64) { - p.epoch = e -} - -// Stop interrupts the processing of local trust values. -// -// Releases acquired report context. -func (c *Controller) Stop(prm StopPrm) { - c.freeReport( - prm.epoch, - &logger.Logger{Logger: c.opts.log.With(zap.Uint64("epoch", prm.epoch))}, - ) -} diff --git a/pkg/services/reputation/local/controller/controller.go b/pkg/services/reputation/local/controller/controller.go deleted file mode 100644 index 373df36db..000000000 --- a/pkg/services/reputation/local/controller/controller.go +++ /dev/null @@ -1,84 +0,0 @@ -package trustcontroller - -import ( - "context" - "fmt" - "sync" - - reputationrouter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common/router" -) - -// Prm groups the required parameters of the Controller's constructor. -// -// All values must comply with the requirements imposed on them. 
-// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - // Iterator over the reputation values - // collected by the node locally. - // - // Must not be nil. - LocalTrustSource IteratorProvider - - // Place of recording the local values of - // trust to other nodes. - // - // Must not be nil. - LocalTrustTarget *reputationrouter.Router -} - -// Controller represents main handler for starting -// and interrupting the reporting local trust values. -// -// It binds the interfaces of the local value stores -// to the target storage points. Controller is abstracted -// from the internal storage device and the network location -// of the connecting components. At its core, it is a -// high-level start-stop trigger for reporting. -// -// For correct operation, the controller must be created -// using the constructor (New) based on the required parameters -// and optional components. After successful creation, -// the constructor is immediately ready to work through -// API of external control of calculations and data transfer. -type Controller struct { - prm Prm - - opts *options - - mtx sync.Mutex - mCtx map[uint64]context.CancelFunc -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -// New creates a new instance of the Controller. -// -// Panics if at least one value of the parameters is invalid. -// -// The created Controller does not require additional -// initialization and is completely ready for work. 
-func New(prm Prm, opts ...Option) *Controller { - switch { - case prm.LocalTrustSource == nil: - panicOnPrmValue("LocalTrustSource", prm.LocalTrustSource) - case prm.LocalTrustTarget == nil: - panicOnPrmValue("LocalTrustTarget", prm.LocalTrustTarget) - } - - o := defaultOpts() - - for _, opt := range opts { - opt(o) - } - - return &Controller{ - prm: prm, - opts: o, - mCtx: make(map[uint64]context.CancelFunc), - } -} diff --git a/pkg/services/reputation/local/controller/deps.go b/pkg/services/reputation/local/controller/deps.go deleted file mode 100644 index 6f4a29c99..000000000 --- a/pkg/services/reputation/local/controller/deps.go +++ /dev/null @@ -1,34 +0,0 @@ -package trustcontroller - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" -) - -// Iterator is a group of methods provided by entity -// which can iterate over a group of reputation.Trust values. -type Iterator interface { - // Iterate must start an iterator over all trust values. - // For each value should call a handler, the error - // of which should be directly returned from the method. - // - // Internal failures of the iterator are also signaled via - // an error. After a successful call to the last value - // handler, nil should be returned. - Iterate(reputation.TrustHandler) error -} - -// IteratorProvider is a group of methods provided -// by entity which generates iterators over -// reputation.Trust values. -type IteratorProvider interface { - // InitIterator should return an initialized Iterator - // that iterates over values from IteratorContext.Epoch() epoch. - // - // Initialization problems are reported via error. - // If no error was returned, then the Iterator must not be nil. - // - // Implementations can have different logic for different - // contexts, so specific ones may document their own behavior. 
- InitIterator(common.EpochProvider) (Iterator, error) -} diff --git a/pkg/services/reputation/local/controller/opts.go b/pkg/services/reputation/local/controller/opts.go deleted file mode 100644 index 385a4243b..000000000 --- a/pkg/services/reputation/local/controller/opts.go +++ /dev/null @@ -1,30 +0,0 @@ -package trustcontroller - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// Option sets an optional parameter of Controller. -type Option func(*options) - -type options struct { - log *logger.Logger -} - -func defaultOpts() *options { - return &options{ - log: &logger.Logger{Logger: zap.L()}, - } -} - -// WithLogger returns option to specify logging component. -// -// Ignores nil values. -func WithLogger(l *logger.Logger) Option { - return func(o *options) { - if l != nil { - o.log = l - } - } -} diff --git a/pkg/services/reputation/local/controller/util.go b/pkg/services/reputation/local/controller/util.go deleted file mode 100644 index 122550498..000000000 --- a/pkg/services/reputation/local/controller/util.go +++ /dev/null @@ -1,32 +0,0 @@ -package trustcontroller - -import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - -type storageWrapper struct { - w common.Writer - i Iterator -} - -func (s storageWrapper) InitIterator(common.EpochProvider) (Iterator, error) { - return s.i, nil -} - -func (s storageWrapper) InitWriter(common.EpochProvider) (common.Writer, error) { - return s.w, nil -} - -// SimpleIteratorProvider returns IteratorProvider that provides -// static context-independent Iterator. -func SimpleIteratorProvider(i Iterator) IteratorProvider { - return &storageWrapper{ - i: i, - } -} - -// SimpleWriterProvider returns WriterProvider that provides -// static context-independent Writer. 
-func SimpleWriterProvider(w common.Writer) common.WriterProvider { - return &storageWrapper{ - w: w, - } -} diff --git a/pkg/services/reputation/local/routes/builder.go b/pkg/services/reputation/local/routes/builder.go deleted file mode 100644 index ddd5a2ae0..000000000 --- a/pkg/services/reputation/local/routes/builder.go +++ /dev/null @@ -1,59 +0,0 @@ -package routes - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// Prm groups the required parameters of the Builder's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct { - // Manager builder for current node. - // - // Must not be nil. - ManagerBuilder common.ManagerBuilder - - Log *logger.Logger -} - -// Builder represents component that routes node to its managers. -// -// For correct operation, Builder must be created using -// the constructor (New) based on the required parameters -// and optional components. After successful creation, -// the Builder is immediately ready to work through API. -type Builder struct { - managerBuilder common.ManagerBuilder - log *logger.Logger -} - -const invalidPrmValFmt = "invalid parameter %s (%T):%v" - -func panicOnPrmValue(n string, v any) { - panic(fmt.Sprintf(invalidPrmValFmt, n, v, v)) -} - -// New creates a new instance of the Builder. -// -// Panics if at least one value of the parameters is invalid. -// -// The created Builder does not require additional -// initialization and is completely ready for work. 
-func New(prm Prm) *Builder { - switch { - case prm.ManagerBuilder == nil: - panicOnPrmValue("ManagerBuilder", prm.ManagerBuilder) - case prm.Log == nil: - panicOnPrmValue("Logger", prm.Log) - } - - return &Builder{ - managerBuilder: prm.ManagerBuilder, - log: prm.Log, - } -} diff --git a/pkg/services/reputation/local/routes/calls.go b/pkg/services/reputation/local/routes/calls.go deleted file mode 100644 index f0eae16fe..000000000 --- a/pkg/services/reputation/local/routes/calls.go +++ /dev/null @@ -1,32 +0,0 @@ -package routes - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common" - "go.uber.org/zap" -) - -// NextStage builds Manager list for trusting node and returns it directly. -// -// If passed route has more than one point, then endpoint of the route is reached. -func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) { - passedLen := len(passed) - - b.log.Debug("building next stage for local trust route", - zap.Uint64("epoch", epoch), - zap.Int("passed_length", passedLen), - ) - - if passedLen > 1 { - return nil, nil - } - - route, err := b.managerBuilder.BuildManagers(epoch, t.TrustingPeer()) - if err != nil { - return nil, fmt.Errorf("could not build managers for epoch: %d: %w", epoch, err) - } - - return route, nil -} diff --git a/pkg/services/reputation/local/storage/calls.go b/pkg/services/reputation/local/storage/calls.go deleted file mode 100644 index 14acbb64f..000000000 --- a/pkg/services/reputation/local/storage/calls.go +++ /dev/null @@ -1,175 +0,0 @@ -package truststorage - -import ( - "errors" - "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation" - apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -// UpdatePrm groups the parameters of Storage's Update operation. 
-type UpdatePrm struct { - sat bool - - epoch uint64 - - peer apireputation.PeerID -} - -// SetEpoch sets number of the epoch -// when the interaction happened. -func (p *UpdatePrm) SetEpoch(e uint64) { - p.epoch = e -} - -// SetPeer sets identifier of the peer -// with which the local node interacted. -func (p *UpdatePrm) SetPeer(id apireputation.PeerID) { - p.peer = id -} - -// SetSatisfactory sets successful completion status. -func (p *UpdatePrm) SetSatisfactory(sat bool) { - p.sat = sat -} - -type trustValue struct { - sat, all int -} - -// EpochTrustValueStorage represents storage of -// the trust values by particular epoch. -type EpochTrustValueStorage struct { - mtx sync.RWMutex - - mItems map[string]*trustValue -} - -func newTrustValueStorage() *EpochTrustValueStorage { - return &EpochTrustValueStorage{ - mItems: make(map[string]*trustValue, 1), - } -} - -func stringifyPeerID(id apireputation.PeerID) string { - return string(id.PublicKey()) -} - -func peerIDFromString(str string) (res apireputation.PeerID) { - res.SetPublicKey([]byte(str)) - return -} - -func (s *EpochTrustValueStorage) update(prm UpdatePrm) { - s.mtx.Lock() - - { - strID := stringifyPeerID(prm.peer) - - val, ok := s.mItems[strID] - if !ok { - val = new(trustValue) - s.mItems[strID] = val - } - - if prm.sat { - val.sat++ - } - - val.all++ - } - - s.mtx.Unlock() -} - -// Update updates the number of satisfactory transactions with peer. -func (s *Storage) Update(prm UpdatePrm) { - var trustStorage *EpochTrustValueStorage - - s.mtx.Lock() - - { - var ( - ok bool - epoch = prm.epoch - ) - - trustStorage, ok = s.mItems[epoch] - if !ok { - trustStorage = newTrustValueStorage() - s.mItems[epoch] = trustStorage - } - } - - s.mtx.Unlock() - - trustStorage.update(prm) -} - -// ErrNoPositiveTrust is returned by iterator when -// there is no positive number of successful transactions. -var ErrNoPositiveTrust = errors.New("no positive trust") - -// DataForEpoch returns EpochValueStorage for epoch. 
-// -// If there is no data for the epoch, ErrNoPositiveTrust returns. -func (s *Storage) DataForEpoch(epoch uint64) (*EpochTrustValueStorage, error) { - s.mtx.RLock() - trustStorage, ok := s.mItems[epoch] - s.mtx.RUnlock() - - if !ok { - return nil, ErrNoPositiveTrust - } - - return trustStorage, nil -} - -// Iterate iterates over normalized trust values and passes them to parameterized handler. -// -// Values are normalized according to http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Chapter 4.5. -// If divisor in formula is zero, ErrNoPositiveTrust returns. -func (s *EpochTrustValueStorage) Iterate(h reputation.TrustHandler) (err error) { - s.mtx.RLock() - - { - var ( - sum reputation.TrustValue - mVals = make(map[string]reputation.TrustValue, len(s.mItems)) - ) - - // iterate first time to calculate normalizing divisor - for strID, val := range s.mItems { - if val.all > 0 { - num := reputation.TrustValueFromInt(val.sat) - denom := reputation.TrustValueFromInt(val.all) - - v := num.Div(denom) - - mVals[strID] = v - - sum.Add(v) - } - } - - err = ErrNoPositiveTrust - - if !sum.IsZero() { - for strID, val := range mVals { - t := reputation.Trust{} - - t.SetPeer(peerIDFromString(strID)) - t.SetValue(val.Div(sum)) - - if err = h(t); err != nil { - break - } - } - } - } - - s.mtx.RUnlock() - - return -} diff --git a/pkg/services/reputation/local/storage/storage.go b/pkg/services/reputation/local/storage/storage.go deleted file mode 100644 index d7e54a3fc..000000000 --- a/pkg/services/reputation/local/storage/storage.go +++ /dev/null @@ -1,41 +0,0 @@ -package truststorage - -import ( - "sync" -) - -// Prm groups the required parameters of the Storage's constructor. -// -// All values must comply with the requirements imposed on them. -// Passing incorrect parameter values will result in constructor -// failure (error or panic depending on the implementation). -type Prm struct{} - -// Storage represents in-memory storage of -// local reputation values. 
-// -// Storage provides access to normalized local trust -// values through iterator interface. -// -// For correct operation, Storage must be created -// using the constructor (New) based on the required parameters -// and optional components. After successful creation, -// Storage is immediately ready to work through API. -type Storage struct { - prm Prm - - mtx sync.RWMutex - - mItems map[uint64]*EpochTrustValueStorage -} - -// New creates a new instance of the Storage. -// -// The created Storage does not require additional -// initialization and is completely ready for work. -func New(prm Prm) *Storage { - return &Storage{ - prm: prm, - mItems: make(map[uint64]*EpochTrustValueStorage), - } -} diff --git a/pkg/services/reputation/rpc/response.go b/pkg/services/reputation/rpc/response.go deleted file mode 100644 index 808a0a476..000000000 --- a/pkg/services/reputation/rpc/response.go +++ /dev/null @@ -1,50 +0,0 @@ -package reputationrpc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" -) - -type responseService struct { - respSvc *response.Service - - svc Server -} - -// NewResponseService returns reputation service server instance that passes -// internal service call to response service. 
-func NewResponseService(cnrSvc Server, respSvc *response.Service) Server { - return &responseService{ - respSvc: respSvc, - svc: cnrSvc, - } -} - -func (s *responseService) AnnounceLocalTrust(ctx context.Context, req *reputation.AnnounceLocalTrustRequest) (*reputation.AnnounceLocalTrustResponse, error) { - resp, err := s.respSvc.HandleUnaryRequest(ctx, req, - func(ctx context.Context, req any) (util.ResponseMessage, error) { - return s.svc.AnnounceLocalTrust(ctx, req.(*reputation.AnnounceLocalTrustRequest)) - }, - ) - if err != nil { - return nil, err - } - - return resp.(*reputation.AnnounceLocalTrustResponse), nil -} - -func (s *responseService) AnnounceIntermediateResult(ctx context.Context, req *reputation.AnnounceIntermediateResultRequest) (*reputation.AnnounceIntermediateResultResponse, error) { - resp, err := s.respSvc.HandleUnaryRequest(ctx, req, - func(ctx context.Context, req any) (util.ResponseMessage, error) { - return s.svc.AnnounceIntermediateResult(ctx, req.(*reputation.AnnounceIntermediateResultRequest)) - }, - ) - if err != nil { - return nil, err - } - - return resp.(*reputation.AnnounceIntermediateResultResponse), nil -} diff --git a/pkg/services/reputation/rpc/server.go b/pkg/services/reputation/rpc/server.go deleted file mode 100644 index 78af30ea7..000000000 --- a/pkg/services/reputation/rpc/server.go +++ /dev/null @@ -1,13 +0,0 @@ -package reputationrpc - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation" -) - -// Server is an interface of the FrostFS API v2 Reputation service server. 
-type Server interface { - AnnounceLocalTrust(context.Context, *reputation.AnnounceLocalTrustRequest) (*reputation.AnnounceLocalTrustResponse, error) - AnnounceIntermediateResult(context.Context, *reputation.AnnounceIntermediateResultRequest) (*reputation.AnnounceIntermediateResultResponse, error) -} diff --git a/pkg/services/reputation/rpc/sign.go b/pkg/services/reputation/rpc/sign.go deleted file mode 100644 index 9db06ff1e..000000000 --- a/pkg/services/reputation/rpc/sign.go +++ /dev/null @@ -1,54 +0,0 @@ -package reputationrpc - -import ( - "context" - "crypto/ecdsa" - - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" -) - -type signService struct { - sigSvc *util.SignService - - svc Server -} - -func NewSignService(key *ecdsa.PrivateKey, svc Server) Server { - return &signService{ - sigSvc: util.NewUnarySignService(key), - svc: svc, - } -} - -func (s *signService) AnnounceLocalTrust(ctx context.Context, req *reputation.AnnounceLocalTrustRequest) (*reputation.AnnounceLocalTrustResponse, error) { - resp, err := s.sigSvc.HandleUnaryRequest(ctx, req, - func(ctx context.Context, req any) (util.ResponseMessage, error) { - return s.svc.AnnounceLocalTrust(ctx, req.(*reputation.AnnounceLocalTrustRequest)) - }, - func() util.ResponseMessage { - return new(reputation.AnnounceLocalTrustResponse) - }, - ) - if err != nil { - return nil, err - } - - return resp.(*reputation.AnnounceLocalTrustResponse), nil -} - -func (s *signService) AnnounceIntermediateResult(ctx context.Context, req *reputation.AnnounceIntermediateResultRequest) (*reputation.AnnounceIntermediateResultResponse, error) { - resp, err := s.sigSvc.HandleUnaryRequest(ctx, req, - func(ctx context.Context, req any) (util.ResponseMessage, error) { - return s.svc.AnnounceIntermediateResult(ctx, req.(*reputation.AnnounceIntermediateResultRequest)) - }, - func() util.ResponseMessage { - return 
new(reputation.AnnounceIntermediateResultResponse) - }, - ) - if err != nil { - return nil, err - } - - return resp.(*reputation.AnnounceIntermediateResultResponse), nil -} diff --git a/pkg/services/reputation/trust.go b/pkg/services/reputation/trust.go deleted file mode 100644 index 8c5d9091a..000000000 --- a/pkg/services/reputation/trust.go +++ /dev/null @@ -1,102 +0,0 @@ -package reputation - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation" -) - -// TrustValue represents the numeric value of the node's trust. -type TrustValue float64 - -const ( - // TrustOne is a trust value equal to one. - TrustOne = TrustValue(1) - - // TrustZero is a trust value equal to zero. - TrustZero = TrustValue(0) -) - -// TrustValueFromFloat64 converts float64 to TrustValue. -func TrustValueFromFloat64(v float64) TrustValue { - return TrustValue(v) -} - -// TrustValueFromInt converts int to TrustValue. -func TrustValueFromInt(v int) TrustValue { - return TrustValue(v) -} - -func (v TrustValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// Float64 converts TrustValue to float64. -func (v TrustValue) Float64() float64 { - return float64(v) -} - -// Add adds v2 to v. -func (v *TrustValue) Add(v2 TrustValue) { - *v = *v + v2 -} - -// Div returns the result of dividing v by v2. -func (v TrustValue) Div(v2 TrustValue) TrustValue { - return v / v2 -} - -// Mul multiplies v by v2. -func (v *TrustValue) Mul(v2 TrustValue) { - *v *= v2 -} - -// IsZero returns true if v equal to zero. -func (v TrustValue) IsZero() bool { - return v == 0 -} - -// Trust represents peer's trust (reputation). -type Trust struct { - trusting, peer reputation.PeerID - - val TrustValue -} - -// TrustHandler describes the signature of the reputation.Trust -// value handling function. -// -// Termination of processing without failures is usually signaled -// with a zero error, while a specific value may describe the reason -// for failure. 
-type TrustHandler func(Trust) error - -// Value returns peer's trust value. -func (t Trust) Value() TrustValue { - return t.val -} - -// SetValue sets peer's trust value. -func (t *Trust) SetValue(val TrustValue) { - t.val = val -} - -// Peer returns trusted peer ID. -func (t Trust) Peer() reputation.PeerID { - return t.peer -} - -// SetPeer sets trusted peer ID. -func (t *Trust) SetPeer(id reputation.PeerID) { - t.peer = id -} - -// TrustingPeer returns trusting peer ID. -func (t Trust) TrustingPeer() reputation.PeerID { - return t.trusting -} - -// SetTrustingPeer sets trusting peer ID. -func (t *Trust) SetTrustingPeer(id reputation.PeerID) { - t.trusting = id -} diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go index 237a13962..5ad1d6518 100644 --- a/pkg/services/session/executor.go +++ b/pkg/services/session/executor.go @@ -5,6 +5,7 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" ) @@ -28,7 +29,7 @@ func NewExecutionService(exec ServiceExecutor, l *logger.Logger) Server { } func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) { - s.log.Debug("serving request...", + s.log.Debug(logs.ServingRequest, zap.String("component", "SessionService"), zap.String("request", "Create"), ) diff --git a/pkg/services/session/storage/persistent/executor.go b/pkg/services/session/storage/persistent/executor.go index f59e312c4..21f55a7d1 100644 --- a/pkg/services/session/storage/persistent/executor.go +++ b/pkg/services/session/storage/persistent/executor.go @@ -17,7 +17,7 @@ import ( // encrypts private keys if storage has been configured so). // Returns response that is filled with just created token's // ID and public key for it. 
-func (s *TokenStore) Create(ctx context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) { +func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errors.New("missing owner") diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go index ded33d1ec..25f067d62 100644 --- a/pkg/services/session/storage/persistent/storage.go +++ b/pkg/services/session/storage/persistent/storage.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -108,7 +109,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok return nil }) if err != nil { - s.l.Error("could not get session from persistent storage", + s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage, zap.Error(err), zap.Stringer("ownerID", ownerID), zap.String("tokenID", hex.EncodeToString(tokenID)), @@ -133,7 +134,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) { if epochFromToken(v) <= epoch { err = c.Delete() if err != nil { - s.l.Error("could not delete %s token", + s.l.Error(logs.PersistentCouldNotDeleteSToken, zap.String("token_id", hex.EncodeToString(k)), ) } @@ -144,7 +145,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) { }) }) if err != nil { - s.l.Error("could not clean up expired tokens", + s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens, zap.Uint64("epoch", epoch), ) } diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go index aa64d796b..cd498709c 100644 --- a/pkg/services/session/storage/temporary/executor.go +++ 
b/pkg/services/session/storage/temporary/executor.go @@ -12,7 +12,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) -func (s *TokenStore) Create(ctx context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) { +func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody) (*session.CreateResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errors.New("missing owner") diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index ab9f509ac..3288083cc 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -27,7 +27,7 @@ type cacheItem struct { } const ( - defaultClientCacheSize = 10 + defaultClientCacheSize = 32 defaultClientConnectTimeout = time.Second * 2 defaultReconnectInterval = time.Second * 15 ) @@ -36,7 +36,9 @@ var errRecentlyFailed = errors.New("client has recently failed") func (c *clientCache) init() { l, _ := simplelru.NewLRU[string, cacheItem](defaultClientCacheSize, func(_ string, value cacheItem) { - _ = value.cc.Close() + if conn := value.cc; conn != nil { + _ = conn.Close() + } }) c.LRU = *l } diff --git a/pkg/services/tree/drop.go b/pkg/services/tree/drop.go index c0750cbdc..a9e4e2e71 100644 --- a/pkg/services/tree/drop.go +++ b/pkg/services/tree/drop.go @@ -7,8 +7,8 @@ import ( ) // DropTree drops a tree from the database. If treeID is empty, all the trees are dropped. -func (s *Service) DropTree(_ context.Context, cid cid.ID, treeID string) error { +func (s *Service) DropTree(ctx context.Context, cid cid.ID, treeID string) error { // The only current use-case is a container removal, where all trees should be removed. // Thus there is no need to replicate the operation on other node. 
- return s.forest.TreeDrop(cid, treeID) + return s.forest.TreeDrop(ctx, cid, treeID) } diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go index fd65ac3f0..dc4ce29aa 100644 --- a/pkg/services/tree/getsubtree_test.go +++ b/pkg/services/tree/getsubtree_test.go @@ -1,6 +1,7 @@ package tree import ( + "context" "errors" "testing" @@ -32,7 +33,7 @@ func TestGetSubTree(t *testing.T) { meta := []pilorama.KeyValue{ {Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])}} - lm, err := p.TreeAddByPath(d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta) + lm, err := p.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta) require.NoError(t, err) require.Equal(t, 1, len(lm)) @@ -41,7 +42,7 @@ func TestGetSubTree(t *testing.T) { testGetSubTree := func(t *testing.T, rootID uint64, depth uint32, errIndex int) []uint64 { acc := subTreeAcc{errIndex: errIndex} - err := getSubTree(&acc, d.CID, &GetSubTreeRequest_Body{ + err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{ TreeId: treeID, RootId: rootID, Depth: depth, @@ -68,7 +69,7 @@ func TestGetSubTree(t *testing.T) { // GetSubTree must return valid meta. 
for i := range acc.seen { b := acc.seen[i].Body - meta, node, err := p.TreeGetMeta(d.CID, treeID, b.NodeId) + meta, node, err := p.TreeGetMeta(context.Background(), d.CID, treeID, b.NodeId) require.NoError(t, err) require.Equal(t, node, b.ParentId) require.Equal(t, meta.Time, b.Timestamp) diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go index 1671d2511..3de71b554 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -5,7 +5,11 @@ import ( "context" "errors" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -24,12 +28,18 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo for _, n := range cntNodes { var stop bool n.IterateNetworkEndpoints(func(endpoint string) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", + trace.WithAttributes( + attribute.String("endpoint", endpoint), + )) + defer span.End() + c, err := s.cache.get(ctx, endpoint) if err != nil { return false } - s.log.Debug("redirecting tree service query", zap.String("endpoint", endpoint)) + s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) called = true stop = f(c) return true diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index bb20310b2..60d0eff50 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -8,9 +8,13 @@ import ( "fmt" "time" + "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -37,17 +41,25 @@ const ( defaultReplicatorSendTimeout = time.Second * 5 ) -func (s *Service) localReplicationWorker() { +func (s *Service) localReplicationWorker(ctx context.Context) { for { select { case <-s.closeCh: return case op := <-s.replicateLocalCh: - err := s.forest.TreeApply(op.cid, op.treeID, &op.Move, false) + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationOperation", + trace.WithAttributes( + attribute.String("tree_id", op.treeID), + attribute.String("container_id", op.cid.EncodeToString()), + ), + ) + + err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false) if err != nil { - s.log.Error("failed to apply replicated operation", + s.log.Error(logs.TreeFailedToApplyReplicatedOperation, zap.String("err", err.Error())) } + span.End() } } } @@ -58,10 +70,24 @@ func (s *Service) replicationWorker(ctx context.Context) { case <-s.closeCh: return case task := <-s.replicationTasks: + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTask", + trace.WithAttributes( + attribute.String("public_key", hex.EncodeToString(task.n.PublicKey())), + ), + ) + var lastErr error var lastAddr string task.n.IterateNetworkEndpoints(func(addr string) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", + trace.WithAttributes( + attribute.String("public_key", hex.EncodeToString(task.n.PublicKey())), + attribute.String("address", addr), + ), + ) + defer span.End() + lastAddr = addr c, err := s.cache.get(ctx, addr) @@ -79,15 +105,16 @@ func (s *Service) replicationWorker(ctx context.Context) { if lastErr != nil { if errors.Is(lastErr, errRecentlyFailed) { - s.log.Debug("do not send update to the node", + s.log.Debug(logs.TreeDoNotSendUpdateToTheNode, zap.String("last_error", lastErr.Error())) } else { - 
s.log.Warn("failed to sent update to the node", + s.log.Warn(logs.TreeFailedToSentUpdateToTheNode, zap.String("last_error", lastErr.Error()), zap.String("address", lastAddr), zap.String("key", hex.EncodeToString(task.n.PublicKey()))) } } + span.End() } } } @@ -95,7 +122,7 @@ func (s *Service) replicationWorker(ctx context.Context) { func (s *Service) replicateLoop(ctx context.Context) { for i := 0; i < s.replicatorWorkerCount; i++ { go s.replicationWorker(ctx) - go s.localReplicationWorker() + go s.localReplicationWorker(ctx) } defer func() { for len(s.replicationTasks) != 0 { @@ -112,7 +139,7 @@ func (s *Service) replicateLoop(ctx context.Context) { case op := <-s.replicateCh: err := s.replicate(op) if err != nil { - s.log.Error("error during replication", + s.log.Error(logs.TreeErrorDuringReplication, zap.String("err", err.Error()), zap.Stringer("cid", op.cid), zap.String("treeID", op.treeID)) diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index edea450f1..546b7a207 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -119,7 +119,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error } d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} - log, err := s.forest.TreeMove(d, b.GetTreeId(), &pilorama.Move{ + log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{ Parent: b.GetParentId(), Child: pilorama.RootID, Meta: pilorama.Meta{Items: protoToMeta(b.GetMeta())}, @@ -174,7 +174,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP } d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} - logs, err := s.forest.TreeAddByPath(d, b.GetTreeId(), attr, b.GetPath(), meta) + logs, err := s.forest.TreeAddByPath(ctx, d, b.GetTreeId(), attr, b.GetPath(), meta) if err != nil { return nil, err } @@ -231,7 +231,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon } d := 
pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} - log, err := s.forest.TreeMove(d, b.GetTreeId(), &pilorama.Move{ + log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{ Parent: pilorama.TrashID, Child: b.GetNodeId(), }) @@ -280,7 +280,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er } d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} - log, err := s.forest.TreeMove(d, b.GetTreeId(), &pilorama.Move{ + log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{ Parent: b.GetParentId(), Child: b.GetNodeId(), Meta: pilorama.Meta{Items: protoToMeta(b.GetMeta())}, @@ -328,14 +328,14 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) attr = pilorama.AttributeFilename } - nodes, err := s.forest.TreeGetByPath(cid, b.GetTreeId(), attr, b.GetPath(), b.GetLatestOnly()) + nodes, err := s.forest.TreeGetByPath(ctx, cid, b.GetTreeId(), attr, b.GetPath(), b.GetLatestOnly()) if err != nil { return nil, err } info := make([]*GetNodeByPathResponse_Info, 0, len(nodes)) for _, node := range nodes { - m, parent, err := s.forest.TreeGetMeta(cid, b.GetTreeId(), node) + m, parent, err := s.forest.TreeGetMeta(ctx, cid, b.GetTreeId(), node) if err != nil { return nil, err } @@ -406,10 +406,10 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS return nil } - return getSubTree(srv, cid, b, s.forest) + return getSubTree(srv.Context(), srv, cid, b, s.forest) } -func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error { +func getSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error { // Traverse the tree in a DFS manner. Because we need to support arbitrary depth, // recursive implementation is not suitable here, so we maintain explicit stack. 
stack := [][]uint64{{b.GetRootId()}} @@ -425,7 +425,7 @@ func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRe nodeID := stack[len(stack)-1][0] stack[len(stack)-1] = stack[len(stack)-1][1:] - m, p, err := forest.TreeGetMeta(cid, b.GetTreeId(), nodeID) + m, p, err := forest.TreeGetMeta(ctx, cid, b.GetTreeId(), nodeID) if err != nil { return err } @@ -442,7 +442,7 @@ func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRe } if b.GetDepth() == 0 || uint32(len(stack)) < b.GetDepth() { - children, err := forest.TreeGetChildren(cid, b.GetTreeId(), nodeID) + children, err := forest.TreeGetChildren(ctx, cid, b.GetTreeId(), nodeID) if err != nil { return err } @@ -532,7 +532,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) h := b.GetHeight() for { - lm, err := s.forest.TreeGetOpLog(cid, b.GetTreeId(), h) + lm, err := s.forest.TreeGetOpLog(srv.Context(), cid, b.GetTreeId(), h) if err != nil || lm.Time == 0 { return err } @@ -587,7 +587,7 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList return resp, outErr } - ids, err := s.forest.TreeList(cid) + ids, err := s.forest.TreeList(ctx, cid) if err != nil { return nil, err } diff --git a/pkg/services/tree/service.pb.go b/pkg/services/tree/service.pb.go index 6b5571c3a..08664a6d0 100644 Binary files a/pkg/services/tree/service.pb.go and b/pkg/services/tree/service.pb.go differ diff --git a/pkg/services/tree/service_grpc.pb.go b/pkg/services/tree/service_grpc.pb.go index 722fd1705..fa259e804 100644 Binary files a/pkg/services/tree/service_grpc.pb.go and b/pkg/services/tree/service_grpc.pb.go differ diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 7a5a95c4c..439912969 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -8,6 +8,7 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -56,7 +57,23 @@ func (s *Service) verifyClient(req message, cid cidSDK.ID, rawBearer []byte, op return fmt.Errorf("can't get container %s: %w", cid, err) } - role, err := roleFromReq(cnr, req) + eaclOp := eACLOp(op) + + var bt *bearer.Token + if len(rawBearer) > 0 { + bt = new(bearer.Token) + if err = bt.Unmarshal(rawBearer); err != nil { + return eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err)) + } + if !bt.AssertContainer(cid) { + return eACLErr(eaclOp, errBearerWrongContainer) + } + if !bt.VerifySignature() { + return eACLErr(eaclOp, errBearerSignature) + } + } + + role, err := roleFromReq(cnr, req, bt) if err != nil { return fmt.Errorf("can't get request role: %w", err) } @@ -71,12 +88,10 @@ func (s *Service) verifyClient(req message, cid cidSDK.ID, rawBearer []byte, op return nil } - eaclOp := eACLOp(op) - var tableFromBearer bool if len(rawBearer) != 0 { if !basicACL.AllowedBearerRules(op) { - s.log.Debug("bearer presented but not allowed by ACL", + s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL, zap.String("cid", cid.EncodeToString()), zap.String("op", op.String()), ) @@ -86,36 +101,39 @@ func (s *Service) verifyClient(req message, cid cidSDK.ID, rawBearer []byte, op } var tb eacl.Table + signer := req.GetSignature().GetKey() if tableFromBearer { - var bt bearer.Token - if err = bt.Unmarshal(rawBearer); err != nil { - return eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err)) + if bt.Impersonate() { + tbCore, err := s.eaclSource.GetEACL(cid) + if err != nil { + return handleGetEACLError(err) + } + tb = *tbCore.Value + signer = bt.SigningKeyBytes() + } else { + if !bearer.ResolveIssuer(*bt).Equals(cnr.Value.Owner()) { + return eACLErr(eaclOp, errBearerWrongOwner) + } + tb = bt.EACLTable() } - if 
!bearer.ResolveIssuer(bt).Equals(cnr.Value.Owner()) { - return eACLErr(eaclOp, errBearerWrongOwner) - } - if !bt.AssertContainer(cid) { - return eACLErr(eaclOp, errBearerWrongContainer) - } - if !bt.VerifySignature() { - return eACLErr(eaclOp, errBearerSignature) - } - - tb = bt.EACLTable() } else { tbCore, err := s.eaclSource.GetEACL(cid) if err != nil { - if client.IsErrEACLNotFound(err) { - return nil - } - - return fmt.Errorf("get eACL table: %w", err) + return handleGetEACLError(err) } tb = *tbCore.Value } - return checkEACL(tb, req.GetSignature().GetKey(), eACLRole(role), eaclOp) + return checkEACL(tb, signer, eACLRole(role), eaclOp) +} + +func handleGetEACLError(err error) error { + if client.IsErrEACLNotFound(err) { + return nil + } + + return fmt.Errorf("get eACL table: %w", err) } func verifyMessage(m message) error { @@ -168,11 +186,16 @@ func SignMessage(m message, key *ecdsa.PrivateKey) error { return nil } -func roleFromReq(cnr *core.Container, req message) (acl.Role, error) { +func roleFromReq(cnr *core.Container, req message, bt *bearer.Token) (acl.Role, error) { role := acl.RoleOthers owner := cnr.Value.Owner() - pub, err := keys.NewPublicKeyFromBytes(req.GetSignature().GetKey(), elliptic.P256()) + rawKey := req.GetSignature().GetKey() + if bt != nil && bt.Impersonate() { + rawKey = bt.SigningKeyBytes() + } + + pub, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256()) if err != nil { return role, fmt.Errorf("invalid public key: %w", err) } diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index b336e60a2..eaf9b8b79 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -53,6 +53,16 @@ func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) { return cnt, nil } +type dummyEACLSource map[string]*containercore.EACL + +func (s dummyEACLSource) GetEACL(id cid.ID) (*containercore.EACL, error) { + cntEACL, ok := s[id.String()] + if !ok { + return nil, 
errors.New("container not found") + } + return cntEACL, nil +} + func testContainer(owner user.ID) container.Container { var r netmapSDK.ReplicaDescriptor r.SetNumberOfObjects(1) @@ -93,6 +103,11 @@ func TestMessageSign(t *testing.T) { cnrSource: dummyContainerSource{ cid1.String(): cnr, }, + eaclSource: dummyEACLSource{ + cid1.String(): &containercore.EACL{ + Value: testTable(cid1, privs[0].PublicKey(), privs[1].PublicKey()), + }, + }, }, } @@ -178,6 +193,19 @@ func TestMessageSign(t *testing.T) { require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) + t.Run("impersonate", func(t *testing.T) { + cnr.Value.SetBasicACL(acl.PublicRWExtended) + var bt bearer.Token + bt.SetImpersonate(true) + + require.NoError(t, bt.Sign(privs[1].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.Error(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + }) + bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) require.NoError(t, bt.Sign(privs[0].PrivateKey)) req.Body.BearerToken = bt.Marshal() @@ -202,6 +230,13 @@ func TestMessageSign(t *testing.T) { } func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token { + var b bearer.Token + b.SetEACLTable(*testTable(cid, forPutGet, forGet)) + + return b +} + +func testTable(cid cid.ID, forPutGet, forGet *keys.PublicKey) *eaclSDK.Table { tgtGet := eaclSDK.NewTarget() tgtGet.SetRole(eaclSDK.RoleUnknown) tgtGet.SetBinaryKeys([][]byte{forPutGet.Bytes(), forGet.Bytes()}) @@ -237,8 +272,5 @@ func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token tb.SetCID(cid) - var b bearer.Token - b.SetEACLTable(*tb) - - return b + return tb } diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 47299d1c9..ed2455194 100644 --- 
a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -11,6 +11,7 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" @@ -84,17 +85,17 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { } for _, tid := range treesToSync { - h, err := s.forest.TreeLastSyncHeight(cid, tid) + h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid) if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - s.log.Warn("could not get last synchronized height for a tree", + s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree, zap.Stringer("cid", cid), zap.String("tree", tid)) continue } newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes) if h < newHeight { - if err := s.forest.TreeUpdateLastSyncHeight(cid, tid, newHeight); err != nil { - s.log.Warn("could not update last synchronized height for a tree", + if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil { + s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree, zap.Stringer("cid", cid), zap.String("tree", tid)) } @@ -124,18 +125,164 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string return nil } +// mergeOperationStreams performs merge sort for node operation streams to one stream. 
+func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { + defer close(merged) + + ms := make([]*pilorama.Move, len(streams)) + for i := range streams { + ms[i] = <-streams[i] + } + + // Merging different node streams shuffles incoming operations like that: + // + // x - operation from the stream A + // o - operation from the stream B + // + // --o---o--x--x--x--o---x--x------> t + // ^ + // If all ops have been successfully applied, we must start from the last + // operation height from the stream B. This height is stored in minStreamedLastHeight. + var minStreamedLastHeight uint64 = math.MaxUint64 + + for { + var minTimeMoveTime uint64 = math.MaxUint64 + minTimeMoveIndex := -1 + for i, m := range ms { + if m != nil && minTimeMoveTime > m.Time { + minTimeMoveTime = m.Time + minTimeMoveIndex = i + } + } + + if minTimeMoveIndex == -1 { + break + } + + merged <- ms[minTimeMoveIndex] + height := ms[minTimeMoveIndex].Time + if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil { + if minStreamedLastHeight > height { + minStreamedLastHeight = height + } + } + } + + return minStreamedLastHeight +} + +func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string, + operationStream <-chan *pilorama.Move) uint64 { + errGroup, _ := errgroup.WithContext(ctx) + const workersCount = 1024 + errGroup.SetLimit(workersCount) + + // We run TreeApply concurrently for the operation batch. Let's consider two operations + // in the batch m1 and m2 such that m1.Time < m2.Time. The engine may apply m2 and fail + // on m1. That means the service must start sync from m1.Time in the next iteration and + // this height is stored in unappliedOperationHeight. 
+ var unappliedOperationHeight uint64 = math.MaxUint64 + var heightMtx sync.Mutex + + var prev *pilorama.Move + for m := range operationStream { + m := m + + // skip already applied op + if prev != nil && prev.Time == m.Time { + continue + } + prev = m + + errGroup.Go(func() error { + if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil { + heightMtx.Lock() + if m.Time < unappliedOperationHeight { + unappliedOperationHeight = m.Time + } + heightMtx.Unlock() + return err + } + return nil + }) + } + _ = errGroup.Wait() + return unappliedOperationHeight +} + +func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, + height uint64, treeClient TreeServiceClient, opsCh chan<- *pilorama.Move) (uint64, error) { + rawCID := make([]byte, sha256.Size) + cid.Encode(rawCID) + + for { + newHeight := height + req := &GetOpLogRequest{ + Body: &GetOpLogRequest_Body{ + ContainerId: rawCID, + TreeId: treeID, + Height: newHeight, + }, + } + if err := SignMessage(req, s.key); err != nil { + return 0, err + } + + c, err := treeClient.GetOpLog(ctx, req) + if err != nil { + return 0, fmt.Errorf("can't initialize client: %w", err) + } + res, err := c.Recv() + for ; err == nil; res, err = c.Recv() { + lm := res.GetBody().GetOperation() + m := &pilorama.Move{ + Parent: lm.ParentId, + Child: lm.ChildId, + } + if err := m.Meta.FromBytes(lm.Meta); err != nil { + return 0, err + } + opsCh <- m + } + if height == newHeight || err != nil && !errors.Is(err, io.EOF) { + return newHeight, err + } + height = newHeight + } +} + +// synchronizeTree synchronizes operations getting them from different nodes. +// Each available node does stream operations to a separate stream. These streams +// are merged into one big stream ordered by operation time. This way allows to skip +// already applied operation and keep good batching. +// The method returns a height that service should start sync from in the next time. 
func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, treeID string, nodes []netmapSDK.NodeInfo) uint64 { - s.log.Debug("synchronize tree", + s.log.Debug(logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from)) errGroup, egCtx := errgroup.WithContext(ctx) - const workersCount = 4 + const workersCount = 1024 errGroup.SetLimit(workersCount) - heights := make([]uint64, len(nodes)) + nodeOperationStreams := make([]chan *pilorama.Move, len(nodes)) + for i := range nodeOperationStreams { + nodeOperationStreams[i] = make(chan *pilorama.Move) + } + merged := make(chan *pilorama.Move) + var minStreamedLastHeight uint64 + errGroup.Go(func() error { + minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged) + return nil + }) + var minUnappliedHeight uint64 + errGroup.Go(func() error { + minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged) + return nil + }) + for i, n := range nodes { i := i n := n @@ -163,7 +310,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, treeClient := NewTreeServiceClient(cc) for { - h, err := s.synchronizeSingle(egCtx, cid, treeID, height, treeClient) + h, err := s.startStream(egCtx, cid, treeID, from, treeClient, nodeOperationStreams[i]) if height < h { height = h } @@ -173,80 +320,23 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, } } }) - - if height <= from { // do not increase starting height on fail - heights[i] = from - return nil - } - heights[i] = height + close(nodeOperationStreams[i]) return nil }) } - if err := errGroup.Wait(); err != nil { - s.log.Warn("failed to run tree synchronization over all nodes", zap.Error(err)) + s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err)) } - newHeight := uint64(math.MaxUint64) - for _, height := range heights { // take minimum across all clients - if height < newHeight { - newHeight = height - } - 
} - if newHeight == math.MaxUint64 { - newHeight = from + newHeight := minStreamedLastHeight + if newHeight > minUnappliedHeight { + newHeight = minUnappliedHeight + } else { + newHeight++ } return newHeight } -func (s *Service) synchronizeSingle(ctx context.Context, cid cid.ID, treeID string, height uint64, treeClient TreeServiceClient) (uint64, error) { - rawCID := make([]byte, sha256.Size) - cid.Encode(rawCID) - - for { - newHeight := height - req := &GetOpLogRequest{ - Body: &GetOpLogRequest_Body{ - ContainerId: rawCID, - TreeId: treeID, - Height: newHeight, - }, - } - if err := SignMessage(req, s.key); err != nil { - return newHeight, err - } - - c, err := treeClient.GetOpLog(ctx, req) - if err != nil { - return newHeight, fmt.Errorf("can't initialize client: %w", err) - } - - res, err := c.Recv() - for ; err == nil; res, err = c.Recv() { - lm := res.GetBody().GetOperation() - m := &pilorama.Move{ - Parent: lm.ParentId, - Child: lm.ChildId, - } - if err := m.Meta.FromBytes(lm.Meta); err != nil { - return newHeight, err - } - if err := s.forest.TreeApply(cid, treeID, m, true); err != nil { - return newHeight, err - } - if m.Time > newHeight { - newHeight = m.Time + 1 - } else { - newHeight++ - } - } - if height == newHeight || err != nil && !errors.Is(err, io.EOF) { - return newHeight, err - } - height = newHeight - } -} - // ErrAlreadySyncing is returned when a service synchronization has already // been started. 
var ErrAlreadySyncing = errors.New("service is being synchronized") @@ -283,11 +373,13 @@ func (s *Service) syncLoop(ctx context.Context) { case <-ctx.Done(): return case <-s.syncChan: - s.log.Debug("syncing trees...") + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync") + s.log.Debug(logs.TreeSyncingTrees) cnrs, err := s.cfg.cnrSource.List() if err != nil { - s.log.Error("could not fetch containers", zap.Error(err)) + s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err)) + span.End() continue } @@ -297,12 +389,16 @@ func (s *Service) syncLoop(ctx context.Context) { s.removeContainers(ctx, newMap) - s.log.Debug("trees have been synchronized") + s.log.Debug(logs.TreeTreesHaveBeenSynchronized) + span.End() } } } func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.syncContainers") + defer span.End() + // sync new containers var wg sync.WaitGroup for _, cnr := range cnrs { @@ -310,19 +406,19 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) { cnr := cnr err := s.syncPool.Submit(func() { defer wg.Done() - s.log.Debug("syncing container trees...", zap.Stringer("cid", cnr)) + s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr)) err := s.synchronizeAllTrees(ctx, cnr) if err != nil { - s.log.Error("could not sync trees", zap.Stringer("cid", cnr), zap.Error(err)) + s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err)) return } - s.log.Debug("container trees have been synced", zap.Stringer("cid", cnr)) + s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr)) }) if err != nil { wg.Done() - s.log.Error("could not query trees for synchronization", + s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization, zap.Stringer("cid", cnr), zap.Error(err)) if errors.Is(err, ants.ErrPoolClosed) { @@ -334,6 +430,9 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) { } func (s 
*Service) removeContainers(ctx context.Context, newContainers map[cid.ID]struct{}) { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.removeContainers") + defer span.End() + s.cnrMapMtx.Lock() defer s.cnrMapMtx.Unlock() @@ -349,11 +448,11 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID } for _, cnr := range removed { - s.log.Debug("removing redundant trees...", zap.Stringer("cid", cnr)) + s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr)) err := s.DropTree(ctx, cnr, "") if err != nil { - s.log.Error("could not remove redundant tree", + s.log.Error(logs.TreeCouldNotRemoveRedundantTree, zap.Stringer("cid", cnr), zap.Error(err)) } @@ -367,7 +466,7 @@ func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID for _, cnr := range cnrs { _, pos, err := s.getContainerNodes(cnr) if err != nil { - s.log.Error("could not calculate container nodes", + s.log.Error(logs.TreeCouldNotCalculateContainerNodes, zap.Stringer("cid", cnr), zap.Error(err)) continue diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go new file mode 100644 index 000000000..190b4ccbb --- /dev/null +++ b/pkg/services/tree/sync_test.go @@ -0,0 +1,80 @@ +package tree + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" + "github.com/stretchr/testify/require" +) + +func Test_mergeOperationStreams(t *testing.T) { + tests := []struct { + name string + opTimes [][]uint64 + wantValues []uint64 + wantMinHeight uint64 + }{ + { + name: "1", + opTimes: [][]uint64{ + {250, 251, 255}, + {252, 253, 254, 256, 257}, + }, + wantValues: []uint64{250, 251, 252, 253, 254, 255, 256, 257}, + wantMinHeight: 255, + }, + { + name: "2", + opTimes: [][]uint64{ + {250, 251, 255, 259}, + {252, 253, 254, 256, 257}, + }, + wantValues: []uint64{250, 251, 252, 253, 254, 255, 256, 257, 259}, + wantMinHeight: 257, + }, + { + name: "3", + opTimes: [][]uint64{ + {250, 251, 
255}, + {249, 250, 251, 253, 254, 256, 257}, + }, + wantValues: []uint64{249, 250, 250, 251, 251, 253, 254, 255, 256, 257}, + wantMinHeight: 255, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + nodeOpChans := make([]chan *pilorama.Move, len(tt.opTimes)) + for i := range nodeOpChans { + nodeOpChans[i] = make(chan *pilorama.Move) + } + + // generate and put values to all chans + for i, ch := range nodeOpChans { + i := i + ch := ch + go func() { + for _, tm := range tt.opTimes[i] { + op := &pilorama.Move{} + op.Time = tm + ch <- op + } + close(nodeOpChans[i]) + }() + } + + merged := make(chan *pilorama.Move, 1) + min := make(chan uint64) + go func() { + min <- mergeOperationStreams(nodeOpChans, merged) + }() + + var res []uint64 + for op := range merged { + res = append(res, op.Time) + } + require.Equal(t, tt.wantValues, res) + require.Equal(t, tt.wantMinHeight, <-min) + }) + } +} diff --git a/pkg/services/tree/types.pb.go b/pkg/services/tree/types.pb.go index f3f64180a..45d889177 100644 Binary files a/pkg/services/tree/types.pb.go and b/pkg/services/tree/types.pb.go differ diff --git a/pkg/util/config/dir.go b/pkg/util/config/dir.go index a74992d19..0379fe268 100644 --- a/pkg/util/config/dir.go +++ b/pkg/util/config/dir.go @@ -1,6 +1,7 @@ package config import ( + "fmt" "os" "path" @@ -20,7 +21,7 @@ func ReadConfigDir(v *viper.Viper, configDir string) error { continue } ext := path.Ext(entry.Name()) - if ext != ".yaml" && ext != ".yml" { + if ext != ".yaml" && ext != ".yml" && ext != ".json" { continue } @@ -33,22 +34,15 @@ func ReadConfigDir(v *viper.Viper, configDir string) error { } // mergeConfig reads config file and merge its content with current viper. 
-func mergeConfig(v *viper.Viper, fileName string) (err error) { - var cfgFile *os.File - cfgFile, err = os.Open(fileName) +func mergeConfig(v *viper.Viper, fileName string) error { + cv := viper.New() + cv.SetConfigFile(fileName) + err := cv.ReadInConfig() if err != nil { - return err + return fmt.Errorf("failed to read config: %w", err) } - - defer func() { - errClose := cfgFile.Close() - if err == nil { - err = errClose - } - }() - - if err = v.MergeConfig(cfgFile); err != nil { - return err + if err = v.MergeConfigMap(cv.AllSettings()); err != nil { + return fmt.Errorf("failed to merge config: %w", err) } return nil diff --git a/pkg/util/locode/db/boltdb/calls.go b/pkg/util/locode/db/boltdb/calls.go index 171808af2..6a80def3a 100644 --- a/pkg/util/locode/db/boltdb/calls.go +++ b/pkg/util/locode/db/boltdb/calls.go @@ -103,7 +103,7 @@ func recordFromValue(data []byte) (*locodedb.Record, error) { // Must not be called before successful Open call. // Must not be called in read-only mode: behavior is undefined. func (db *DB) Put(key locodedb.Key, rec locodedb.Record) error { - return db.bolt.Update(func(tx *bbolt.Tx) error { + return db.bolt.Batch(func(tx *bbolt.Tx) error { countryKey, err := countryBucketKey(key.CountryCode()) if err != nil { return err diff --git a/pkg/util/locode/db/db.go b/pkg/util/locode/db/db.go index 2a0f26689..8c71ea794 100644 --- a/pkg/util/locode/db/db.go +++ b/pkg/util/locode/db/db.go @@ -3,8 +3,10 @@ package locodedb import ( "errors" "fmt" + "runtime" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/locode" + "golang.org/x/sync/errgroup" ) // SourceTable is an interface of the UN/LOCODE table. @@ -75,81 +77,93 @@ type NamesDB interface { // FillDatabase generates the FrostFS location database based on the UN/LOCODE table. 
func FillDatabase(table SourceTable, airports AirportDB, continents ContinentsDB, names NamesDB, db DB) error { - return table.IterateAll(func(tableRecord locode.Record) error { - if tableRecord.LOCODE.LocationCode() == "" { + var errG errgroup.Group + + // Pick some sane default, after this the performance stopped increasing. + errG.SetLimit(runtime.NumCPU() * 4) + _ = table.IterateAll(func(tableRecord locode.Record) error { + errG.Go(func() error { + return processTableRecord(tableRecord, airports, continents, names, db) + }) + return nil + }) + return errG.Wait() +} + +func processTableRecord(tableRecord locode.Record, airports AirportDB, continents ContinentsDB, names NamesDB, db DB) error { + if tableRecord.LOCODE.LocationCode() == "" { + return nil + } + + dbKey, err := NewKey(tableRecord.LOCODE) + if err != nil { + return err + } + + dbRecord, err := NewRecord(tableRecord) + if err != nil { + if errors.Is(err, errParseCoordinates) { return nil } - dbKey, err := NewKey(tableRecord.LOCODE) - if err != nil { - return err - } + return err + } - dbRecord, err := NewRecord(tableRecord) + geoPoint := dbRecord.GeoPoint() + countryName := "" + + if geoPoint == nil { + airportRecord, err := airports.Get(tableRecord) if err != nil { - if errors.Is(err, errParseCoordinates) { + if errors.Is(err, ErrAirportNotFound) { return nil } return err } - geoPoint := dbRecord.GeoPoint() - countryName := "" + geoPoint = airportRecord.Point + countryName = airportRecord.CountryName + } - if geoPoint == nil { - airportRecord, err := airports.Get(tableRecord) - if err != nil { - if errors.Is(err, ErrAirportNotFound) { - return nil - } + dbRecord.SetGeoPoint(geoPoint) - return err - } - - geoPoint = airportRecord.Point - countryName = airportRecord.CountryName - } - - dbRecord.SetGeoPoint(geoPoint) - - if countryName == "" { - countryName, err = names.CountryName(dbKey.CountryCode()) - if err != nil { - if errors.Is(err, ErrCountryNotFound) { - return nil - } - - return err - } - } - - 
dbRecord.SetCountryName(countryName) - - if subDivCode := dbRecord.SubDivCode(); subDivCode != "" { - subDivName, err := names.SubDivName(dbKey.CountryCode(), subDivCode) - if err != nil { - if errors.Is(err, ErrSubDivNotFound) { - return nil - } - - return err - } - - dbRecord.SetSubDivName(subDivName) - } - - continent, err := continents.PointContinent(geoPoint) + if countryName == "" { + countryName, err = names.CountryName(dbKey.CountryCode()) if err != nil { - return fmt.Errorf("could not calculate continent geo point: %w", err) - } else if continent.Is(ContinentUnknown) { - return nil + if errors.Is(err, ErrCountryNotFound) { + return nil + } + + return err + } + } + + dbRecord.SetCountryName(countryName) + + if subDivCode := dbRecord.SubDivCode(); subDivCode != "" { + subDivName, err := names.SubDivName(dbKey.CountryCode(), subDivCode) + if err != nil { + if errors.Is(err, ErrSubDivNotFound) { + return nil + } + + return err } - dbRecord.SetContinent(continent) + dbRecord.SetSubDivName(subDivName) + } - return db.Put(*dbKey, *dbRecord) - }) + continent, err := continents.PointContinent(geoPoint) + if err != nil { + return fmt.Errorf("could not calculate continent geo point: %w", err) + } else if continent.Is(ContinentUnknown) { + return nil + } + + dbRecord.SetContinent(continent) + + return db.Put(*dbKey, *dbRecord) } // LocodeRecord returns the record from the FrostFS location database diff --git a/scripts/export-metrics/main.go b/scripts/export-metrics/main.go new file mode 100644 index 000000000..ac6e786ab --- /dev/null +++ b/scripts/export-metrics/main.go @@ -0,0 +1,56 @@ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "os" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics" +) + +var ( + node = flag.String("node", "", "File to export storage node metrics to.") + ir = flag.String("ir", "", "File to export innerring node metrics to.") +) + +func main() { + flag.Parse() + + if *node != "" && *ir != "" { + 
fmt.Println("-node and -ir flags are mutually exclusive") + os.Exit(1) + } + + var filename string + switch { + case *node != "": + _ = metrics.NewNodeMetrics() + filename = *node + case *ir != "": + _ = metrics.NewInnerRingMetrics() + filename = *ir + + default: + flag.Usage() + os.Exit(1) + } + + ds, err := metrics.DescribeAll() + if err != nil { + fmt.Fprintf(os.Stderr, "Could not parse metric descriptions: %v\n", err) + os.Exit(1) + } + + data, err := json.Marshal(ds) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not marshal metric descriptions: %v\n", err) + os.Exit(1) + } + + if err := ioutil.WriteFile(filename, data, 0644); err != nil { + fmt.Fprintf(os.Stderr, "Could not write to file: %v\n", err) + os.Exit(1) + } +}