Compare commits

..

17 commits

33cefac665
[#9999] quota: Add waiting request limit
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-20 12:13:07 +03:00
7d709b4228
[#9999] quota: Add gRPC interceptors to set/extract IO tags
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-20 12:12:08 +03:00
ee1598e111
[#9999] quota: Add context methods
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-17 13:13:27 +03:00
0cccf4751b
[#9999] quota: Fix linter warnings
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-17 10:54:54 +03:00
c3aaa19cd1
[#9999] quota: Add mclock scheduler closing
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-17 10:42:24 +03:00
1ffd4cb1ca
[#9999] quota: Update benchmarks
Use reservation and limit slices.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-17 09:40:59 +03:00
df2dd8f721
[#9999] quota: Add unit tests
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-17 09:40:59 +03:00
e70c195c17
[#9999] quota: Simplify queue creation
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 15:32:57 +03:00
a53eb16a5a
[#9999] quota: Add tag adjustment
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 15:32:57 +03:00
d221caf3e8
[#9999] quota: Make mclock sync
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 15:32:56 +03:00
6de26ae851
[#9999] quota: Add benchmarks
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 15:32:56 +03:00
09de5d70cc
[#9999] quota: Concurrent mClock
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 15:32:55 +03:00
2f66f30e25
[#9999] quota: Do not use reservation queue if reservation is not defined
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 15:32:55 +03:00
9eda612274
[#9999] quota: Schedule many requests
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 15:32:46 +03:00
7256a12ff6
[#9999] quota: Refactor mClock definitions
Use ts instead of tag.
Use tag instead of vm.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 15:32:45 +03:00
4e6c58456d
[#9999] Add mClock implementation
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 15:32:45 +03:00
6eccfdddf1
[#9999] quota: Add limiter
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2025-01-14 11:26:20 +03:00
242 changed files with 2645 additions and 2244 deletions
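The commit series above assembles an mClock-based request scheduler: reservation and limit slices, a cap on waiting requests, and IO tags carried through context methods and gRPC interceptors. As orientation only — this is not code from the change, and the names (TagInfo, virtualTags) are hypothetical — a minimal Go sketch of mClock-style tag assignment, where each request gets reservation, limit, and proportional-share tags and the scheduler dispatches the request with the smallest eligible tag:

package main

import "fmt"

// TagInfo is a hypothetical per-tag configuration. Reservation and Limit
// are in requests per second; Share is the proportional-share weight.
type TagInfo struct {
	Reservation float64 // guaranteed rate; 0 yields +Inf tags, i.e. no reservation queue
	Limit       float64 // upper bound on the rate
	Share       float64 // weight for the proportional-share queue
}

// virtualTags tracks per-tag virtual times, the core of mClock.
type virtualTags struct {
	lastR, lastL, lastP float64
}

// next computes the three mClock tags for one request arriving at now.
func (v *virtualTags) next(cfg TagInfo, now float64) (r, l, p float64) {
	r = max(v.lastR+1/cfg.Reservation, now) // reservation tag
	l = max(v.lastL+1/cfg.Limit, now)       // limit tag
	p = max(v.lastP+1/cfg.Share, now)       // proportional-share tag
	v.lastR, v.lastL, v.lastP = r, l, p
	return
}

func main() {
	vt := virtualTags{}
	cfg := TagInfo{Reservation: 100, Limit: 1000, Share: 2}
	for i := 0; i < 3; i++ {
		r, l, p := vt.next(cfg, float64(i)*0.001)
		fmt.Printf("req %d: R=%.4f L=%.4f P=%.4f\n", i, r, l, p)
	}
}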


@@ -1,28 +0,0 @@
-name: OCI image
-on:
-  push:
-  workflow_dispatch:
-jobs:
-  image:
-    name: Build container images
-    runs-on: docker
-    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
-    steps:
-      - name: Clone git repo
-        uses: actions/checkout@v3
-      - name: Build OCI image
-        run: make images
-      - name: Push image to OCI registry
-        run: |
-          echo "$REGISTRY_PASSWORD" \
-            | docker login --username truecloudlab --password-stdin git.frostfs.info
-          make push-images
-        if: >-
-          startsWith(github.ref, 'refs/tags/v') &&
-          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
-        env:
-          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}


@@ -19,7 +19,6 @@ jobs:
       uses: actions/setup-go@v3
       with:
         go-version: '1.23'
-        check-latest: true
     - name: Install govulncheck
       run: go install golang.org/x/vuln/cmd/govulncheck@latest


@@ -89,7 +89,5 @@ linters:
     - protogetter
     - intrange
     - tenv
-    - unconvert
-    - unparam
   disable-all: true
   fast: false


@@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.62.2
+LINT_VERSION ?= 1.62.0
 TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
 PROTOC_VERSION ?= 25.0
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
@@ -139,15 +139,6 @@ images: image-storage image-ir image-cli image-adm
 # Build dirty local Docker images
 dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
 
-# Push FrostFS components' docker image to the registry
-push-image-%:
-	@echo "⇒ Publish FrostFS $* docker image "
-	@docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
-
-# Push all Docker images to the registry
-.PHONY: push-images
-push-images: push-image-storage push-image-ir push-image-cli push-image-adm
-
 # Run `make %` in Golang container
 docker/%:
 	docker run --rm -t \


@@ -16,16 +16,9 @@ const (
 	EndpointFlagDesc  = "N3 RPC node endpoint"
 	EndpointFlagShort = "r"
 
-	WalletPath          = "wallet"
-	WalletPathShorthand = "w"
-	WalletPathUsage     = "Path to the wallet"
-
 	AlphabetWalletsFlag     = "alphabet-wallets"
 	AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
 
-	AdminWalletPath  = "wallet-admin"
-	AdminWalletUsage = "Path to the admin wallet"
-
 	LocalDumpFlag     = "local-dump"
 	ProtoConfigPath   = "protocol"
 	ContractsInitFlag = "contracts"


@@ -28,7 +28,6 @@ const (
 var (
 	errNoPathsFound          = errors.New("no metabase paths found")
 	errNoMorphEndpointsFound = errors.New("no morph endpoints found")
-	errUpgradeFailed         = errors.New("upgrade failed")
 )
 
 var UpgradeCmd = &cobra.Command{
@@ -92,19 +91,14 @@ func upgrade(cmd *cobra.Command, _ []string) error {
 	if err := eg.Wait(); err != nil {
 		return err
 	}
-	allSuccess := true
 	for mb, ok := range result {
 		if ok {
 			cmd.Println(mb, ": success")
 		} else {
 			cmd.Println(mb, ": failed")
-			allSuccess = false
 		}
 	}
-	if allSuccess {
-		return nil
-	}
-	return errUpgradeFailed
+	return nil
 }
 
 func getMetabasePaths(appCfg *config.Config) ([]string, error) {


@@ -3,8 +3,6 @@ package ape
 import (
 	"errors"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
@@ -78,8 +76,7 @@ func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *he
 	c, err := helper.NewRemoteClient(viper.GetViper())
 	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
 
-	walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
-	ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
+	ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName)
 	commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
 
 	var ch util.Uint160


@@ -1,7 +1,6 @@
 package frostfsid
 
 import (
-	"errors"
 	"fmt"
 	"math/big"
 	"sort"
@@ -39,11 +38,6 @@ const (
 	groupIDFlag              = "group-id"
 	rootNamespacePlaceholder = "<root>"
-
-	keyFlag       = "key"
-	keyDescFlag   = "Key for storing a value in the subject's KV storage"
-	valueFlag     = "value"
-	valueDescFlag = "Value to be stored in the subject's KV storage"
 )
 
 var (
@@ -157,23 +151,6 @@
 		},
 		Run: frostfsidListGroupSubjects,
 	}
-
-	frostfsidSetKVCmd = &cobra.Command{
-		Use:   "set-kv",
-		Short: "Store a key-value pair in the subject's KV storage",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-		Run: frostfsidSetKV,
-	}
-	frostfsidDeleteKVCmd = &cobra.Command{
-		Use:   "delete-kv",
-		Short: "Delete a value from the subject's KV storage",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-		Run: frostfsidDeleteKV,
-	}
 )
 
 func initFrostfsIDCreateNamespaceCmd() {
@@ -259,21 +236,6 @@ func initFrostfsIDListGroupSubjectsCmd() {
 	frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
 }
 
-func initFrostfsIDSetKVCmd() {
-	Cmd.AddCommand(frostfsidSetKVCmd)
-	frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
-	frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag)
-	frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag)
-}
-
-func initFrostfsIDDeleteKVCmd() {
-	Cmd.AddCommand(frostfsidDeleteKVCmd)
-	frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
-	frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag)
-}
-
 func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
 	ns := getFrostfsIDNamespace(cmd)
@@ -291,7 +253,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
 	reader := frostfsidrpclient.NewReader(inv, hash)
 	sessionID, it, err := reader.ListNamespaces()
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 
 	namespaces, err := frostfsidclient.ParseNamespaces(items)
@@ -343,7 +305,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
 	sessionID, it, err := reader.ListNamespaceSubjects(ns)
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
 
-	subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
+	subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
 	commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
 
 	sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
@@ -357,7 +319,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
 		sessionID, it, err := reader.ListSubjects()
 		commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
 
-		items, err := readIterator(inv, &it, sessionID)
+		items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 		commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 
 		subj, err := frostfsidclient.ParseSubject(items)
@@ -403,7 +365,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
 	sessionID, it, err := reader.ListGroups(ns)
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
 
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
 	groups, err := frostfsidclient.ParseGroups(items)
 	commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
@@ -441,45 +403,6 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) {
 	commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err)
 }
 
-func frostfsidSetKV(cmd *cobra.Command, _ []string) {
-	subjectAddress := getFrostfsIDSubjectAddress(cmd)
-	key, _ := cmd.Flags().GetString(keyFlag)
-	value, _ := cmd.Flags().GetString(valueFlag)
-
-	if key == "" {
-		commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
-	}
-
-	ffsid, err := newFrostfsIDClient(cmd)
-	commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
-	method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value)
-
-	ffsid.addCall(method, args)
-
-	err = ffsid.sendWait()
-	commonCmd.ExitOnErr(cmd, "set KV: %w", err)
-}
-
-func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
-	subjectAddress := getFrostfsIDSubjectAddress(cmd)
-	key, _ := cmd.Flags().GetString(keyFlag)
-
-	if key == "" {
-		commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
-	}
-
-	ffsid, err := newFrostfsIDClient(cmd)
-	commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
-	method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key)
-
-	ffsid.addCall(method, args)
-
-	err = ffsid.sendWait()
-	commonCmd.ExitOnErr(cmd, "delete KV: %w", err)
-}
-
 func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	ns := getFrostfsIDNamespace(cmd)
 	groupID := getFrostfsIDGroupID(cmd)
@@ -492,7 +415,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
 	commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
 
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 
 	subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
@@ -569,17 +492,17 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
 	return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
 }
 
-func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
+func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
 	var shouldStop bool
 	res := make([]stackitem.Item, 0)
 	for !shouldStop {
-		items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
+		items, err := inv.TraverseIterator(sessionID, iter, batchSize)
 		if err != nil {
 			return nil, err
 		}
 		res = append(res, items...)
-		shouldStop = len(items) < iteratorBatchSize
+		shouldStop = len(items) < batchSize
 	}
 
 	return res, nil
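
The readIterator change above threads the traversal batch size through as a parameter instead of the package-level iteratorBatchSize constant. A self-contained sketch of the same drain-until-short-batch pattern (fetchBatch stands in for the invoker's TraverseIterator; all names here are illustrative):

package main

import "fmt"

// fetchBatch stands in for TraverseIterator: it returns up to batchSize
// items, and a short batch once the source is exhausted.
func fetchBatch(src *[]int, batchSize int) []int {
	n := min(batchSize, len(*src))
	out := (*src)[:n]
	*src = (*src)[n:]
	return out
}

// drain mirrors the readIterator loop: keep traversing until a batch
// comes back shorter than batchSize, which signals exhaustion.
func drain(src []int, batchSize int) []int {
	res := make([]int, 0)
	for {
		items := fetchBatch(&src, batchSize)
		res = append(res, items...)
		if len(items) < batchSize {
			return res
		}
	}
}

func main() {
	fmt.Println(drain([]int{1, 2, 3, 4, 5, 6, 7}, 3)) // [1 2 3 4 5 6 7]
}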


@@ -12,8 +12,6 @@ func init() {
 	initFrostfsIDAddSubjectToGroupCmd()
 	initFrostfsIDRemoveSubjectFromGroupCmd()
 	initFrostfsIDListGroupSubjectsCmd()
-	initFrostfsIDSetKVCmd()
-	initFrostfsIDDeleteKVCmd()
 	initFrostfsIDAddSubjectKeyCmd()
 	initFrostfsIDRemoveSubjectKeyCmd()
 }


@@ -3,6 +3,9 @@ package helper
 import (
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
+	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
 	"github.com/google/uuid"
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -13,6 +16,7 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
 	"github.com/nspcc-dev/neo-go/pkg/wallet"
+	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 )
@@ -24,86 +28,32 @@ type LocalActor struct {
 	rpcInvoker invoker.RPCInvoke
 }
 
-type AlphabetWallets struct {
-	Label string
-	Path  string
-}
-
-func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) {
-	w, err := GetAlphabetWallets(v, a.Path)
-	if err != nil {
-		return nil, err
-	}
-
-	var accounts []*wallet.Account
-	for _, wall := range w {
-		acc, err := GetWalletAccount(wall, a.Label)
-		if err != nil {
-			return nil, err
-		}
-		accounts = append(accounts, acc)
-	}
-	return accounts, nil
-}
-
-type RegularWallets struct{ Path string }
-
-func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) {
-	w, err := getRegularWallet(r.Path)
-	if err != nil {
-		return nil, err
-	}
-
-	return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil
-}
-
 // NewLocalActor create LocalActor with accounts form provided wallets.
 // In case of empty wallets provided created actor with dummy account only for read operation.
 //
 // If wallets are provided, the contract client will use accounts with accName name from these wallets.
 // To determine which account name should be used in a contract client, refer to how the contract
 // verifies the transaction signature.
-func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) {
+func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) {
+	walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
 	var act *actor.Actor
 	var accounts []*wallet.Account
-	var signers []actor.SignerAccount
 
-	if alphabet != nil {
-		account, err := alphabet.GetAccount(viper.GetViper())
-		if err != nil {
-			return nil, err
-		}
-
-		accounts = append(accounts, account...)
-		signers = append(signers, actor.SignerAccount{
-			Signer: transaction.Signer{
-				Account: account[0].Contract.ScriptHash(),
-				Scopes:  transaction.Global,
-			},
-			Account: account[0],
-		})
-	}
-
-	for _, w := range regularWallets {
-		if w == nil {
-			continue
-		}
-		account, err := w.GetAccount()
-		if err != nil {
-			return nil, err
-		}
-
-		accounts = append(accounts, account...)
-		signers = append(signers, actor.SignerAccount{
-			Signer: transaction.Signer{
-				Account: account[0].Contract.ScriptHash(),
-				Scopes:  transaction.Global,
-			},
-			Account: account[0],
-		})
-	}
-
-	act, err := actor.New(c, signers)
+	wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
+	commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
+
+	for _, w := range wallets {
+		acc, err := GetWalletAccount(w, accName)
+		commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err)
+		accounts = append(accounts, acc)
+	}
+
+	act, err = actor.New(c, []actor.SignerAccount{{
+		Signer: transaction.Signer{
+			Account: accounts[0].Contract.ScriptHash(),
+			Scopes:  transaction.Global,
+		},
+		Account: accounts[0],
+	}})
 	if err != nil {
 		return nil, err
 	}


@@ -14,7 +14,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
-	"github.com/nspcc-dev/neo-go/cli/input"
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
@@ -23,27 +22,6 @@ import (
 	"github.com/spf13/viper"
 )
 
-func getRegularWallet(walletPath string) (*wallet.Wallet, error) {
-	w, err := wallet.NewWalletFromFile(walletPath)
-	if err != nil {
-		return nil, err
-	}
-
-	password, err := input.ReadPassword("Enter password for wallet:")
-	if err != nil {
-		return nil, fmt.Errorf("can't fetch password: %w", err)
-	}
-
-	for i := range w.Accounts {
-		if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
-			err = fmt.Errorf("can't unlock wallet: %w", err)
-			break
-		}
-	}
-
-	return w, err
-}
-
 func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
 	wallets, err := openAlphabetWallets(v, walletDir)
 	if err != nil {
@@ -73,7 +51,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
 			if errors.Is(err, os.ErrNotExist) {
 				err = nil
 			} else {
-				err = fmt.Errorf("can't open alphabet wallet: %w", err)
+				err = fmt.Errorf("can't open wallet: %w", err)
 			}
 			break
 		}


@@ -6,9 +6,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"github.com/nspcc-dev/neo-go/pkg/wallet"
 	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
 )
 
 func initRegisterCmd() {
@@ -21,7 +19,6 @@ func initRegisterCmd() {
 	registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
 	registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
 	registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
-	registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
 
 	_ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
 }
@@ -51,7 +48,6 @@ func initDeleteCmd() {
 	deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 	deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
-	deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
 
 	_ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
 }
@@ -66,28 +62,3 @@ func deleteDomain(cmd *cobra.Command, _ []string) {
 	commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
 	cmd.Println("Domain deleted successfully")
 }
-
-func initSetAdminCmd() {
-	Cmd.AddCommand(setAdminCmd)
-	setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-	setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
-	setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
-	setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage)
-	_ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath)
-	_ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag)
-}
-
-func setAdmin(cmd *cobra.Command, _ []string) {
-	c, actor := nnsWriter(cmd)
-
-	name, _ := cmd.Flags().GetString(nnsNameFlag)
-	w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath))
-	commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err)
-	h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash())
-
-	_, err = actor.Wait(h, vub, err)
-	commonCmd.ExitOnErr(cmd, "Set admin error: %w", err)
-
-	cmd.Println("Set admin successfully")
-}


@@ -1,11 +1,7 @@
 package nns
 
 import (
-	"errors"
-
 	client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
@@ -20,32 +16,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
 	c, err := helper.NewRemoteClient(v)
 	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
 
-	alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag))
-	walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath))
-	adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath))
-
-	var (
-		alphabet       *helper.AlphabetWallets
-		regularWallets []*helper.RegularWallets
-	)
-
-	if alphabetWalletPath != "" {
-		alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName}
-	}
-
-	if walletPath != "" {
-		regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath})
-	}
-
-	if adminWalletPath != "" {
-		regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath})
-	}
-
-	if alphabet == nil && regularWallets == nil {
-		commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided"))
-	}
-
-	ac, err := helper.NewLocalActor(c, alphabet, regularWallets...)
+	ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName)
 	commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
 
 	r := management.NewReader(ac.Invoker)


@@ -19,7 +19,6 @@ func initAddRecordCmd() {
 	addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
 	addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
 	addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
-	addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
 
 	_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag)
 	_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag)
@@ -41,7 +40,6 @@ func initDelRecordsCmd() {
 	delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 	delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
 	delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
-	delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
 
 	_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag)
 	_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
@@ -54,7 +52,6 @@ func initDelRecordCmd() {
 	delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
 	delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
 	delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
-	delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
 
 	_ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
 	_ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)


@@ -39,7 +39,6 @@ var (
 		PreRun: func(cmd *cobra.Command, _ []string) {
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
 			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-			_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
 		},
 		Run: registerDomain,
 	}
@@ -49,7 +48,6 @@ var (
 		PreRun: func(cmd *cobra.Command, _ []string) {
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
 			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-			_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
 		},
 		Run: deleteDomain,
 	}
@@ -77,7 +75,6 @@ var (
 		PreRun: func(cmd *cobra.Command, _ []string) {
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
 			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-			_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
 		},
 		Run: addRecord,
 	}
@@ -95,7 +92,6 @@ var (
 		PreRun: func(cmd *cobra.Command, _ []string) {
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
 			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-			_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
 		},
 		Run: delRecords,
 	}
@@ -105,21 +101,9 @@ var (
 		PreRun: func(cmd *cobra.Command, _ []string) {
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
 			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-			_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
 		},
 		Run: delRecord,
 	}
-
-	setAdminCmd = &cobra.Command{
-		Use:   "set-admin",
-		Short: "Sets admin for domain",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-			_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
-			_ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath))
-		},
-		Run: setAdmin,
-	}
 )
@@ -132,5 +116,4 @@ func init() {
 	initGetRecordsCmd()
 	initDelRecordsCmd()
 	initDelRecordCmd()
-	initSetAdminCmd()
 }


@@ -11,7 +11,6 @@ import (
 	"net/url"
 	"os"
 	"path/filepath"
-	"slices"
 	"strconv"
 	"strings"
 	"text/template"
@@ -411,7 +410,8 @@ func initClient(rpc []string) *rpcclient.Client {
 	var c *rpcclient.Client
 	var err error
 
-	shuffled := slices.Clone(rpc)
+	shuffled := make([]string, len(rpc))
+	copy(shuffled, rpc)
 	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
 
 	for _, endpoint := range shuffled {


@@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
 	outputPath, _ := cmd.Flags().GetString(outputFlag)
 	if outputPath != "" {
-		err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
+		err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
 		commonCmd.ExitOnErr(cmd, "dump error: %w", err)
 	} else {
 		fmt.Print("\n")


@@ -23,11 +23,11 @@ type policyPlaygroundREPL struct {
 	nodes map[string]netmap.NodeInfo
 }
 
-func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
+func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
 	return &policyPlaygroundREPL{
 		cmd:   cmd,
 		nodes: map[string]netmap.NodeInfo{},
-	}
+	}, nil
 }
 
 func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@@ -246,7 +246,8 @@ var policyPlaygroundCmd = &cobra.Command{
 	Long: `A REPL for testing placement policies.
 If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
 	Run: func(cmd *cobra.Command, _ []string) {
-		repl := newPolicyPlaygroundREPL(cmd)
+		repl, err := newPolicyPlaygroundREPL(cmd)
+		commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
 		commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
 	},
 }


@@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.
 		var resp *control.GetNetmapStatusResponse
 		var err error
 		err = cli.ExecRaw(func(client *rawclient.Client) error {
-			resp, err = control.GetNetmapStatus(cmd.Context(), client, req)
+			resp, err = control.GetNetmapStatus(client, req)
 			return err
 		})
 		commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)

View file

@@ -41,7 +41,7 @@ func initObjectHashCmd() {
 	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
 	_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
 
-	flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
+	flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
 	_ = objectHashCmd.MarkFlagRequired("range")
 
 	flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")


@@ -320,7 +320,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem
 	}
 	placementBuilder := placement.NewNetworkMapBuilder(netmap)
 	for _, object := range objects {
-		placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy)
+		placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy)
 		commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
 		for repIdx, rep := range placement {
 			numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
@@ -358,7 +358,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
 		placementObjectID = object.ecHeader.parent
 	}
 	placementBuilder := placement.NewNetworkMapBuilder(netmap)
-	placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy)
+	placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy)
 	commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
 
 	for _, vector := range placement {


@@ -46,7 +46,7 @@ func initObjectPatchCmd() {
 	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
 	_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
 
-	flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
+	flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
 	flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
 	flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
 	flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
@@ -99,9 +99,11 @@ func patch(cmd *cobra.Command, _ []string) {
 }
 
 func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
-	rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
-	if err != nil {
-		return nil, err
+	var rawAttrs []string
+
+	raw := cmd.Flag(newAttrsFlagName).Value.String()
+	if len(raw) != 0 {
+		rawAttrs = strings.Split(raw, ",")
 	}
 
 	attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes


@@ -50,7 +50,7 @@ func initObjectPutCmd() {
 	flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
 
-	flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
+	flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
 	flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
 	flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
 	flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@@ -214,9 +214,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
 }
 
 func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
-	rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
-	if err != nil {
-		return nil, err
+	var rawAttrs []string
+
+	raw := cmd.Flag("attributes").Value.String()
+	if len(raw) != 0 {
+		rawAttrs = strings.Split(raw, ",")
 	}
 
 	attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes


@@ -38,7 +38,7 @@ func initObjectRangeCmd() {
 	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
 	_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
 
-	flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
+	flags.String("range", "", "Range to take data from in the form offset:length")
 	flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
 	flags.Bool(rawFlag, false, rawFlagDesc)
 }
@@ -195,10 +195,11 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
 }
 
 func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
-	vs, err := cmd.Flags().GetStringSlice("range")
-	if len(vs) == 0 || err != nil {
-		return nil, err
+	v := cmd.Flag("range").Value.String()
+	if len(v) == 0 {
+		return nil, nil
 	}
+	vs := strings.Split(v, ",")
 	rs := make([]objectSDK.Range, len(vs))
 	for i := range vs {
 		before, after, found := strings.Cut(vs[i], rangeSep)
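
The hash, put, patch, and range commands above all switch from pflag's StringSlice to a plain String flag that is split manually. A self-contained sketch of the resulting parsing, assuming rangeSep is ":" as the flag descriptions suggest (parseRanges is an illustrative name, not the project's API):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseRanges splits a manually parsed --range value such as "0:100,200:50"
// into (offset, length) pairs, mirroring the getRangeList logic above.
func parseRanges(v string) ([][2]uint64, error) {
	if len(v) == 0 {
		return nil, nil
	}
	vs := strings.Split(v, ",")
	rs := make([][2]uint64, len(vs))
	for i := range vs {
		before, after, found := strings.Cut(vs[i], ":")
		if !found {
			return nil, fmt.Errorf("invalid range %q", vs[i])
		}
		offset, err := strconv.ParseUint(before, 10, 64)
		if err != nil {
			return nil, err
		}
		length, err := strconv.ParseUint(after, 10, 64)
		if err != nil {
			return nil, err
		}
		rs[i] = [2]uint64{offset, length}
	}
	return rs, nil
}

func main() {
	rs, err := parseRanges("0:100,200:50")
	fmt.Println(rs, err) // [[0 100] [200 50]] <nil>
}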


@@ -9,6 +9,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
+	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
 	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
@@ -33,9 +34,11 @@ func _client() (tree.TreeServiceClient, error) {
 	opts := []grpc.DialOption{
 		grpc.WithChainUnaryInterceptor(
+			metrics.NewUnaryClientInterceptor(),
 			tracing.NewUnaryClientInteceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
+			metrics.NewStreamClientInterceptor(),
 			tracing.NewStreamClientInterceptor(),
 		),
 		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
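
The hunk above prepends the frostfs-observability metrics interceptors to the tracing ones; with grpc.WithChainUnaryInterceptor, interceptors listed first wrap the ones that follow, so metrics observe the whole traced call. A hedged sketch of a custom interceptor in the same position (illustrative, not part of this change):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingUnaryInterceptor times each unary call it wraps.
func loggingUnaryInterceptor() grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply any,
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
	) error {
		start := time.Now()
		err := invoker(ctx, method, req, reply, cc, opts...)
		log.Printf("call %s took %s (err: %v)", method, time.Since(start), err)
		return err
	}
}

func main() {
	// Listed first, so it wraps (and therefore measures) everything after it.
	opts := []grpc.DialOption{
		grpc.WithChainUnaryInterceptor(loggingUnaryInterceptor()),
	}
	_ = opts
}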


@@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren(
 	path := parentBucket.Path
 	parser := parentBucket.NextParser
 
-	buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
+	buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
+	if err != nil {
+		return err
+	}
 
 	for item := range buffer {
 		if item.err != nil {
@@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren(
 		}
 		bucket := item.val
 
-		var err error
 		bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
 		if err != nil {
 			return err
@@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
 	defer cancel()
 
 	// Check the current bucket's nested buckets if exist
-	bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+	bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+	if err != nil {
+		return false, err
+	}
 
 	for item := range bucketsBuffer {
 		if item.err != nil {
@@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
 		}
 		b := item.val
 
-		var err error
 		b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
 		if err != nil {
 			return false, err
@@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
 	}
 
 	// Check the current bucket's nested records if exist
-	recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+	recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+	if err != nil {
+		return false, err
+	}
 
 	for item := range recordsBuffer {
 		if item.err != nil {
@@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
 		}
 		r := item.val
 
-		var err error
 		r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
 		if err != nil {
 			return false, err

View file

@@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
 func load[T any](
 	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
 	filter func(key, value []byte) bool, transform func(key, value []byte) T,
-) <-chan Item[T] {
+) (<-chan Item[T], error) {
 	buffer := make(chan Item[T], bufferSize)
 
 	go func() {
@@ -77,13 +77,13 @@ func load[T any](
 		}
 	}()
 
-	return buffer
+	return buffer, nil
 }
 
 func LoadBuckets(
 	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) <-chan Item[*Bucket] {
-	buffer := load(
+) (<-chan Item[*Bucket], error) {
+	buffer, err := load(
 		ctx, db, path, bufferSize,
 		func(_, value []byte) bool {
 			return value == nil
@@ -98,14 +98,17 @@ func LoadBuckets(
 			}
 		},
 	)
+	if err != nil {
+		return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+	}
 
-	return buffer
+	return buffer, nil
 }
 
 func LoadRecords(
 	ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
-) <-chan Item[*Record] {
-	buffer := load(
+) (<-chan Item[*Record], error) {
+	buffer, err := load(
 		ctx, db, path, bufferSize,
 		func(_, value []byte) bool {
 			return value != nil
@@ -121,8 +124,11 @@ func LoadRecords(
 			}
 		},
 	)
+	if err != nil {
+		return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+	}
 
-	return buffer
+	return buffer, nil
 }
 
 // HasBuckets checks if a bucket has nested buckets. It relies on assumption
@@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	buffer := load(
+	buffer, err := load(
 		ctx, db, path, 1,
 		nil,
 		func(_, value []byte) []byte { return value },
 	)
+	if err != nil {
+		return false, err
+	}
 
 	x, ok := <-buffer
 	if !ok {
 		return false, nil
 	}
 	if x.err != nil {
-		return false, x.err
+		return false, err
 	}
 	if x.val != nil {
-		return false, nil
+		return false, err
 	}
 	return true, nil
 }


@@ -62,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
 	ctx, v.onUnmount = context.WithCancel(ctx)
 
-	tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
+	tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
+	if err != nil {
+		return err
+	}
 
 	v.buffer = make(chan *Record, v.ui.loadBufferSize)
 	go func() {
@@ -70,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
 		for item := range tempBuffer {
 			if item.err != nil {
-				v.ui.stopOnError(item.err)
+				v.ui.stopOnError(err)
 				break
 			}
 			record := item.val
 
-			var err error
 			record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
 			if err != nil {
 				v.ui.stopOnError(err)


@@ -1,7 +1,6 @@
 package main
 
 import (
-	"context"
 	"sync"
 	"time"
@@ -17,7 +16,7 @@ import (
 	"github.com/hashicorp/golang-lru/v2/expirable"
 )
 
-type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error)
+type netValueReader[K any, V any] func(K) (V, error)
 
 type valueWithError[V any] struct {
 	v V
@@ -50,7 +49,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
 // updates the value from the network on cache miss or by TTL.
 //
 // returned value should not be modified.
-func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
+func (c *ttlNetCache[K, V]) get(key K) (V, error) {
 	hit := false
 	startedAt := time.Now()
 	defer func() {
@@ -72,7 +71,7 @@
 		return val.v, val.e
 	}
 
-	v, err := c.netRdr(ctx, key)
+	v, err := c.netRdr(key)
 
 	c.cache.Add(key, &valueWithError[V]{
 		v: v,
@@ -136,7 +135,7 @@ func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap]
 // updates the value from the network on cache miss.
 //
 // returned value should not be modified.
-func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
+func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
 	hit := false
 	startedAt := time.Now()
 	defer func() {
@@ -149,7 +148,7 @@ func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, e
 		return val, nil
 	}
 
-	val, err := c.netRdr(ctx, key)
+	val, err := c.netRdr(key)
 	if err != nil {
 		return nil, err
 	}
@@ -167,11 +166,11 @@ type ttlContainerStorage struct {
 }
 
 func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
-	lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) {
-		return v.Get(ctx, id)
+	lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
+		return v.Get(id)
 	}, metrics.NewCacheMetrics("container"))
-	lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
-		return v.DeletionInfo(ctx, id)
+	lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
+		return v.DeletionInfo(id)
 	}, metrics.NewCacheMetrics("container_deletion_info"))
 
 	return ttlContainerStorage{
@ -189,12 +188,12 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
// Get returns container value from the cache. If value is missing in the cache // Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache. // or expired, then it returns value from side chain and updates the cache.
func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) { func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
return s.containerCache.get(ctx, cnr) return s.containerCache.get(cnr)
} }
func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) { func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
return s.delInfoCache.get(ctx, cnr) return s.delInfoCache.get(cnr)
} }
type lruNetmapSource struct { type lruNetmapSource struct {
@ -206,8 +205,8 @@ type lruNetmapSource struct {
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10 const netmapCacheSize = 10
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
return v.GetNetMapByEpoch(ctx, key) return v.GetNetMapByEpoch(key)
}, metrics.NewCacheMetrics("netmap")) }, metrics.NewCacheMetrics("netmap"))
return &lruNetmapSource{ return &lruNetmapSource{
@ -216,16 +215,16 @@ func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
} }
} }
func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff) return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
} }
func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, epoch) return s.getNetMapByEpoch(epoch)
} }
func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
val, err := s.cache.get(ctx, epoch) val, err := s.cache.get(epoch)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -233,7 +232,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*
return val, nil return val, nil
} }
func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) { func (s *lruNetmapSource) Epoch() (uint64, error) {
return s.netState.CurrentEpoch(), nil return s.netState.CurrentEpoch(), nil
} }
@ -241,10 +240,7 @@ type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte] *ttlNetCache[struct{}, [][]byte]
} }
func newCachedIRFetcher(f interface { func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
InnerRingKeys(ctx context.Context) ([][]byte, error)
},
) cachedIRFetcher {
const ( const (
irFetcherCacheSize = 1 // we intend to store only one value irFetcherCacheSize = 1 // we intend to store only one value
@ -258,8 +254,8 @@ func newCachedIRFetcher(f interface {
) )
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL, irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
func(ctx context.Context, _ struct{}) ([][]byte, error) { func(_ struct{}) ([][]byte, error) {
return f.InnerRingKeys(ctx) return f.InnerRingKeys()
}, metrics.NewCacheMetrics("ir_keys"), }, metrics.NewCacheMetrics("ir_keys"),
) )
@ -269,8 +265,8 @@ func newCachedIRFetcher(f interface {
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in // InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates // the cache or expired, then it returns keys from side chain and updates
// the cache. // the cache.
func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) { func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
val, err := f.get(ctx, struct{}{}) val, err := f.get(struct{}{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -293,7 +289,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M
} }
} }
func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 { func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
const ttl = time.Second * 30 const ttl = time.Second * 30
hit := false hit := false
@ -315,7 +311,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
c.mtx.Lock() c.mtx.Lock()
size = c.lastSize size = c.lastSize
if !c.lastUpdated.After(prevUpdated) { if !c.lastUpdated.After(prevUpdated) {
size = c.src.MaxObjectSize(ctx) size = c.src.MaxObjectSize()
c.lastSize = size c.lastSize = size
c.lastUpdated = time.Now() c.lastUpdated = time.Now()
} }
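For reference, a minimal self-contained sketch of the read-through TTL cache pattern these cache.go hunks revolve around, with the network reader taking a context.Context as on the left side of the diff; ttlCache and ttlEntry are illustrative names, not the repository's types.

```go
// Illustrative sketch only: a read-through TTL cache whose miss path calls
// a context-aware network reader, mirroring the ctx-threading in this diff.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type ttlEntry[V any] struct {
	v       V
	err     error
	savedAt time.Time
}

type ttlCache[K comparable, V any] struct {
	mu   sync.Mutex
	ttl  time.Duration
	data map[K]ttlEntry[V]
	read func(ctx context.Context, k K) (V, error) // network reader
}

func (c *ttlCache[K, V]) get(ctx context.Context, k K) (V, error) {
	c.mu.Lock()
	e, ok := c.data[k]
	c.mu.Unlock()
	if ok && time.Since(e.savedAt) < c.ttl {
		return e.v, e.err // fresh hit; cached errors are returned too
	}
	v, err := c.read(ctx, k) // miss or expired: go to the network
	c.mu.Lock()
	c.data[k] = ttlEntry[V]{v: v, err: err, savedAt: time.Now()}
	c.mu.Unlock()
	return v, err
}

func main() {
	c := &ttlCache[string, int]{
		ttl:  time.Second,
		data: map[string]ttlEntry[int]{},
		read: func(_ context.Context, k string) (int, error) { return len(k), nil },
	}
	fmt.Println(c.get(context.Background(), "epoch")) // 5 <nil>
}
```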


@ -1,7 +1,6 @@
package main package main
import ( import (
"context"
"errors" "errors"
"testing" "testing"
"time" "time"
@ -18,7 +17,7 @@ func TestTTLNetCache(t *testing.T) {
t.Run("Test Add and Get", func(t *testing.T) { t.Run("Test Add and Get", func(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, ti, val) require.Equal(t, ti, val)
}) })
@ -27,7 +26,7 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
time.Sleep(2 * ttlDuration) time.Sleep(2 * ttlDuration)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, val, ti) require.NotEqual(t, val, ti)
}) })
@ -36,20 +35,20 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
cache.remove(key) cache.remove(key)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, val, ti) require.NotEqual(t, val, ti)
}) })
t.Run("Test Cache Error", func(t *testing.T) { t.Run("Test Cache Error", func(t *testing.T) {
cache.set("error", time.Now(), errors.New("mock error")) cache.set("error", time.Now(), errors.New("mock error"))
_, err := cache.get(context.Background(), "error") _, err := cache.get("error")
require.Error(t, err) require.Error(t, err)
require.Equal(t, "mock error", err.Error()) require.Equal(t, "mock error", err.Error())
}) })
} }
func testNetValueReader(_ context.Context, key string) (time.Time, error) { func testNetValueReader(key string) (time.Time, error) {
if key == "error" { if key == "error" {
return time.Now(), errors.New("mock error") return time.Now(), errors.New("mock error")
} }


@ -493,7 +493,6 @@ type cfg struct {
cfgNetmap cfgNetmap cfgNetmap cfgNetmap
cfgControlService cfgControlService cfgControlService cfgControlService
cfgObject cfgObject cfgObject cfgObject
cfgQoSService cfgQoSService
} }
// ReadCurrentNetMap reads network map which has been cached at the // ReadCurrentNetMap reads network map which has been cached at the
@ -699,7 +698,8 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector netState.metrics = c.metricsCollector
logPrm := c.loggerPrm() logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm) log, err := logger.NewLogger(logPrm)
fatalOnErr(err) fatalOnErr(err)
@ -853,8 +853,8 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
} }
func initCfgGRPC() cfgGRPC { func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
return cfgGRPC{ return cfgGRPC{
maxChunkSize: maxChunkSize, maxChunkSize: maxChunkSize,
@ -1059,7 +1059,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return sh return sh
} }
func (c *cfg) loggerPrm() *logger.Prm { func (c *cfg) loggerPrm() (*logger.Prm, error) {
// check if it has been inited before // check if it has been inited before
if c.dynamicConfiguration.logger == nil { if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm) c.dynamicConfiguration.logger = new(logger.Prm)
@ -1078,7 +1078,7 @@ func (c *cfg) loggerPrm() *logger.Prm {
} }
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger return c.dynamicConfiguration.logger, nil
} }
func (c *cfg) LocalAddress() network.AddressGroup { func (c *cfg) LocalAddress() network.AddressGroup {
@ -1147,7 +1147,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash) c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg) cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { if cacheSize > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL) morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
} }
@ -1206,7 +1206,7 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
} }
func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
ni, err := c.netmapLocalNodeState(ctx, epoch) ni, err := c.netmapLocalNodeState(epoch)
if err != nil { if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch), zap.Uint64("epoch", epoch),
@ -1334,7 +1334,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// Logger // Logger
logPrm := c.loggerPrm() logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
components := c.getComponents(ctx, logPrm) components := c.getComponents(ctx, logPrm)


@ -1,7 +1,6 @@
package config package config
import ( import (
"slices"
"strings" "strings"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
// It supports only one level of nesting and is intended to be used // It supports only one level of nesting and is intended to be used
// to provide default values. // to provide default values.
func (x *Config) SetDefault(from *Config) { func (x *Config) SetDefault(from *Config) {
x.defaultPath = slices.Clone(from.path) x.defaultPath = make([]string, len(from.path))
copy(x.defaultPath, from.path)
} }


@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
// //
// Returns PermDefault if the value is not a positive number. // Returns PermDefault if the value is not a positive number.
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode { func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
p := config.UintSafe(l.cfg, "perm") p := config.UintSafe((*config.Config)(l.cfg), "perm")
if p == 0 { if p == 0 {
p = PermDefault p = PermDefault
} }
@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
// //
// Returns false if the value is not a boolean. // Returns false if the value is not a boolean.
func (l PersistentPolicyRulesConfig) NoSync() bool { func (l PersistentPolicyRulesConfig) NoSync() bool {
return config.BoolSafe(l.cfg, "no_sync") return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
} }
// CompatibilityMode returns true if the node needs to run in compatibility mode with previous versions. // CompatibilityMode returns true if the node needs to run in compatibility mode with previous versions.


@ -1,46 +0,0 @@
package qos
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
const (
subsection = "qos"
criticalSubSection = "critical"
internalSubSection = "internal"
)
// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config
// parameter from "qos" section.
//
// Returns an empty list if not set.
func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys {
return authorizedKeys(c, criticalSubSection)
}
// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config
// parameter from "qos" section.
//
// Returns an empty list if not set.
func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
return authorizedKeys(c, internalSubSection)
}
func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
pubs := make(keys.PublicKeys, 0, len(strKeys))
for i := range strKeys {
pub, err := keys.NewPublicKeyFromString(strKeys[i])
if err != nil {
panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
}
pubs = append(pubs, pub)
}
return pubs
}
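A standalone sketch of the parsing loop above; the sample key is copied from the example config elsewhere in this diff, and the panic mirrors the behavior shown, since a malformed key is a configuration error.

```go
package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func main() {
	// Hex-encoded compressed public keys, as they appear in the qos section.
	strKeys := []string{
		"035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11",
	}
	pubs := make(keys.PublicKeys, 0, len(strKeys))
	for i := range strKeys {
		pub, err := keys.NewPublicKeyFromString(strKeys[i])
		if err != nil {
			panic(fmt.Errorf("invalid authorized key %s: %w", strKeys[i], err))
		}
		pubs = append(pubs, pub)
	}
	fmt.Println(len(pubs), "authorized key(s) parsed")
}
```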


@ -1,40 +0,0 @@
package qos
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestQoSSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Empty(t, CriticalAuthorizedKeys(empty))
require.Empty(t, InternalAuthorizedKeys(empty))
})
const path = "../../../../config/example/node"
criticalPubs := make(keys.PublicKeys, 2)
criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
internalPubs := make(keys.PublicKeys, 2)
internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
fileConfigTest := func(c *config.Config) {
require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}


@ -100,7 +100,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
// TODO: use owner directly from the event after neofs-contract#256 is resolved // TODO: use owner directly from the event after neofs-contract#256 is resolved
// but don't forget about the benefit of reading the new container and caching it: // but don't forget about the benefit of reading the new container and caching it:
// creation success is most commonly tracked by polling GET op. // creation success is most commonly tracked by polling GET op.
cnr, err := cnrSrc.Get(ctx, ev.ID) cnr, err := cnrSrc.Get(ev.ID)
if err == nil { if err == nil {
containerCache.containerCache.set(ev.ID, cnr, nil) containerCache.containerCache.set(ev.ID, cnr, nil)
} else { } else {
@ -221,25 +221,25 @@ type morphContainerReader struct {
src containerCore.Source src containerCore.Source
lister interface { lister interface {
ContainersOf(context.Context, *user.ID) ([]cid.ID, error) ContainersOf(*user.ID) ([]cid.ID, error)
IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error IterateContainersOf(*user.ID, func(cid.ID) error) error
} }
} }
func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) { func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
return x.src.Get(ctx, id) return x.src.Get(id)
} }
func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) { func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
return x.src.DeletionInfo(ctx, id) return x.src.DeletionInfo(id)
} }
func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) { func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
return x.lister.ContainersOf(ctx, id) return x.lister.ContainersOf(id)
} }
func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error { func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error {
return x.lister.IterateContainersOf(ctx, id, processCID) return x.lister.IterateContainersOf(id, processCID)
} }
type morphContainerWriter struct { type morphContainerWriter struct {


@ -7,12 +7,9 @@ import (
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"go.uber.org/zap" "go.uber.org/zap"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -53,14 +50,7 @@ func initControlService(ctx context.Context, c *cfg) {
return return
} }
c.cfgControlService.server = grpc.NewServer( c.cfgControlService.server = grpc.NewServer()
grpc.ChainUnaryInterceptor(
qos.NewSetCriticalIOTagUnaryServerInterceptor(),
metrics.NewUnaryServerInterceptor(),
tracing.NewUnaryServerInterceptor(),
),
// control service has no stream methods, so no stream interceptors added
)
c.onShutdown(func() { c.onShutdown(func() {
stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)


@ -1,7 +1,6 @@
package main package main
import ( import (
"context"
"strings" "strings"
"time" "time"
@ -43,7 +42,7 @@ func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int
} }
} }
func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) {
hit := false hit := false
startedAt := time.Now() startedAt := time.Now()
defer func() { defer func() {
@ -56,7 +55,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160)
return result.subject, result.err return result.subject, result.err
} }
subj, err := m.subjProvider.GetSubject(ctx, addr) subj, err := m.subjProvider.GetSubject(addr)
if err != nil { if err != nil {
if m.isCacheableError(err) { if m.isCacheableError(err) {
m.subjCache.Add(addr, subjectWithError{ m.subjCache.Add(addr, subjectWithError{
@ -70,7 +69,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160)
return subj, nil return subj, nil
} }
func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
hit := false hit := false
startedAt := time.Now() startedAt := time.Now()
defer func() { defer func() {
@ -83,7 +82,7 @@ func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.
return result.subject, result.err return result.subject, result.err
} }
subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr) subjExt, err := m.subjProvider.GetSubjectExtended(addr)
if err != nil { if err != nil {
if m.isCacheableError(err) { if m.isCacheableError(err) {
m.subjExtCache.Add(addr, subjectExtWithError{ m.subjExtCache.Add(addr, subjectExtWithError{
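The hunk is cut off by the diff context window; the surrounding file applies a negative-caching pattern worth spelling out. A generic sketch under assumed names (negCache, errNotFound, none of them repository types): successes and stable not-found errors are cached, transient failures are not.

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("subject not found")

type cached[V any] struct {
	v   V
	err error
}

type negCache[K comparable, V any] struct {
	data map[K]cached[V]
	load func(K) (V, error)
}

func (c *negCache[K, V]) get(k K) (V, error) {
	if e, ok := c.data[k]; ok {
		return e.v, e.err // hits return cached errors as well
	}
	v, err := c.load(k)
	if err == nil || errors.Is(err, errNotFound) {
		// cache successes and stable not-found errors only,
		// never transient RPC failures
		c.data[k] = cached[V]{v: v, err: err}
	}
	return v, err
}

func main() {
	c := &negCache[string, int]{
		data: map[string]cached[int]{},
		load: func(k string) (int, error) {
			if k == "missing" {
				return 0, errNotFound
			}
			return len(k), nil
		},
	}
	fmt.Println(c.get("missing")) // 0 subject not found (now cached)
	fmt.Println(c.get("addr"))    // 4 <nil>
}
```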


@ -12,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap" "go.uber.org/zap"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
@ -131,12 +130,10 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
serverOpts := []grpc.ServerOption{ serverOpts := []grpc.ServerOption{
grpc.MaxRecvMsgSize(maxRecvMsgSize), grpc.MaxRecvMsgSize(maxRecvMsgSize),
grpc.ChainUnaryInterceptor( grpc.ChainUnaryInterceptor(
qos.NewUnaryServerInterceptor(),
metrics.NewUnaryServerInterceptor(), metrics.NewUnaryServerInterceptor(),
tracing.NewUnaryServerInterceptor(), tracing.NewUnaryServerInterceptor(),
), ),
grpc.ChainStreamInterceptor( grpc.ChainStreamInterceptor(
qos.NewStreamServerInterceptor(),
metrics.NewStreamServerInterceptor(), metrics.NewStreamServerInterceptor(),
tracing.NewStreamServerInterceptor(), tracing.NewStreamServerInterceptor(),
), ),


@ -101,7 +101,6 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) })
initAccessPolicyEngine(ctx, c) initAccessPolicyEngine(ctx, c)
initAndLog(ctx, c, "access policy engine", func(c *cfg) { initAndLog(ctx, c, "access policy engine", func(c *cfg) {


@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
} }
} }
s.setControlNetmapStatus(ctrlNetSt) s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
} }
// sets the current node state to the given value. Subsequent cfg.bootstrap // sets the current node state to the given value. Subsequent cfg.bootstrap
@ -239,7 +239,7 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
// initNetmapState inits current Network map state. // initNetmapState inits current Network map state.
// Must be called after Morph components initialization. // Must be called after Morph components initialization.
func initNetmapState(ctx context.Context, c *cfg) { func initNetmapState(ctx context.Context, c *cfg) {
epoch, err := c.cfgNetmap.wrapper.Epoch(ctx) epoch, err := c.cfgNetmap.wrapper.Epoch()
fatalOnErrDetails("could not initialize current epoch number", err) fatalOnErrDetails("could not initialize current epoch number", err)
var ni *netmapSDK.NodeInfo var ni *netmapSDK.NodeInfo
@ -278,7 +278,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
} }
func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx) nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -291,7 +291,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
} }
} }
node, err := c.netmapLocalNodeState(ctx, epoch) node, err := c.netmapLocalNodeState(epoch)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -312,9 +312,9 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
return candidate, nil return candidate, nil
} }
func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
// calculate current network state // calculate current network state
nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch) nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -376,8 +376,8 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro
return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {}) return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
} }
func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) { func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
epoch, err := c.netMapSource.Epoch(ctx) epoch, err := c.netMapSource.Epoch()
if err != nil { if err != nil {
return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err) return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err)
} }
@ -390,7 +390,7 @@ func (c *cfg) ForceMaintenance(ctx context.Context) error {
} }
func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error { func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx) netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
if err != nil { if err != nil {
err = fmt.Errorf("read network settings to check maintenance allowance: %w", err) err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
} else if !netSettings.MaintenanceModeAllowed { } else if !netSettings.MaintenanceModeAllowed {
@ -438,7 +438,7 @@ type netInfo struct {
msPerBlockRdr func() (int64, error) msPerBlockRdr func() (int64, error)
} }
func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) { func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
magic, err := n.magic.MagicNumber() magic, err := n.magic.MagicNumber()
if err != nil { if err != nil {
return nil, err return nil, err
@ -448,7 +448,7 @@ func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.Net
ni.SetCurrentEpoch(n.netState.CurrentEpoch()) ni.SetCurrentEpoch(n.netState.CurrentEpoch())
ni.SetMagicNumber(magic) ni.SetMagicNumber(magic)
netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx) netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration()
if err != nil { if err != nil {
return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err) return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err)
} }


@ -54,10 +54,10 @@ type objectSvc struct {
patch *patchsvc.Service patch *patchsvc.Service
} }
func (c *cfg) MaxObjectSize(ctx context.Context) uint64 { func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx) sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil { if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.Error(err), zap.Error(err),
) )
} }
@ -122,8 +122,8 @@ type innerRingFetcherWithNotary struct {
sidechain *morphClient.Client sidechain *morphClient.Client
} }
func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) { func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
keys, err := fn.sidechain.NeoFSAlphabetList(ctx) keys, err := fn.sidechain.NeoFSAlphabetList()
if err != nil { if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err) return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err)
} }
@ -168,7 +168,7 @@ func initObjectService(c *cfg) {
sPatch := createPatchSvc(sGet, sPut) sPatch := createPatchSvc(sGet, sPut)
// build service pipeline // build service pipeline
// grpc | audit | qos | <metrics> | signature | response | acl | ape | split // grpc | audit | <metrics> | signature | response | acl | ape | split
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
@ -191,8 +191,7 @@ func initObjectService(c *cfg) {
c.shared.metricsSvc = objectService.NewMetricCollector( c.shared.metricsSvc = objectService.NewMetricCollector(
signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService) auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit)
auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
server := objectTransportGRPC.New(auditSvc) server := objectTransportGRPC.New(auditSvc)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
@ -216,7 +215,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
prm.MarkAsGarbage(addr) prm.MarkAsGarbage(addr)
prm.WithForceRemoval() prm.WithForceRemoval()
return ls.Inhume(ctx, prm) _, err := ls.Inhume(ctx, prm)
return err
} }
remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
@ -266,7 +266,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
var inhumePrm engine.InhumePrm var inhumePrm engine.InhumePrm
inhumePrm.MarkAsGarbage(addr) inhumePrm.MarkAsGarbage(addr)
if err := ls.Inhume(ctx, inhumePrm); err != nil { _, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.Error(err), zap.Error(err),
) )
@ -475,7 +476,8 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
prm.WithTarget(tombstone, addrs...) prm.WithTarget(tombstone, addrs...)
return e.engine.Inhume(ctx, prm) _, err := e.engine.Inhume(ctx, prm)
return err
} }
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error { func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {


@ -1,95 +0,0 @@
package main
import (
"bytes"
"context"
qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
)
type cfgQoSService struct {
netmapSource netmap.Source
logger *logger.Logger
allowedCriticalPubs [][]byte
allowedInternalPubs [][]byte
}
func initQoSService(c *cfg) {
criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg)
internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg)
rawCriticalPubs := make([][]byte, 0, len(criticalPubs))
rawInternalPubs := make([][]byte, 0, len(internalPubs))
for i := range criticalPubs {
rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes())
}
for i := range internalPubs {
rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes())
}
c.cfgQoSService = cfgQoSService{
netmapSource: c.netMapSource,
logger: c.log,
allowedCriticalPubs: rawCriticalPubs,
allowedInternalPubs: rawInternalPubs,
}
}
func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
rawTag, defined := qosTagging.IOTagFromContext(ctx)
if !defined {
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
ioTag, err := qos.FromRawString(rawTag)
if err != nil {
s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
switch ioTag {
case qos.IOTagClient:
return ctx
case qos.IOTagCritical:
for _, pk := range s.allowedCriticalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal:
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
default:
s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
}
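How AdjustIncomingTag would plug into a server is only implied here; a hypothetical wiring sketch follows (the interceptor name and the keyFromRequest helper are illustrative, not the repository's API).

```go
package qos // assumed to sit next to cfgQoSService

import (
	"context"

	"google.golang.org/grpc"
)

// tagAdjuster matches the method implemented above.
type tagAdjuster interface {
	AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
}

// NewAdjustIncomingIOTagUnaryServerInterceptor is illustrative; extracting
// the signer's public key from a concrete request type is left to the
// assumed keyFromRequest helper.
func NewAdjustIncomingIOTagUnaryServerInterceptor(s tagAdjuster, keyFromRequest func(req any) []byte) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		// adjust once per request, before the handler runs
		return handler(s.AdjustIncomingTag(ctx, keyFromRequest(req)), req)
	}
}
```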


@ -29,16 +29,16 @@ type cnrSource struct {
cli *containerClient.Client cli *containerClient.Client
} }
func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) { func (c cnrSource) Get(id cid.ID) (*container.Container, error) {
return c.src.Get(ctx, id) return c.src.Get(id)
} }
func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) { func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) {
return c.src.DeletionInfo(ctx, cid) return c.src.DeletionInfo(cid)
} }
func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) { func (c cnrSource) List() ([]cid.ID, error) {
return c.cli.ContainersOf(ctx, nil) return c.cli.ContainersOf(nil)
} }
func initTreeService(c *cfg) { func initTreeService(c *cfg) {
@ -72,7 +72,7 @@ func initTreeService(c *cfg) {
) )
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService)) tree.RegisterTreeServiceServer(s, c.treeService)
}) })
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {


@ -225,6 +225,3 @@ FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
FROSTFS_MULTINET_BALANCER=roundrobin FROSTFS_MULTINET_BALANCER=roundrobin
FROSTFS_MULTINET_RESTRICT=false FROSTFS_MULTINET_RESTRICT=false
FROSTFS_MULTINET_FALLBACK_DELAY=350ms FROSTFS_MULTINET_FALLBACK_DELAY=350ms
FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"


@ -305,19 +305,5 @@
"balancer": "roundrobin", "balancer": "roundrobin",
"restrict": false, "restrict": false,
"fallback_delay": "350ms" "fallback_delay": "350ms"
},
"qos": {
"critical": {
"authorized_keys": [
"035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11",
"028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
]
},
"internal": {
"authorized_keys": [
"02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2",
"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"
]
}
} }
} }


@ -270,13 +270,3 @@ multinet:
balancer: roundrobin balancer: roundrobin
restrict: false restrict: false
fallback_delay: 350ms fallback_delay: 350ms
qos:
critical:
authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
internal:
authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag
- 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2
- 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a


@ -95,15 +95,19 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
## Post-release ## Post-release
### Prepare and push images to a Docker registry (automated) ### Prepare and push images to a Docker Hub (if not automated)
Create Docker images for all applications and push them into container registry Create Docker images for all applications and push them into Docker Hub
(executed automatically in Forgejo Actions upon pushing a release tag): (requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
```shell ```shell
$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} $ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
$ make images $ make images
$ make push-images $ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
``` ```
### Make a proper release (if not automated) ### Make a proper release (if not automated)


@ -26,8 +26,7 @@ There are some custom types used for brevity:
| `storage` | [Storage engine configuration](#storage-section) | | `storage` | [Storage engine configuration](#storage-section) |
| `runtime` | [Runtime configuration](#runtime-section) | | `runtime` | [Runtime configuration](#runtime-section) |
| `audit` | [Audit configuration](#audit-section) | | `audit` | [Audit configuration](#audit-section) |
| `multinet` | [Multinet configuration](#multinet-section) | | `multinet` | [Multinet configuration](#multinet-section) |
| `qos` | [QoS configuration](#qos-section) |
# `control` section # `control` section
```yaml ```yaml
@ -472,20 +471,3 @@ multinet:
| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". | | `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. | | `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
| `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. | | `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. |
# `qos` section
```yaml
qos:
critical:
authorized_keys:
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
internal:
authorized_keys:
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
```
| Parameter | Type | Default value | Description |
| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- |
| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. |
| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. |

go.mod

@ -7,9 +7,8 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4
git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88

go.sum

@ -6,12 +6,10 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76 h1:wzvSJIiS+p9qKfl3eg1oH6qlrjaEWiqTc/iMDKG3Ml4=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 h1:dOZHuOywvH1ms8U38lDCWpysgkCCeJ02RLI7zDhPcyw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=


@ -146,6 +146,7 @@ const (
ClientCantGetBlockchainHeight = "can't get blockchain height" ClientCantGetBlockchainHeight = "can't get blockchain height"
ClientCantGetBlockchainHeight243 = "can't get blockchain height" ClientCantGetBlockchainHeight243 = "can't get blockchain height"
EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
EventCouldNotStartListenToEvents = "could not start listen to events"
EventStopEventListenerByError = "stop event listener by error" EventStopEventListenerByError = "stop event listener by error"
EventStopEventListenerByContext = "stop event listener by context" EventStopEventListenerByContext = "stop event listener by context"
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
@ -383,6 +384,7 @@ const (
FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown" FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown"
FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing" FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing"
FrostFSNodeConfigurationReading = "configuration reading" FrostFSNodeConfigurationReading = "configuration reading"
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
FrostFSNodeTracingConfigationUpdated = "tracing configation updated" FrostFSNodeTracingConfigationUpdated = "tracing configation updated"
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
FrostFSNodePoolConfigurationUpdate = "adjust pool configuration" FrostFSNodePoolConfigurationUpdate = "adjust pool configuration"
@ -510,7 +512,4 @@ const (
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file" BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
WritecacheCantGetObject = "can't get an object from fstree" WritecacheCantGetObject = "can't get an object from fstree"
FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
) )


@ -1,51 +0,0 @@
package qos
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"google.golang.org/grpc"
)
func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor {
return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String())
return handler(ctx, req)
}
}
func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor {
return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
rawTag, ok := tagging.IOTagFromContext(ctx)
if !ok {
return invoker(ctx, method, req, reply, cc, opts...)
}
tag, err := FromRawString(rawTag)
if err != nil {
tag = IOTagClient
}
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
return invoker(ctx, method, req, reply, cc, opts...)
}
}
func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor {
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
rawTag, ok := tagging.IOTagFromContext(ctx)
if !ok {
return streamer(ctx, desc, cc, method, opts...)
}
tag, err := FromRawString(rawTag)
if err != nil {
tag = IOTagClient
}
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
return streamer(ctx, desc, cc, method, opts...)
}
}
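A usage sketch for the client-side interceptors above, assuming a main package inside the same module (the internal/qos import path is taken from this diff) and a placeholder address:

```go
package main

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Outgoing calls re-map process-internal tags (background, policer,
	// writecache) to `internal` before they leave the node.
	conn, err := grpc.Dial("127.0.0.1:8080", // placeholder address
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()),
		grpc.WithChainStreamInterceptor(qos.NewAdjustOutgoingIOTagStreamClientInterceptor()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```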


@ -1,39 +0,0 @@
package qos
import "fmt"
type IOTag string
const (
IOTagClient IOTag = "client"
IOTagInternal IOTag = "internal"
IOTagBackground IOTag = "background"
IOTagWritecache IOTag = "writecache"
IOTagPolicer IOTag = "policer"
IOTagCritical IOTag = "critical"
ioTagUnknown IOTag = ""
)
func FromRawString(s string) (IOTag, error) {
switch s {
case string(IOTagCritical):
return IOTagCritical, nil
case string(IOTagClient):
return IOTagClient, nil
case string(IOTagInternal):
return IOTagInternal, nil
case string(IOTagBackground):
return IOTagBackground, nil
case string(IOTagWritecache):
return IOTagWritecache, nil
case string(IOTagPolicer):
return IOTagPolicer, nil
default:
return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
}
}
func (t IOTag) String() string {
return string(t)
}
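A quick round-trip for the parser above (import path assumed from this diff): unknown strings produce an error, which callers elsewhere in the diff downgrade to IOTagClient.

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
)

func main() {
	for _, s := range []string{"critical", "policer", "bogus"} {
		tag, err := qos.FromRawString(s)
		if err != nil {
			tag = qos.IOTagClient // callers fall back to `client` on unknown tags
		}
		fmt.Println(s, "->", tag.String())
	}
}
```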


@ -1,7 +1,6 @@
package request package request
import ( import (
"context"
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
@ -13,9 +12,9 @@ import (
) )
// FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID. // FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID.
func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) {
reqProps := make(map[string]string) reqProps := make(map[string]string)
subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash())
if err != nil { if err != nil {
if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
return nil, fmt.Errorf("get subject error: %w", err) return nil, fmt.Errorf("get subject error: %w", err)
@ -37,8 +36,8 @@ func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfs
} }
// Groups return the actor's group ids from frostfsid contract. // Groups return the actor's group ids from frostfsid contract.
func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { func Groups(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) {
subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash())
if err != nil { if err != nil {
if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
return nil, fmt.Errorf("get subject error: %w", err) return nil, fmt.Errorf("get subject error: %w", err)


@ -1,7 +1,6 @@
package container package container
import ( import (
"context"
"sync" "sync"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
@ -20,7 +19,7 @@ type infoValue struct {
} }
type InfoProvider interface { type InfoProvider interface {
Info(ctx context.Context, id cid.ID) (Info, error) Info(id cid.ID) (Info, error)
} }
type infoProvider struct { type infoProvider struct {
@ -44,13 +43,13 @@ func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider {
} }
} }
func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) { func (r *infoProvider) Info(id cid.ID) (Info, error) {
v, found := r.tryGetFromCache(id) v, found := r.tryGetFromCache(id)
if found { if found {
return v.info, v.err return v.info, v.err
} }
return r.getFromSource(ctx, id) return r.getFromSource(id)
} }
func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
@ -61,7 +60,7 @@ func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
return value, found return value, found
} }
func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) { func (r *infoProvider) getFromSource(id cid.ID) (Info, error) {
r.kl.Lock(id) r.kl.Lock(id)
defer r.kl.Unlock(id) defer r.kl.Unlock(id)
@ -76,11 +75,11 @@ func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, erro
return Info{}, r.sourceErr return Info{}, r.sourceErr
} }
cnr, err := r.source.Get(ctx, id) cnr, err := r.source.Get(id)
var civ infoValue var civ infoValue
if err != nil { if err != nil {
if client.IsErrContainerNotFound(err) { if client.IsErrContainerNotFound(err) {
removed, err := WasRemoved(ctx, r.source, id) removed, err := WasRemoved(r.source, id)
if err != nil { if err != nil {
civ.err = err civ.err = err
} else { } else {


@ -1,8 +1,6 @@
package container package container
import ( import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
@ -43,9 +41,9 @@ type Source interface {
// //
// Implementations must not retain the container pointer and modify // Implementations must not retain the container pointer and modify
// the container through it. // the container through it.
Get(ctx context.Context, cid cid.ID) (*Container, error) Get(cid.ID) (*Container, error)
DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error) DeletionInfo(cid.ID) (*DelInfo, error)
} }
// EACL groups information about the FrostFS container's extended ACL stored in // EACL groups information about the FrostFS container's extended ACL stored in


@ -1,7 +1,6 @@
package container package container
import ( import (
"context"
"errors" "errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@ -11,8 +10,8 @@ import (
// WasRemoved checks whether the container ever existed or // WasRemoved checks whether the container ever existed or
// it just has not been created yet at the current epoch. // it just has not been created yet at the current epoch.
func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { func WasRemoved(s Source, cid cid.ID) (bool, error) {
_, err := s.DeletionInfo(ctx, cid) _, err := s.DeletionInfo(cid)
if err == nil { if err == nil {
return true, nil return true, nil
} }


@@ -1,8 +1,6 @@
package frostfsid

import (
-   "context"
-
    "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
    "github.com/nspcc-dev/neo-go/pkg/util"
)
@@ -13,6 +11,6 @@ const (

// SubjectProvider interface provides methods to get subject from FrostfsID contract.
type SubjectProvider interface {
-   GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error)
-   GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error)
+   GetSubject(util.Uint160) (*client.Subject, error)
+   GetSubjectExtended(util.Uint160) (*client.SubjectExtended, error)
}

View file

@@ -1,8 +1,6 @@
package netmap

import (
-   "context"
-
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
@@ -18,7 +16,7 @@ type Source interface {
    //
    // Implementations must not retain the network map pointer and modify
    // the network map through it.
-   GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error)
+   GetNetMap(diff uint64) (*netmap.NetMap, error)

    // GetNetMapByEpoch reads network map by the epoch number from the storage.
    // It returns the pointer to the requested network map and any error encountered.
@@ -27,21 +25,21 @@ type Source interface {
    //
    // Implementations must not retain the network map pointer and modify
    // the network map through it.
-   GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error)
+   GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error)

    // Epoch reads the current epoch from the storage.
    // It returns the number of the current epoch and any error encountered.
    //
    // Must return exactly one non-default value.
-   Epoch(ctx context.Context) (uint64, error)
+   Epoch() (uint64, error)
}

// GetLatestNetworkMap requests and returns the latest network map from the storage.
-func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
-   return src.GetNetMap(ctx, 0)
+func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) {
+   return src.GetNetMap(0)
}

// GetPreviousNetworkMap requests and returns the network map preceding the latest one from the storage.
-func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) {
-   return src.GetNetMap(ctx, 1)
+func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) {
+   return src.GetNetMap(1)
}

View file

@@ -199,7 +199,7 @@ func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSD
    cnrIDBin := make([]byte, sha256.Size)
    cnrID.Encode(cnrIDBin)

-   cnr, err := v.containers.Get(ctx, cnrID)
+   cnr, err := v.containers.Get(cnrID)
    if err != nil {
        return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
    }

View file

@@ -578,7 +578,7 @@ type testIRSource struct {
    irNodes [][]byte
}

-func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) {
+func (s *testIRSource) InnerRingKeys() ([][]byte, error) {
    return s.irNodes, nil
}
@@ -586,14 +586,14 @@ type testContainerSource struct {
    containers map[cid.ID]*container.Container
}

-func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) {
+func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
    if cnr, found := s.containers[cnrID]; found {
        return cnr, nil
    }
    return nil, fmt.Errorf("container not found")
}

-func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
+func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
    return nil, nil
}
@@ -602,20 +602,20 @@ type testNetmapSource struct {
    currentEpoch uint64
}

-func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
+func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) {
    if diff >= s.currentEpoch {
        return nil, fmt.Errorf("invalid diff")
    }
-   return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
+   return s.GetNetMapByEpoch(s.currentEpoch - diff)
}

-func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
+func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
    if nm, found := s.netmaps[epoch]; found {
        return nm, nil
    }
    return nil, fmt.Errorf("netmap not found")
}

-func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) {
+func (s *testNetmapSource) Epoch() (uint64, error) {
    return s.currentEpoch, nil
}

View file

@@ -18,7 +18,7 @@ import (
)

type InnerRing interface {
-   InnerRingKeys(ctx context.Context) ([][]byte, error)
+   InnerRingKeys() ([][]byte, error)
}

type SenderClassifier struct {
@@ -63,7 +63,7 @@ func (c SenderClassifier) Classify(
}

func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
-   isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes)
+   isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
    if err != nil {
        // do not throw error, try best case matching
        c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
@@ -78,7 +78,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
    binCnr := make([]byte, sha256.Size)
    idCnr.Encode(binCnr)

-   isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr)
+   isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr)
    if err != nil {
        // error might happen if request has `RoleOther` key and placement
        // is not possible for previous epoch, so
@@ -99,8 +99,8 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
    }, nil
}

-func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) {
-   innerRingKeys, err := c.innerRing.InnerRingKeys(ctx)
+func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
+   innerRingKeys, err := c.innerRing.InnerRingKeys()
    if err != nil {
        return false, err
    }
@@ -116,11 +116,10 @@ func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) {
}

func (c SenderClassifier) isContainerKey(
-   ctx context.Context,
    owner, idCnr []byte,
    cnr container.Container,
) (bool, error) {
-   nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap
+   nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap
    if err != nil {
        return false, err
    }
@@ -134,7 +133,7 @@ func (c SenderClassifier) isContainerKey(
    // then check previous netmap, this can happen in-between epoch change
    // when node migrates data from last epoch container
-   nm, err = core.GetPreviousNetworkMap(ctx, c.netmap)
+   nm, err = core.GetPreviousNetworkMap(c.netmap)
    if err != nil {
        return false, err
    }

View file

@@ -0,0 +1,172 @@
Running tool: /usr/local/go/bin/go test -benchmem -run=^$ -tags integration -bench ^BenchmarkMClock$ git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/quota -count=1
goos: linux
goarch: amd64
pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/quota
cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
BenchmarkMClock/noop,_1_parallelism-8 8622 138852 ns/op 0 B/op 0 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_1_tags-8 7750 143396 ns/op 381 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_2_tags-8 7890 140165 ns/op 381 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_4_tags-8 8481 142988 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_8_tags-8 7778 143392 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_1_parallelism,_16_tags-8 8174 142753 ns/op 381 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_1_tags-8 8428 141928 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_2_tags-8 8401 142478 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_4_tags-8 7838 143398 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_8_tags-8 8358 143362 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_1_parallelism,_16_tags-8 7646 143140 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_1_tags-8 7792 142760 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_2_tags-8 8359 144336 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_4_tags-8 8433 142473 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_8_tags-8 8074 141951 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_1_parallelism,_16_tags-8 7893 143826 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_1_tags-8 8542 143451 ns/op 387 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_2_tags-8 8462 143230 ns/op 386 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_4_tags-8 7852 140847 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_8_tags-8 8299 142257 ns/op 383 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_1_parallelism,_16_tags-8 8274 143428 ns/op 383 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_1_tags-8 5896 182277 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_2_tags-8 6312 164867 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_4_tags-8 7076 153351 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_8_tags-8 7918 147351 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_1_parallelism,_16_tags-8 7756 146122 ns/op 381 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_1_tags-8 6823 182441 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_2_tags-8 6462 163464 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_4_tags-8 7828 152245 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_8_tags-8 7338 147831 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_1_parallelism,_16_tags-8 7875 145634 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_1_tags-8 6800 178145 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_2_tags-8 6358 163197 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_4_tags-8 7752 153441 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_8_tags-8 7791 148652 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_1_parallelism,_16_tags-8 7851 144684 ns/op 390 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_1_tags-8 5810 182836 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_2_tags-8 6638 160513 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_4_tags-8 7846 152878 ns/op 383 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_8_tags-8 7684 147061 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_1_parallelism,_16_tags-8 7780 145393 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/noop,_8_parallelism-8 8485 139644 ns/op 0 B/op 0 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_1_tags-8 8170 142515 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_2_tags-8 7875 142867 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_4_tags-8 7293 142487 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_8_tags-8 8222 142765 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_8_parallelism,_16_tags-8 8317 142517 ns/op 381 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_1_tags-8 7639 142420 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_2_tags-8 7912 143456 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_4_tags-8 7863 143354 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_8_tags-8 7605 143204 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_8_parallelism,_16_tags-8 7837 142883 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_1_tags-8 8346 143170 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_2_tags-8 8270 142266 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_4_tags-8 8330 143951 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_8_tags-8 8420 143760 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_8_parallelism,_16_tags-8 7776 142903 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_1_tags-8 8458 143242 ns/op 387 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_2_tags-8 7788 143594 ns/op 385 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_4_tags-8 8486 143527 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_8_tags-8 8178 142912 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_8_parallelism,_16_tags-8 7953 143021 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_1_tags-8 6120 182367 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_2_tags-8 6740 163498 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_4_tags-8 7536 156347 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_8_tags-8 7845 149975 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_8_parallelism,_16_tags-8 7728 145382 ns/op 381 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_1_tags-8 6620 176141 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_2_tags-8 7160 168321 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_4_tags-8 6784 154220 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_8_tags-8 7641 147644 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_8_parallelism,_16_tags-8 7839 146256 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_1_tags-8 6825 180484 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_2_tags-8 6726 160547 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_4_tags-8 7887 158786 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_8_tags-8 7756 148823 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_8_parallelism,_16_tags-8 8456 145816 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_1_tags-8 5749 183312 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_2_tags-8 7070 163066 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_4_tags-8 7608 151387 ns/op 383 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_8_tags-8 7801 148704 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_8_parallelism,_16_tags-8 8394 145528 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/noop,_32_parallelism-8 8449 140882 ns/op 0 B/op 0 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_1_tags-8 8341 144258 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_2_tags-8 8190 144026 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_4_tags-8 8432 142862 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_8_tags-8 8221 142749 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_32_parallelism,_16_tags-8 8260 142720 ns/op 381 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_1_tags-8 8449 143299 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_2_tags-8 8289 142904 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_4_tags-8 7720 143325 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_8_tags-8 7814 143928 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_32_parallelism,_16_tags-8 8314 142920 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_1_tags-8 7831 142008 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_2_tags-8 7707 143310 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_4_tags-8 8436 144178 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_8_tags-8 8422 143256 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_32_parallelism,_16_tags-8 7912 142875 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_1_tags-8 7935 142824 ns/op 387 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_2_tags-8 7815 143701 ns/op 385 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_4_tags-8 7846 143185 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_8_tags-8 7292 143897 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_32_parallelism,_16_tags-8 7800 143270 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_1_tags-8 6714 178822 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_2_tags-8 7051 161121 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_4_tags-8 7857 152451 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_8_tags-8 7890 147844 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_32_parallelism,_16_tags-8 7592 145755 ns/op 381 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_1_tags-8 6048 177194 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_2_tags-8 6858 161059 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_4_tags-8 7696 154697 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_8_tags-8 7346 148836 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_32_parallelism,_16_tags-8 8379 145981 ns/op 390 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_1_tags-8 6462 183121 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_2_tags-8 7252 159800 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_4_tags-8 7234 152839 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_8_tags-8 7822 147884 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_32_parallelism,_16_tags-8 7759 144517 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_1_tags-8 6594 175730 ns/op 385 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_2_tags-8 7651 160258 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_4_tags-8 7653 151893 ns/op 383 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_8_tags-8 7148 146893 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_32_parallelism,_16_tags-8 8280 144532 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/noop,_64_parallelism-8 7826 138650 ns/op 0 B/op 0 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_1_tags-8 7788 143331 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_2_tags-8 8160 143617 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_4_tags-8 8360 143654 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_8_tags-8 8473 143208 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_no_reservation,_64_parallelism,_16_tags-8 8170 142293 ns/op 381 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_1_tags-8 7874 143752 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_2_tags-8 8271 142250 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_4_tags-8 8403 143406 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_8_tags-8 7767 143181 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_1.0_reservation,_64_parallelism,_16_tags-8 8377 143528 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_1_tags-8 8172 142732 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_2_tags-8 8367 143579 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_4_tags-8 7752 143488 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_8_tags-8 7873 143070 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_100.0_reservation,_64_parallelism,_16_tags-8 7852 143083 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_1_tags-8 8299 142874 ns/op 387 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_2_tags-8 7737 141432 ns/op 385 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_4_tags-8 7647 143605 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_8_tags-8 8356 142787 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_no_limit,_10000.0_reservation,_64_parallelism,_16_tags-8 8290 141838 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_1_tags-8 5763 181437 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_2_tags-8 6380 162726 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_4_tags-8 7178 161958 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_8_tags-8 7790 154252 ns/op 380 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_no_reservation,_64_parallelism,_16_tags-8 7638 150472 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_1_tags-8 5731 184777 ns/op 389 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_2_tags-8 7092 165548 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_4_tags-8 7207 156112 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_8_tags-8 6919 151170 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_1.0_reservation,_64_parallelism,_16_tags-8 8192 146798 ns/op 390 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_1_tags-8 6075 184531 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_2_tags-8 6435 164215 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_4_tags-8 7815 152395 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_8_tags-8 7689 149567 ns/op 388 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_100.0_reservation,_64_parallelism,_16_tags-8 7846 147134 ns/op 390 B/op 9 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_1_tags-8 5758 190913 ns/op 384 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_2_tags-8 6462 172379 ns/op 383 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_4_tags-8 6819 156184 ns/op 383 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_8_tags-8 7365 152747 ns/op 382 B/op 8 allocs/op
BenchmarkMClock/mclock,_100000.0_limit,_10000.0_reservation,_64_parallelism,_16_tags-8 8094 147905 ns/op 382 B/op 8 allocs/op
PASS
ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/quota 198.209s

View file

@@ -0,0 +1,87 @@
package quota
import (
"context"
"fmt"
"math"
"math/rand/v2"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
)
type noopMClockScheduler struct{}
var (
releaseStub func() = func() {}
defaultLimit float64 = 100_000
shortReservation float64 = 1
medReservation float64 = 100
largeReservation float64 = 10_000
)
func (s *noopMClockScheduler) RequestArrival(context.Context, string) func() {
return releaseStub
}
func BenchmarkMClock(b *testing.B) {
tagsCount := []int{1, 2, 4, 8, 16}
ioDuration := time.Millisecond
parallelismValues := []int{1, 8, 32, 64}
limits := []*float64{nil, &defaultLimit}
reservations := []*float64{nil, &shortReservation, &medReservation, &largeReservation}
for _, parallelism := range parallelismValues {
b.SetParallelism(parallelism)
noopMClock := &noopMClockScheduler{}
b.Run(fmt.Sprintf("noop, %d parallelism", parallelism), func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
release := noopMClock.RequestArrival(context.Background(), "tag")
time.Sleep(ioDuration)
release()
}
})
})
for _, limit := range limits {
for _, reservation := range reservations {
for _, tags := range tagsCount {
tagInfos := make(map[string]TagInfo)
for tag := 0; tag < tags; tag++ {
tagInfos["tag"+strconv.FormatInt(int64(tag), 10)] = TagInfo{shares: 50, limit: limit, reservation: reservation}
}
mClockQ := NewMClockQueue(math.MaxUint64, math.MaxUint64, tagInfos, math.MaxFloat64)
resStr := "no"
if reservation != nil {
resStr = strconv.FormatFloat(*reservation, 'f', 1, 64)
}
limitStr := "no"
if limit != nil {
limitStr = strconv.FormatFloat(*limit, 'f', 1, 64)
}
b.Run(fmt.Sprintf("mclock, %s limit, %s reservation, %d parallelism, %d tags", limitStr, resStr, parallelism, tags), func(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
tag := rand.Int64N(int64(tags))
release, err := mClockQ.RequestArrival(context.Background(), "tag"+strconv.FormatInt(int64(tag), 10))
require.NoError(b, err)
time.Sleep(ioDuration)
release()
}
})
})
}
}
}
}
}

pkg/core/quota/context.go (new file, 21 lines)
View file

@@ -0,0 +1,21 @@
package quota
import "context"
type tagContextKeyType int
const currentTagKey tagContextKeyType = iota
func ContextWithIOTag(parent context.Context, tag string) context.Context {
return context.WithValue(parent, currentTagKey, tag)
}
func IOTagFromContext(ctx context.Context) (string, bool) {
if ctx == nil {
panic("context must be non-nil")
}
if tag, ok := ctx.Value(currentTagKey).(string); ok {
return tag, true
}
return "", false
}
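
For reference, a minimal usage sketch of the helpers above. The import path follows the file header (pkg/core/quota); the "critical" tag value is purely illustrative and is not defined anywhere in this changeset:

package main

import (
    "context"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/quota"
)

func main() {
    // Attach an IO tag to a request context; callees (and the gRPC
    // interceptors added later in this diff) read it back.
    ctx := quota.ContextWithIOTag(context.Background(), "critical")

    if tag, ok := quota.IOTagFromContext(ctx); ok {
        fmt.Println("io tag:", tag) // io tag: critical
    }
}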

View file

@@ -0,0 +1,23 @@
package quota
import (
"context"
"testing"
"github.com/stretchr/testify/require"
)
func TestContext(t *testing.T) {
ctx := context.Background()
tag, ok := IOTagFromContext(ctx)
require.False(t, ok)
require.Equal(t, "", tag)
ctx = ContextWithIOTag(ctx, "tag1")
tag, ok = IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, "tag1", tag)
ctx = ContextWithIOTag(ctx, "tag2")
tag, ok = IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, "tag2", tag)
}

pkg/core/quota/grpc.go (new file, 96 lines)
View file

@@ -0,0 +1,96 @@
package quota
import (
"context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
ioTagHeader = "x-frostfs-io-tag"
)
// NewUnaryClientInteceptor creates a new gRPC unary interceptor that sets the IO tag into gRPC metadata.
func NewUnaryClientInteceptor() grpc.UnaryClientInterceptor {
return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
return invoker(setIOTagToGRPCMetadata(ctx), method, req, reply, cc, opts...)
}
}
// NewStreamClientInterceptor creates a new gRPC stream interceptor that sets the IO tag into gRPC metadata.
func NewStreamClientInterceptor() grpc.StreamClientInterceptor {
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
return streamer(setIOTagToGRPCMetadata(ctx), desc, cc, method, opts...)
}
}
// NewUnaryServerInterceptor creates a new gRPC unary interceptor that extracts the IO tag from gRPC metadata.
func NewUnaryServerInterceptor() grpc.UnaryServerInterceptor {
return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
return handler(extractIOTagFromGRPCMetadata(ctx), req)
}
}
// NewStreamServerInterceptor creates a new gRPC stream interceptor that extracts the IO tag from gRPC metadata.
func NewStreamServerInterceptor() grpc.StreamServerInterceptor {
return func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
return handler(srv, &serverStream{origin: ss})
}
}
func setIOTagToGRPCMetadata(ctx context.Context) context.Context {
ioTag, ok := IOTagFromContext(ctx)
if !ok {
return ctx
}
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
md = metadata.MD{}
}
md.Set(ioTagHeader, ioTag)
return metadata.NewOutgoingContext(ctx, md)
}
func extractIOTagFromGRPCMetadata(ctx context.Context) context.Context {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return ctx
}
values := md.Get(ioTagHeader)
if len(values) > 0 {
return ContextWithIOTag(ctx, values[0])
}
return ctx
}
var _ grpc.ServerStream = &serverStream{}
type serverStream struct {
origin grpc.ServerStream
}
func (s *serverStream) Context() context.Context {
return extractIOTagFromGRPCMetadata(s.origin.Context())
}
func (s *serverStream) RecvMsg(m any) error {
return s.origin.RecvMsg(m)
}
func (s *serverStream) SendHeader(md metadata.MD) error {
return s.origin.SendHeader(md)
}
func (s *serverStream) SendMsg(m any) error {
return s.origin.SendMsg(m)
}
func (s *serverStream) SetHeader(md metadata.MD) error {
return s.origin.SetHeader(md)
}
func (s *serverStream) SetTrailer(md metadata.MD) {
s.origin.SetTrailer(md)
}
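
A sketch of how these interceptors could be wired into a plain grpc-go client and server. The address and credentials are illustrative, not part of this change; note that NewUnaryClientInteceptor is spelled exactly as the function defined above:

package main

import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/quota"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // Server side: restore the IO tag from incoming metadata into the context.
    srv := grpc.NewServer(
        grpc.UnaryInterceptor(quota.NewUnaryServerInterceptor()),
        grpc.StreamInterceptor(quota.NewStreamServerInterceptor()),
    )
    defer srv.Stop()

    // Client side: copy the context's IO tag into outgoing metadata.
    conn, err := grpc.NewClient("127.0.0.1:8080",
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithUnaryInterceptor(quota.NewUnaryClientInteceptor()),
        grpc.WithStreamInterceptor(quota.NewStreamClientInterceptor()),
    )
    if err != nil {
        panic(err)
    }
    defer conn.Close()
}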

pkg/core/quota/mclock.go (new file, 536 lines)
View file

@@ -0,0 +1,536 @@
package quota
import (
"container/heap"
"context"
"errors"
"math"
"sync"
"time"
)
const (
invalidIndex = -1
undefinedReservation float64 = -1.0
)
var (
ErrSchedulerClosed = errors.New("mClock scheduler is closed")
ErrSchedulerRequestLimitExceeded = errors.New("mClock scheduler request limit exceeded")
)
type Release func()
type mQueueItem interface {
ts() float64
setIndex(idx int)
}
type mQueue struct {
items []mQueueItem
}
type request struct {
tag string
ts float64
reservation float64
limit float64
shares float64
reservationIdx int
limitIdx int
sharesIdx int
readyIdx int
scheduled chan struct{}
canceled chan struct{}
}
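// TagInfo holds the mClock parameters for a single IO tag: shares is the
// proportional weight, limit an optional upper bound and reservation an
// optional guaranteed lower bound (both expressed in requests per second).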
type TagInfo struct {
reservation *float64
limit *float64
shares float64
}
type clock interface {
now() float64
runAt(ts float64, f func())
close()
}
type MClockQueue struct {
runLimit uint64
waitLimit int
clock clock
idleTimeout float64
tagInfo map[string]TagInfo
mtx sync.Mutex
previous map[string]*request
inProgress uint64
lastSchedule float64
reservationQueue *mQueue
limitQueue *mQueue
sharesQueue *mQueue
readyQueue *mQueue
closed bool
}
func NewMClockQueue(runLimit, waitLimit uint64, tagInfo map[string]TagInfo, idleTimeout float64) *MClockQueue {
result := &MClockQueue{
runLimit: runLimit,
waitLimit: int(waitLimit),
clock: newSystemClock(),
idleTimeout: idleTimeout,
tagInfo: tagInfo,
reservationQueue: &mQueue{
items: make([]mQueueItem, 0),
},
limitQueue: &mQueue{
items: make([]mQueueItem, 0),
},
sharesQueue: &mQueue{
items: make([]mQueueItem, 0),
},
readyQueue: &mQueue{
items: make([]mQueueItem, 0),
},
}
previous := make(map[string]*request)
for tag := range tagInfo {
previous[tag] = &request{
tag: tag,
reservationIdx: invalidIndex,
limitIdx: invalidIndex,
sharesIdx: invalidIndex,
}
}
result.previous = previous
return result
}
func (q *MClockQueue) RequestArrival(ctx context.Context, tag string) (Release, error) {
req, release, err := q.pushRequest(tag)
if err != nil {
return nil, err
}
select {
case <-ctx.Done():
q.dropRequest(req)
return nil, ctx.Err()
case <-req.scheduled:
return release, nil
case <-req.canceled:
return nil, ErrSchedulerClosed
}
}
func (q *MClockQueue) Close() {
q.mtx.Lock()
defer q.mtx.Unlock()
q.closed = true
q.clock.close()
for q.limitQueue.Len() > 0 {
item := heap.Pop(q.limitQueue).(*limitMQueueItem)
close(item.r.canceled)
q.removeFromQueues(item.r)
}
}
func (q *MClockQueue) dropRequest(req *request) {
q.mtx.Lock()
defer q.mtx.Unlock()
select {
case <-req.scheduled:
if q.inProgress == 0 {
panic("invalid requests count")
}
q.inProgress--
default:
}
q.removeFromQueues(req)
}
func (q *MClockQueue) pushRequest(tag string) (*request, Release, error) {
q.mtx.Lock()
defer q.mtx.Unlock()
if q.closed {
return nil, nil, ErrSchedulerClosed
}
if q.sharesQueue.Len() == q.waitLimit {
return nil, nil, ErrSchedulerRequestLimitExceeded
}
now := q.clock.now()
tagInfo, ok := q.tagInfo[tag]
if !ok {
panic("unknown tag: " + tag)
}
prev, ok := q.previous[tag]
if !ok {
panic("undefined previous: " + tag)
}
if now-prev.ts > q.idleTimeout { // was inactive for q.idleTimeout
q.adjustTags(now, tag)
}
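// mClock tag assignment: each new request advances the tag's reservation,
// limit and shares timestamps by 1/reservation, 1/limit and 1/shares over
// the previous request of the same tag, clamped from below by "now".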
r := &request{
tag: tag,
ts: now,
shares: max(prev.shares+1.0/tagInfo.shares, now),
reservationIdx: invalidIndex,
limitIdx: invalidIndex,
sharesIdx: invalidIndex,
readyIdx: invalidIndex,
scheduled: make(chan struct{}),
canceled: make(chan struct{}),
}
if tagInfo.reservation != nil {
r.reservation = max(prev.reservation + 1.0 / *tagInfo.reservation, now)
} else {
r.reservation = undefinedReservation
}
if tagInfo.limit != nil {
r.limit = max(prev.limit + 1.0 / *tagInfo.limit, now)
} else {
r.limit = max(prev.limit, now)
}
q.previous[tag] = r
if tagInfo.reservation != nil {
heap.Push(q.reservationQueue, &reservationMQueueItem{r: r})
}
heap.Push(q.sharesQueue, &sharesMQueueItem{r: r})
heap.Push(q.limitQueue, &limitMQueueItem{r: r})
q.scheduleRequest(true)
return r, q.requestCompleted, nil
}
func (q *MClockQueue) adjustTags(now float64, idleTag string) {
if q.sharesQueue.Len() == 0 {
return
}
minShare := q.sharesQueue.items[0].ts()
for _, item := range q.limitQueue.items { // limitQueue has all requests and sharesQueue may be fixed
limitItem := (item).(*limitMQueueItem)
if limitItem.r.tag == idleTag {
continue
}
limitItem.r.shares -= (minShare - now)
if limitItem.r.sharesIdx != invalidIndex {
heap.Fix(q.sharesQueue, limitItem.r.sharesIdx)
}
if limitItem.r.readyIdx != invalidIndex {
heap.Fix(q.readyQueue, limitItem.r.readyIdx)
}
}
}
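// scheduleRequest admits as many pending requests as possible: first by
// reservation (guaranteed rate), then by limit and shares (proportional
// weights), and finally arms the clock for the next eligible timestamp
// if run capacity is still available.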
func (q *MClockQueue) scheduleRequest(lockTaken bool) {
if !lockTaken {
q.mtx.Lock()
defer q.mtx.Unlock()
}
if q.inProgress >= q.runLimit {
return
}
now := q.clock.now()
q.scheduleByReservation(now)
if q.inProgress >= q.runLimit {
return
}
q.scheduleByLimitAndWeight(now)
if q.inProgress >= q.runLimit || (q.reservationQueue.Len() == 0 && q.limitQueue.Len() == 0) {
return
}
q.setNextScheduleTimer(now)
}
func (q *MClockQueue) setNextScheduleTimer(now float64) {
nextTs := math.MaxFloat64
if q.reservationQueue.Len() > 0 {
nextTs = q.reservationQueue.items[0].ts()
}
if q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() < nextTs {
nextTs = q.limitQueue.items[0].ts()
}
if q.lastSchedule < now && q.lastSchedule > nextTs {
q.clock.runAt(nextTs, func() {
q.scheduleRequest(false)
})
q.lastSchedule = nextTs
}
}
func (q *MClockQueue) scheduleByLimitAndWeight(now float64) {
for q.limitQueue.Len() > 0 && q.limitQueue.items[0].ts() <= now {
ready := heap.Pop(q.limitQueue).(*limitMQueueItem)
heap.Push(q.readyQueue, &readyMQueueItem{r: ready.r})
}
for q.inProgress < q.runLimit && q.readyQueue.Len() > 0 {
next := heap.Pop(q.readyQueue).(*readyMQueueItem)
hadReservation := false
if next.r.reservationIdx != invalidIndex {
hadReservation = true
heap.Remove(q.reservationQueue, next.r.reservationIdx)
}
q.removeFromQueues(next.r)
tagInfo, ok := q.tagInfo[next.r.tag]
if !ok {
panic("unknown tag: " + next.r.tag)
}
if tagInfo.reservation != nil && hadReservation {
var updated bool
for _, i := range q.reservationQueue.items {
ri := i.(*reservationMQueueItem)
if ri.r.tag == next.r.tag && ri.r.reservation > next.r.reservation {
ri.r.reservation -= 1.0 / *tagInfo.reservation
updated = true
}
}
if updated {
heap.Init(q.reservationQueue)
}
}
select {
case <-next.r.canceled:
continue
default:
}
assertIndexInvalid(next.r)
q.inProgress++
close(next.r.scheduled)
}
}
func (q *MClockQueue) scheduleByReservation(now float64) {
for q.inProgress < q.runLimit && q.reservationQueue.Len() > 0 && q.reservationQueue.items[0].ts() <= now {
next := heap.Pop(q.reservationQueue).(*reservationMQueueItem)
q.removeFromQueues(next.r)
select {
case <-next.r.canceled:
continue
default:
}
assertIndexInvalid(next.r)
q.inProgress++
close(next.r.scheduled)
}
}
func (q *MClockQueue) removeFromQueues(r *request) {
if r.limitIdx != invalidIndex {
heap.Remove(q.limitQueue, r.limitIdx)
}
if r.sharesIdx != invalidIndex {
heap.Remove(q.sharesQueue, r.sharesIdx)
}
if r.readyIdx != invalidIndex {
heap.Remove(q.readyQueue, r.readyIdx)
}
if r.reservationIdx != invalidIndex {
heap.Remove(q.reservationQueue, r.reservationIdx)
}
}
func (q *MClockQueue) requestCompleted() {
q.mtx.Lock()
defer q.mtx.Unlock()
if q.closed {
return
}
if q.inProgress == 0 {
panic("invalid requests count")
}
q.inProgress--
q.scheduleRequest(true)
}
func assertIndexInvalid(r *request) {
if r.limitIdx != invalidIndex {
panic("limitIdx is not -1")
}
if r.sharesIdx != invalidIndex {
panic("sharesIdx is not -1")
}
if r.reservationIdx != invalidIndex {
panic("reservationIdx is not -1")
}
if r.readyIdx != invalidIndex {
panic("readyIdx is not -1")
}
}
// Len implements heap.Interface.
func (q *mQueue) Len() int {
return len(q.items)
}
// Less implements heap.Interface.
func (q *mQueue) Less(i int, j int) bool {
return q.items[i].ts() < q.items[j].ts()
}
// Pop implements heap.Interface.
func (q *mQueue) Pop() any {
n := len(q.items)
item := q.items[n-1]
q.items[n-1] = nil
q.items = q.items[0 : n-1]
item.setIndex(invalidIndex)
return item
}
// Push implements heap.Interface.
func (q *mQueue) Push(x any) {
it := x.(mQueueItem)
it.setIndex(q.Len())
q.items = append(q.items, it)
}
// Swap implements heap.Interface.
func (q *mQueue) Swap(i int, j int) {
q.items[i], q.items[j] = q.items[j], q.items[i]
q.items[i].setIndex(i)
q.items[j].setIndex(j)
}
var _ mQueueItem = &reservationMQueueItem{}
type reservationMQueueItem struct {
r *request
}
func (i *reservationMQueueItem) ts() float64 {
return i.r.reservation
}
func (i *reservationMQueueItem) setIndex(idx int) {
i.r.reservationIdx = idx
}
var _ mQueueItem = &limitMQueueItem{}
type limitMQueueItem struct {
r *request
}
func (i *limitMQueueItem) ts() float64 {
return i.r.limit
}
func (i *limitMQueueItem) setIndex(idx int) {
i.r.limitIdx = idx
}
var _ mQueueItem = &sharesMQueueItem{}
type sharesMQueueItem struct {
r *request
}
func (i *sharesMQueueItem) ts() float64 {
return i.r.shares
}
func (i *sharesMQueueItem) setIndex(idx int) {
i.r.sharesIdx = idx
}
var _ mQueueItem = &readyMQueueItem{}
type readyMQueueItem struct {
r *request
}
func (i *readyMQueueItem) ts() float64 {
return i.r.shares
}
func (i *readyMQueueItem) setIndex(idx int) {
i.r.readyIdx = idx
}
type scheduleInfo struct {
ts float64
f func()
}
type systemClock struct {
since time.Time
schedule chan scheduleInfo
wg sync.WaitGroup
}
func newSystemClock() *systemClock {
c := &systemClock{
    since:    time.Now(),
    schedule: make(chan scheduleInfo),
}
c.start()
return c
}
func (c *systemClock) now() float64 {
return time.Since(c.since).Seconds()
}
func (c *systemClock) runAt(ts float64, f func()) {
c.schedule <- scheduleInfo{ts: ts, f: f}
}
func (c *systemClock) close() {
close(c.schedule)
c.wg.Wait()
}
func (c *systemClock) start() {
c.wg.Add(1)
go func() {
defer c.wg.Done()
t := time.NewTimer(time.Hour)
var f func()
for {
select {
case <-t.C:
if f != nil {
f()
f = nil
}
t.Reset(time.Hour)
case s, ok := <-c.schedule:
if !ok {
return
}
now := c.now()
if now >= s.ts {
s.f()
f = nil
continue
}
t.Reset(time.Duration((s.ts - now) * 1e9))
f = s.f
}
}
}()
}
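
Since TagInfo's fields are unexported, queue construction has to happen inside package quota, as the tests below do. A minimal sketch of the request lifecycle; the tag names and rates are invented for illustration:

package quota

import (
    "context"
    "math"
)

func exampleMClockUsage(ctx context.Context) error {
    limit := 1000.0      // cap each tag at roughly 1000 requests per second
    reservation := 100.0 // guarantee "critical" at least ~100 requests per second
    q := NewMClockQueue(
        8,              // run at most 8 requests concurrently
        math.MaxUint64, // effectively unbounded wait queue
        map[string]TagInfo{
            "critical":   {shares: 2, limit: &limit, reservation: &reservation},
            "background": {shares: 1, limit: &limit},
        },
        60, // a tag counts as idle after 60 seconds without requests
    )
    defer q.Close()

    // Blocks until the scheduler admits the request, the context is done,
    // or the queue is closed.
    release, err := q.RequestArrival(ctx, "critical")
    if err != nil {
        return err
    }
    defer release() // frees the run slot and lets the scheduler admit the next request
    // ... perform the actual IO here ...
    return nil
}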

View file

@@ -0,0 +1,400 @@
package quota
import (
"context"
"math"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
func TestMClockSharesScheduling(t *testing.T) {
t.Parallel()
reqCount := 1000
reqCount = (reqCount / 2) * 2
q := NewMClockQueue(1, math.MaxUint64, map[string]TagInfo{
"class1": {shares: 2},
"class2": {shares: 1},
}, 100)
q.clock = &noopClock{}
var releases []Release
var requests []*request
tag := "class1"
for i := 0; i < reqCount/2; i++ {
req, release, err := q.pushRequest(tag)
require.NoError(t, err)
requests = append(requests, req)
releases = append(releases, release)
}
tag = "class2"
for i := 0; i < reqCount/2; i++ {
req, release, err := q.pushRequest(tag)
require.NoError(t, err)
requests = append(requests, req)
releases = append(releases, release)
}
var result []string
var wg sync.WaitGroup
for i := 0; i < reqCount; i++ {
wg.Add(1)
go func() {
defer wg.Done()
<-requests[i].scheduled
result = append(result, requests[i].tag)
releases[i]()
}()
}
wg.Wait()
// Requests must be scheduled as class1->class1->class2->class1->class1->class2...,
// because the ratio is 2 to 1.
// However, there may be deviations due to rounding and sorting.
result = result[:reqCount/2+(reqCount/2)/2] // the last reqCount/4 requests are the class2 tail
var class1Count int
var class2Count int
var class2MaxSeq int
for _, res := range result {
switch res {
case "class1":
class1Count++
class2MaxSeq = 0
case "class2":
class2Count++
class2MaxSeq++
require.Less(t, class2MaxSeq, 3) // no more than 2 class2 requests are expected to be scheduled in a row
default:
require.Fail(t, "unknown tag")
}
}
require.True(t, (class1Count*100)/(class1Count+class2Count) == 66)
}
var _ clock = &noopClock{}
type noopClock struct {
v float64
}
func (n *noopClock) now() float64 {
return n.v
}
func (n *noopClock) runAt(ts float64, f func()) {}
func (n *noopClock) close() {}
func TestMClockRequestCancel(t *testing.T) {
t.Parallel()
q := NewMClockQueue(1, math.MaxUint64, map[string]TagInfo{
"class1": {shares: 2},
"class2": {shares: 1},
}, 100)
q.clock = &noopClock{}
release1, err := q.RequestArrival(context.Background(), "class1")
require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
defer cancel()
release2, err := q.RequestArrival(ctx, "class1")
require.Nil(t, release2)
require.ErrorIs(t, err, context.DeadlineExceeded)
require.Equal(t, 0, q.readyQueue.Len())
require.Equal(t, 0, q.sharesQueue.Len())
require.Equal(t, 0, q.limitQueue.Len())
require.Equal(t, 0, q.reservationQueue.Len())
release1()
}
func TestMClockLimitScheduling(t *testing.T) {
t.Parallel()
reqCount := 100
reqCount = (reqCount / 2) * 2
limit := 1.0
cl := &noopClock{}
q := NewMClockQueue(1, math.MaxUint64, map[string]TagInfo{
"class1": {shares: 2, limit: &limit},
"class2": {shares: 1, limit: &limit},
}, 100)
q.clock = cl
var releases []Release
var requests []*request
tag := "class1"
for i := 0; i < reqCount/2; i++ {
req, release, err := q.pushRequest(tag)
require.NoError(t, err)
requests = append(requests, req)
releases = append(releases, release)
}
tag = "class2"
for i := 0; i < reqCount/2; i++ {
req, release, err := q.pushRequest(tag)
require.NoError(t, err)
requests = append(requests, req)
releases = append(releases, release)
}
q.scheduleRequest(false)
for _, req := range requests {
select {
case <-req.scheduled:
require.Fail(t, "no request must be scheduled: the clock is at 0.0 and the limit values are greater than 0.0")
default:
}
}
cl.v = math.MaxFloat64
var result []string
var wg sync.WaitGroup
for i := 0; i < reqCount; i++ {
wg.Add(1)
go func() {
defer wg.Done()
<-requests[i].scheduled
result = append(result, requests[i].tag)
releases[i]()
}()
}
q.scheduleRequest(false)
wg.Wait()
// Requests must be scheduled as class1->class1->class2->class1->class1->class2...,
// because the ratio is 2 to 1.
// However, there may be deviations due to rounding and sorting.
result = result[:reqCount/2+(reqCount/2)/2] // the last reqCount/4 requests are the class2 tail
var class1Count int
var class2Count int
var class2MaxSeq int
for _, res := range result {
switch res {
case "class1":
class1Count++
class2MaxSeq = 0
case "class2":
class2Count++
class2MaxSeq++
require.Less(t, class2MaxSeq, 3) // no more than 2 class2 requests are expected to be scheduled in a row
default:
require.Fail(t, "unknown tag")
}
}
require.True(t, (class1Count*100)/(class1Count+class2Count) == 66)
require.Equal(t, 0, q.readyQueue.Len())
require.Equal(t, 0, q.sharesQueue.Len())
require.Equal(t, 0, q.limitQueue.Len())
require.Equal(t, 0, q.reservationQueue.Len())
}
func TestMClockReservationScheduling(t *testing.T) {
t.Parallel()
reqCount := 1000
reqCount = (reqCount / 2) * 2
limit := 0.01 // 1 request in 100 seconds
reservation := 100.0 // 100 RPS
cl := &noopClock{}
q := NewMClockQueue(uint64(reqCount), math.MaxUint64, map[string]TagInfo{
"class1": {shares: 2, limit: &limit},
"class2": {shares: 1, limit: &limit, reservation: &resevation},
}, 100)
q.clock = cl
var releases []Release
var requests []*request
tag := "class1"
for i := 0; i < reqCount/2; i++ {
req, release, err := q.pushRequest(tag)
require.NoError(t, err)
requests = append(requests, req)
releases = append(releases, release)
}
tag = "class2"
for i := 0; i < reqCount/2; i++ {
req, release, err := q.pushRequest(tag)
require.NoError(t, err)
requests = append(requests, req)
releases = append(releases, release)
}
q.scheduleRequest(false)
for _, req := range requests {
select {
case <-req.scheduled:
require.Fail(t, "no request must be scheduled: the clock is at 0.0 and the limit values are greater than 0.0")
default:
}
}
cl.v = 1.00001 // 1s elapsed
q.scheduleRequest(false)
var result []string
for i, req := range requests {
select {
case <-req.scheduled:
result = append(result, requests[i].tag)
releases[i]()
default:
}
}
require.Equal(t, 100, len(result))
for _, res := range result {
require.Equal(t, "class2", res)
}
cl.v = math.MaxFloat64
q.scheduleRequest(false)
require.Equal(t, 0, q.readyQueue.Len())
require.Equal(t, 0, q.sharesQueue.Len())
require.Equal(t, 0, q.limitQueue.Len())
require.Equal(t, 0, q.reservationQueue.Len())
}
func TestMClockIdleTag(t *testing.T) {
t.Parallel()
reqCount := 100
idleTimeout := 2.0
cl := &noopClock{}
q := NewMClockQueue(1, math.MaxUint64, map[string]TagInfo{
"class1": {shares: 1},
"class2": {shares: 1},
}, idleTimeout)
q.clock = cl
var requests []*request
tag := "class1"
for i := 0; i < reqCount/2; i++ {
cl.v += idleTimeout / 2
req, _, err := q.pushRequest(tag)
require.NoError(t, err)
requests = append(requests, req)
}
// class1 requests have shares [1.0; 2.0; 3.0; ... ]
cl.v += 2 * idleTimeout
tag = "class2"
req, _, err := q.pushRequest(tag)
require.NoError(t, err)
requests = append(requests, req)
// class2 must be defined as idle, so all shares tags must be adjusted.
for _, req := range requests {
select {
case <-req.scheduled:
default:
require.True(t, req.shares >= cl.v)
}
}
}
func TestMClockClose(t *testing.T) {
t.Parallel()
q := NewMClockQueue(1, math.MaxUint64, map[string]TagInfo{
"class1": {shares: 1},
}, 1000)
q.clock = &noopClock{}
requestRunning := make(chan struct{})
checkDone := make(chan struct{})
eg, ctx := errgroup.WithContext(context.Background())
tag := "class1"
eg.Go(func() error {
release, err := q.RequestArrival(ctx, tag)
if err != nil {
return err
}
defer release()
close(requestRunning)
<-checkDone
return nil
})
<-requestRunning
eg.Go(func() error {
release, err := q.RequestArrival(ctx, tag)
require.Nil(t, release)
require.ErrorIs(t, err, ErrSchedulerClosed)
return nil
})
// wait until the second request is blocked waiting
for q.limitQueue.Len() == 0 {
time.Sleep(1 * time.Second)
}
q.Close()
release, err := q.RequestArrival(context.Background(), tag)
require.Nil(t, release)
require.ErrorIs(t, err, ErrSchedulerClosed)
close(checkDone)
require.NoError(t, eg.Wait())
}
func TestMClockWaitLimit(t *testing.T) {
t.Parallel()
q := NewMClockQueue(1, 1, map[string]TagInfo{
"class1": {shares: 1},
}, 1000)
q.clock = &noopClock{}
defer q.Close()
requestRunning := make(chan struct{})
checkDone := make(chan struct{})
eg, ctx := errgroup.WithContext(context.Background())
tag := "class1"
// running request
eg.Go(func() error {
release, err := q.RequestArrival(ctx, tag)
if err != nil {
return err
}
defer release()
close(requestRunning)
<-checkDone
return nil
})
// waiting request
eg.Go(func() error {
<-requestRunning
release, err := q.RequestArrival(ctx, tag)
require.NotNil(t, release)
require.NoError(t, err)
defer release()
<-checkDone
return nil
})
// wait until the second request is queued and waiting
for q.sharesQueue.Len() == 0 {
time.Sleep(1 * time.Second)
}
release, err := q.RequestArrival(ctx, tag)
require.Nil(t, release)
require.ErrorIs(t, err, ErrSchedulerRequestLimitExceeded)
close(checkDone)
require.NoError(t, eg.Wait())
}

View file

@@ -1,8 +1,6 @@
package innerring

import (
-   "context"
-
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
    nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -49,12 +47,12 @@ type IrFetcherWithoutNotary struct {

// InnerRingKeys fetches list of innerring keys from NeoFSAlphabet
// role in the sidechain.
-func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
-   return fN.cli.NeoFSAlphabetList(ctx)
+func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) {
+   return fN.cli.NeoFSAlphabetList()
}

// InnerRingKeys fetches list of innerring keys from netmap contract
// in the sidechain.
-func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) {
-   return f.nm.GetInnerRingList(ctx)
+func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) {
+   return f.nm.GetInnerRingList()
}

View file

@@ -1,7 +1,6 @@
 package innerring

 import (
-	"context"
 	"fmt"
 	"sync"
 	"time"
@@ -11,7 +10,7 @@ import (
 type (
 	irFetcher interface {
-		InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
+		InnerRingKeys() (keys.PublicKeys, error)
 	}

 	committeeFetcher interface {
@@ -46,7 +45,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK
 	}
 }

-func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) {
+func (s *innerRingIndexer) update() (ind indexes, err error) {
 	s.RLock()

 	if time.Since(s.lastAccess) < s.timeout {
@@ -63,7 +62,7 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error)
 		return s.ind, nil
 	}

-	innerRing, err := s.irFetcher.InnerRingKeys(ctx)
+	innerRing, err := s.irFetcher.InnerRingKeys()
 	if err != nil {
 		return indexes{}, err
 	}
@@ -82,8 +81,8 @@ func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error)
 	return s.ind, nil
 }

-func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) {
-	ind, err := s.update(ctx)
+func (s *innerRingIndexer) InnerRingIndex() (int32, error) {
+	ind, err := s.update()
 	if err != nil {
 		return 0, fmt.Errorf("can't update index state: %w", err)
 	}
@@ -91,8 +90,8 @@ func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) {
 	return ind.innerRingIndex, nil
 }

-func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) {
-	ind, err := s.update(ctx)
+func (s *innerRingIndexer) InnerRingSize() (int32, error) {
+	ind, err := s.update()
 	if err != nil {
 		return 0, fmt.Errorf("can't update index state: %w", err)
 	}
@@ -100,8 +99,8 @@ func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) {
 	return ind.innerRingSize, nil
 }

-func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) {
-	ind, err := s.update(ctx)
+func (s *innerRingIndexer) AlphabetIndex() (int32, error) {
+	ind, err := s.update()
 	if err != nil {
 		return 0, fmt.Errorf("can't update index state: %w", err)
 	}
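
The indexer touched above is a TTL cache: readers take the read lock and reuse the cached indexes while they are fresh, and a refresh through the fetchers happens at most once per `timeout`. A minimal self-contained sketch of that double-checked locking scheme; the type and the `fetch` callback are illustrative stand-ins, not frostfs-node types:

package main

import (
	"fmt"
	"sync"
	"time"
)

// cachedValue refreshes v through fetch at most once per ttl.
type cachedValue struct {
	sync.RWMutex

	v          int32
	lastUpdate time.Time
	ttl        time.Duration
	fetch      func() (int32, error)
}

func (c *cachedValue) get() (int32, error) {
	c.RLock()
	// Fast path: serve the cached value while it is fresh.
	if time.Since(c.lastUpdate) < c.ttl {
		v := c.v
		c.RUnlock()
		return v, nil
	}
	c.RUnlock()

	c.Lock()
	defer c.Unlock()

	// Re-check under the write lock: another goroutine may have
	// refreshed the value while we were waiting for it.
	if time.Since(c.lastUpdate) < c.ttl {
		return c.v, nil
	}

	v, err := c.fetch()
	if err != nil {
		return 0, err
	}
	c.v = v
	c.lastUpdate = time.Now()
	return v, nil
}

func main() {
	calls := 0
	c := &cachedValue{
		ttl: time.Second,
		fetch: func() (int32, error) {
			calls++
			return int32(calls), nil
		},
	}
	a, _ := c.get()
	b, _ := c.get() // second read is served from the cache
	fmt.Println(a, b, calls) // 1 1 1
}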

View file

@@ -1,7 +1,6 @@
 package innerring

 import (
-	"context"
 	"fmt"
 	"sync/atomic"
 	"testing"
@@ -38,15 +37,15 @@ func TestIndexerReturnsIndexes(t *testing.T) {
 		indexer := newInnerRingIndexer(cf, irf, key, time.Second)

-		idx, err := indexer.AlphabetIndex(context.Background())
+		idx, err := indexer.AlphabetIndex()
 		require.NoError(t, err, "failed to get alphabet index")
 		require.Equal(t, int32(1), idx, "invalid alphabet index")

-		idx, err = indexer.InnerRingIndex(context.Background())
+		idx, err = indexer.InnerRingIndex()
 		require.NoError(t, err, "failed to get IR index")
 		require.Equal(t, int32(2), idx, "invalid IR index")

-		size, err := indexer.InnerRingSize(context.Background())
+		size, err := indexer.InnerRingSize()
 		require.NoError(t, err, "failed to get IR size")
 		require.Equal(t, int32(3), size, "invalid IR size")
 	})
@@ -57,11 +56,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
 		indexer := newInnerRingIndexer(cf, irf, key, time.Second)

-		idx, err := indexer.AlphabetIndex(context.Background())
+		idx, err := indexer.AlphabetIndex()
 		require.NoError(t, err, "failed to get alphabet index")
 		require.Equal(t, int32(-1), idx, "invalid alphabet index")

-		idx, err = indexer.InnerRingIndex(context.Background())
+		idx, err = indexer.InnerRingIndex()
 		require.NoError(t, err, "failed to get IR index")
 		require.Equal(t, int32(0), idx, "invalid IR index")
 	})
@@ -72,11 +71,11 @@ func TestIndexerReturnsIndexes(t *testing.T) {
 		indexer := newInnerRingIndexer(cf, irf, key, time.Second)

-		idx, err := indexer.AlphabetIndex(context.Background())
+		idx, err := indexer.AlphabetIndex()
 		require.NoError(t, err, "failed to get alphabet index")
 		require.Equal(t, int32(0), idx, "invalid alphabet index")

-		idx, err = indexer.InnerRingIndex(context.Background())
+		idx, err = indexer.InnerRingIndex()
 		require.NoError(t, err, "failed to get IR index")
 		require.Equal(t, int32(-1), idx, "invalid IR index")
 	})
@@ -101,30 +100,30 @@ func TestIndexerCachesIndexes(t *testing.T) {
 	indexer := newInnerRingIndexer(cf, irf, key, time.Second)

-	idx, err := indexer.AlphabetIndex(context.Background())
+	idx, err := indexer.AlphabetIndex()
 	require.NoError(t, err, "failed to get alphabet index")
 	require.Equal(t, int32(-1), idx, "invalid alphabet index")

-	idx, err = indexer.InnerRingIndex(context.Background())
+	idx, err = indexer.InnerRingIndex()
 	require.NoError(t, err, "failed to get IR index")
 	require.Equal(t, int32(-1), idx, "invalid IR index")

-	size, err := indexer.InnerRingSize(context.Background())
+	size, err := indexer.InnerRingSize()
 	require.NoError(t, err, "failed to get IR size")
 	require.Equal(t, int32(0), size, "invalid IR size")

 	require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count")
 	require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count")

-	idx, err = indexer.AlphabetIndex(context.Background())
+	idx, err = indexer.AlphabetIndex()
 	require.NoError(t, err, "failed to get alphabet index")
 	require.Equal(t, int32(-1), idx, "invalid alphabet index")

-	idx, err = indexer.InnerRingIndex(context.Background())
+	idx, err = indexer.InnerRingIndex()
 	require.NoError(t, err, "failed to get IR index")
 	require.Equal(t, int32(-1), idx, "invalid IR index")

-	size, err = indexer.InnerRingSize(context.Background())
+	size, err = indexer.InnerRingSize()
 	require.NoError(t, err, "failed to get IR size")
 	require.Equal(t, int32(0), size, "invalid IR size")
@@ -133,15 +132,15 @@ func TestIndexerCachesIndexes(t *testing.T) {
 	time.Sleep(2 * time.Second)

-	idx, err = indexer.AlphabetIndex(context.Background())
+	idx, err = indexer.AlphabetIndex()
 	require.NoError(t, err, "failed to get alphabet index")
 	require.Equal(t, int32(-1), idx, "invalid alphabet index")

-	idx, err = indexer.InnerRingIndex(context.Background())
+	idx, err = indexer.InnerRingIndex()
 	require.NoError(t, err, "failed to get IR index")
 	require.Equal(t, int32(-1), idx, "invalid IR index")

-	size, err = indexer.InnerRingSize(context.Background())
+	size, err = indexer.InnerRingSize()
 	require.NoError(t, err, "failed to get IR size")
 	require.Equal(t, int32(0), size, "invalid IR size")
@@ -166,15 +165,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
 	indexer := newInnerRingIndexer(cf, irf, key, time.Second)

-	idx, err := indexer.AlphabetIndex(context.Background())
+	idx, err := indexer.AlphabetIndex()
 	require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed")
 	require.Equal(t, int32(0), idx, "invalid alphabet index")

-	idx, err = indexer.InnerRingIndex(context.Background())
+	idx, err = indexer.InnerRingIndex()
 	require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
 	require.Equal(t, int32(0), idx, "invalid IR index")

-	size, err := indexer.InnerRingSize(context.Background())
+	size, err := indexer.InnerRingSize()
 	require.ErrorContains(t, err, "test commitee error", "error from IR not throwed")
 	require.Equal(t, int32(0), size, "invalid IR size")
@@ -190,15 +189,15 @@ func TestIndexerThrowsErrors(t *testing.T) {
 	indexer = newInnerRingIndexer(cf, irf, key, time.Second)

-	idx, err = indexer.AlphabetIndex(context.Background())
+	idx, err = indexer.AlphabetIndex()
 	require.ErrorContains(t, err, "test IR error", "error from commitee not throwed")
 	require.Equal(t, int32(0), idx, "invalid alphabet index")

-	idx, err = indexer.InnerRingIndex(context.Background())
+	idx, err = indexer.InnerRingIndex()
 	require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
 	require.Equal(t, int32(0), idx, "invalid IR index")

-	size, err = indexer.InnerRingSize(context.Background())
+	size, err = indexer.InnerRingSize()
 	require.ErrorContains(t, err, "test IR error", "error from IR not throwed")
 	require.Equal(t, int32(0), size, "invalid IR size")
 }
@@ -220,7 +219,7 @@ type testIRFetcher struct {
 	calls atomic.Int32
 }

-func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
+func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
 	f.calls.Add(1)
 	return f.keys, f.err
 }
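
The fakes in this test count invocations with `atomic.Int32`, which is what lets `TestIndexerCachesIndexes` assert that repeated reads within the TTL hit the cache (`cf.calls.Load()` stays at 1). A condensed sketch of that test-double pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync/atomic"
)

// fetcher is the dependency being substituted in tests.
type fetcher interface {
	Fetch() (int, error)
}

// countingFetcher records how many times Fetch was called, so a test
// can prove that a cache in front of it avoided redundant calls.
type countingFetcher struct {
	value int
	err   error
	calls atomic.Int32
}

func (f *countingFetcher) Fetch() (int, error) {
	f.calls.Add(1)
	return f.value, f.err
}

func main() {
	f := &countingFetcher{value: 42}
	var dep fetcher = f
	dep.Fetch()
	dep.Fetch()
	fmt.Println(f.calls.Load()) // 2
}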

View file

@@ -38,7 +38,10 @@ import (
 func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
 	alphaSync event.Handler,
 ) error {
-	locodeValidator := s.newLocodeValidator(cfg)
+	locodeValidator, err := s.newLocodeValidator(cfg)
+	if err != nil {
+		return err
+	}

 	netSettings := (*networkSettings)(s.netmapClient)

@@ -48,7 +51,6 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
 	poolSize := cfg.GetInt("workers.netmap")
 	s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize))

-	var err error
 	s.netmapProcessor, err = netmap.New(&netmap.Params{
 		Log:     s.log,
 		Metrics: s.irMetrics,

View file

@@ -575,19 +575,19 @@ func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNe
 func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
 	// get current epoch
-	epoch, err := s.netmapClient.Epoch(ctx)
+	epoch, err := s.netmapClient.Epoch()
 	if err != nil {
 		return fmt.Errorf("can't read epoch number: %w", err)
 	}

 	// get current epoch duration
-	epochDuration, err := s.netmapClient.EpochDuration(ctx)
+	epochDuration, err := s.netmapClient.EpochDuration()
 	if err != nil {
 		return fmt.Errorf("can't read epoch duration: %w", err)
 	}

 	// get balance precision
-	balancePrecision, err := s.balanceClient.Decimals(ctx)
+	balancePrecision, err := s.balanceClient.Decimals()
 	if err != nil {
 		return fmt.Errorf("can't read balance contract precision: %w", err)
 	}
@@ -597,7 +597,7 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
 	s.precision.SetBalancePrecision(balancePrecision)

 	// get next epoch delta tick
-	s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx)
+	s.initialEpochTickDelta, err = s.nextEpochBlockDelta()
 	if err != nil {
 		return err
 	}
@@ -613,8 +613,8 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error {
 	return nil
 }

-func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) {
-	epochBlock, err := s.netmapClient.LastEpochBlock(ctx)
+func (s *Server) nextEpochBlockDelta() (uint32, error) {
+	epochBlock, err := s.netmapClient.LastEpochBlock()
 	if err != nil {
 		return 0, fmt.Errorf("can't read last epoch block: %w", err)
 	}

View file

@@ -9,7 +9,7 @@ import (
 	"github.com/spf13/viper"
 )

-func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
+func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) {
 	locodeDB := locodebolt.New(locodebolt.Prm{
 		Path: cfg.GetString("locode.db.path"),
 	},
@@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator {
 	return irlocode.New(irlocode.Prm{
 		DB: (*locodeBoltDBWrapper)(locodeDB),
-	})
+	}), nil
 }

 type locodeBoltEntryWrapper struct {

View file

@@ -1,7 +1,6 @@
 package innerring

 import (
-	"context"
 	"fmt"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -18,8 +17,8 @@ type networkSettings netmapclient.Client
 // MaintenanceModeAllowed requests network configuration from the Sidechain
 // and check allowance of storage node's maintenance mode according to it.
 // Always returns state.ErrMaintenanceModeDisallowed.
-func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error {
-	allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx)
+func (s *networkSettings) MaintenanceModeAllowed() error {
+	allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed()
 	if err != nil {
 		return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err)
 	} else if allowed {

View file

@@ -279,6 +279,6 @@ type testNetmapClient struct {
 	netmap *netmap.NetMap
 }

-func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
+func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
 	return c.netmap, nil
 }

View file

@@ -44,7 +44,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
 		return true
 	}

-	networkMap, err := ap.netmapClient.NetMap(ctx)
+	networkMap, err := ap.netmapClient.NetMap()
 	if err != nil {
 		ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
 			zap.Error(err))

View file

@@ -36,7 +36,7 @@ type (
 	}

 	netmapClient interface {
-		NetMap(ctx context.Context) (*netmap.NetMap, error)
+		NetMap() (*netmap.NetMap, error)
 	}

 	morphClient interface {
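
A convention worth noting in these processors: each one declares the narrow client interface it actually consumes (here `netmapClient` is a single method), so tests can substitute a one-method fake instead of mocking the whole morph client. An illustrative reduction of that idea, with made-up names:

package main

import "fmt"

// netmapSource is declared by the consumer and holds only what it needs.
type netmapSource interface {
	NetMap() ([]string, error)
}

type processor struct{ nm netmapSource }

func (p *processor) emit() error {
	nodes, err := p.nm.NetMap()
	if err != nil {
		return err
	}
	fmt.Println("emitting gas to", len(nodes), "nodes")
	return nil
}

// A fake for a one-method interface is trivial to write in tests.
type fakeSource struct{ nodes []string }

func (f fakeSource) NetMap() ([]string, error) { return f.nodes, nil }

func main() {
	p := &processor{nm: fakeSource{nodes: []string{"a", "b"}}}
	_ = p.emit() // emitting gas to 2 nodes
}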

View file

@@ -1,7 +1,6 @@
 package container

 import (
-	"context"
 	"crypto/ecdsa"
 	"errors"
 	"fmt"
@@ -46,7 +45,7 @@ type signatureVerificationData struct {
 //   - v.binPublicKey is a public session key
 //   - session context corresponds to the container and verb in v
 //   - session is "alive"
-func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error {
+func (cp *Processor) verifySignature(v signatureVerificationData) error {
 	var err error
 	var key frostfsecdsa.PublicKeyRFC6979
 	keyProvided := v.binPublicKey != nil
@@ -59,7 +58,7 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio
 	}

 	if len(v.binTokenSession) > 0 {
-		return cp.verifyByTokenSession(ctx, v, &key, keyProvided)
+		return cp.verifyByTokenSession(v, &key, keyProvided)
 	}

 	if keyProvided {
@@ -78,8 +77,8 @@ func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificatio
 	return errors.New("signature is invalid or calculated with the key not bound to the container owner")
 }

-func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error {
-	curEpoch, err := cp.netState.Epoch(ctx)
+func (cp *Processor) checkTokenLifetime(token session.Container) error {
+	curEpoch, err := cp.netState.Epoch()
 	if err != nil {
 		return fmt.Errorf("could not read current epoch: %w", err)
 	}
@@ -91,7 +90,7 @@ func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Conta
 	return nil
 }

-func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
+func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error {
 	var tok session.Container

 	err := tok.Unmarshal(v.binTokenSession)
@@ -119,7 +118,7 @@ func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerifi
 		return errors.New("owner differs with token owner")
 	}

-	err = cp.checkTokenLifetime(ctx, tok)
+	err = cp.checkTokenLifetime(tok)
 	if err != nil {
 		return fmt.Errorf("check session lifetime: %w", err)
 	}
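
`checkTokenLifetime` reads the current epoch from the network state and rejects session tokens that are not alive at it; the comparison itself sits outside this hunk. A schematic version of an epoch-based lifetime check, using stand-in types rather than the SDK's `session.Container` (the field names are assumptions for illustration):

package main

import (
	"errors"
	"fmt"
)

// token models the epoch-based lifetime claims of a session token.
type token struct {
	nbf uint64 // not valid before this epoch
	exp uint64 // not valid after this epoch
}

var errTokenExpired = errors.New("token is not valid at the current epoch")

// checkLifetime fails when the current epoch falls outside [nbf, exp].
func checkLifetime(curEpoch uint64, t token) error {
	if curEpoch < t.nbf || curEpoch > t.exp {
		return fmt.Errorf("%w: epoch %d not in [%d, %d]", errTokenExpired, curEpoch, t.nbf, t.exp)
	}
	return nil
}

func main() {
	t := token{nbf: 10, exp: 20}
	fmt.Println(checkLifetime(15, t)) // <nil>
	fmt.Println(checkLifetime(25, t)) // expired
}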

View file

@@ -170,11 +170,11 @@ type testNetworkState struct {
 	epoch           uint64
 }

-func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) {
+func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) {
 	return s.homHashDisabled, nil
 }

-func (s *testNetworkState) Epoch(context.Context) (uint64, error) {
+func (s *testNetworkState) Epoch() (uint64, error) {
 	return s.epoch, nil
 }
@@ -187,7 +187,7 @@ func (c *testContainerClient) ContractAddress() util.Uint160 {
 	return c.contractAddress
 }

-func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) {
+func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) {
 	key := hex.EncodeToString(cid)
 	if cont, found := c.get[key]; found {
 		return cont, nil
@@ -237,6 +237,6 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction)
 type testFrostFSIDClient struct{}

-func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) {
+func (c *testFrostFSIDClient) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) {
 	return &frostfsidclient.Subject{}, nil
 }

View file

@@ -47,7 +47,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
 		e: put,
 	}

-	err := cp.checkPutContainer(ctx, pctx)
+	err := cp.checkPutContainer(pctx)
 	if err != nil {
 		cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
 			zap.Error(err),
@@ -66,8 +66,8 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
 	return true
 }

-func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error {
-	binCnr := pctx.e.Container()
+func (cp *Processor) checkPutContainer(ctx *putContainerContext) error {
+	binCnr := ctx.e.Container()

 	var cnr containerSDK.Container
 	err := cnr.Unmarshal(binCnr)
@@ -75,12 +75,12 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
 		return fmt.Errorf("invalid binary container: %w", err)
 	}

-	err = cp.verifySignature(ctx, signatureVerificationData{
+	err = cp.verifySignature(signatureVerificationData{
 		ownerContainer:  cnr.Owner(),
 		verb:            session.VerbContainerPut,
-		binTokenSession: pctx.e.SessionToken(),
-		binPublicKey:    pctx.e.PublicKey(),
-		signature:       pctx.e.Signature(),
+		binTokenSession: ctx.e.SessionToken(),
+		binPublicKey:    ctx.e.PublicKey(),
+		signature:       ctx.e.Signature(),
 		signedData:      binCnr,
 	})
 	if err != nil {
@@ -88,13 +88,13 @@ func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerCo
 	}

 	// check homomorphic hashing setting
-	err = checkHomomorphicHashing(ctx, cp.netState, cnr)
+	err = checkHomomorphicHashing(cp.netState, cnr)
 	if err != nil {
 		return fmt.Errorf("incorrect homomorphic hashing setting: %w", err)
 	}

 	// check native name and zone
-	err = cp.checkNNS(ctx, pctx, cnr)
+	err = cp.checkNNS(ctx, cnr)
 	if err != nil {
 		return fmt.Errorf("NNS: %w", err)
 	}
@@ -110,7 +110,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
 		return true
 	}

-	err := cp.checkDeleteContainer(ctx, e)
+	err := cp.checkDeleteContainer(e)
 	if err != nil {
 		cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
 			zap.Error(err),
@@ -130,7 +130,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
 	return true
 }

-func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error {
+func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error {
 	binCnr := e.ContainerID()

 	var idCnr cid.ID
@@ -141,12 +141,12 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.
 	}

 	// receive owner of the related container
-	cnr, err := cp.cnrClient.Get(ctx, binCnr)
+	cnr, err := cp.cnrClient.Get(binCnr)
 	if err != nil {
 		return fmt.Errorf("could not receive the container: %w", err)
 	}

-	err = cp.verifySignature(ctx, signatureVerificationData{
+	err = cp.verifySignature(signatureVerificationData{
 		ownerContainer: cnr.Value.Owner(),
 		verb:           session.VerbContainerDelete,
 		idContainerSet: true,
@@ -163,21 +163,21 @@ func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.
 	return nil
 }

-func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error {
+func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error {
 	// fetch domain info
-	pctx.d = containerSDK.ReadDomain(cnr)
+	ctx.d = containerSDK.ReadDomain(cnr)

 	// if PutNamed event => check if values in container correspond to args
-	if named, ok := pctx.e.(interface {
+	if named, ok := ctx.e.(interface {
 		Name() string
 		Zone() string
 	}); ok {
-		if name := named.Name(); name != pctx.d.Name() {
-			return fmt.Errorf("names differ %s/%s", name, pctx.d.Name())
+		if name := named.Name(); name != ctx.d.Name() {
+			return fmt.Errorf("names differ %s/%s", name, ctx.d.Name())
 		}

-		if zone := named.Zone(); zone != pctx.d.Zone() {
-			return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone())
+		if zone := named.Zone(); zone != ctx.d.Zone() {
+			return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone())
 		}
 	}
@@ -186,12 +186,12 @@ func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cn
 		return fmt.Errorf("could not get container owner address: %w", err)
 	}

-	subject, err := cp.frostFSIDClient.GetSubject(ctx, addr)
+	subject, err := cp.frostFSIDClient.GetSubject(addr)
 	if err != nil {
 		return fmt.Errorf("could not get subject from FrostfsID contract: %w", err)
 	}

-	namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns")
+	namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns")
 	if !hasNamespace {
 		return nil
 	}
@@ -203,8 +203,8 @@ func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cn
 	return nil
 }

-func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error {
-	netSetting, err := ns.HomomorphicHashDisabled(ctx)
+func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error {
+	netSetting, err := ns.HomomorphicHashDisabled()
 	if err != nil {
 		return fmt.Errorf("could not get setting in contract: %w", err)
 	}
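
`checkNNS` derives the FrostFS namespace from the container's domain zone by stripping the `.ns` suffix with `strings.CutSuffix` (Go 1.20+); zones without the suffix skip the namespace ownership check entirely. A standalone illustration of that extraction:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A zone of the form "<namespace>.ns" encodes the namespace.
	namespace, hasNamespace := strings.CutSuffix("tenant1.ns", ".ns")
	fmt.Println(namespace, hasNamespace) // tenant1 true

	// Any other zone carries no namespace, so the check is skipped.
	namespace, hasNamespace = strings.CutSuffix("container", ".ns")
	fmt.Println(namespace, hasNamespace) // container false
}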

View file

@@ -25,7 +25,7 @@ type (
 	ContClient interface {
 		ContractAddress() util.Uint160
-		Get(ctx context.Context, cid []byte) (*containercore.Container, error)
+		Get(cid []byte) (*containercore.Container, error)
 	}

 	MorphClient interface {
@@ -33,7 +33,7 @@ type (
 	}

 	FrostFSIDClient interface {
-		GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error)
+		GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error)
 	}

 	// Processor of events produced by container contract in the sidechain.
@@ -68,7 +68,7 @@ type NetworkState interface {
 	//
 	// Must return any error encountered
 	// which did not allow reading the value.
-	Epoch(ctx context.Context) (uint64, error)
+	Epoch() (uint64, error)

 	// HomomorphicHashDisabled must return boolean that
 	// represents homomorphic network state:
@@ -76,7 +76,7 @@ type NetworkState interface {
 	// * false if hashing is enabled.
 	//
 	// which did not allow reading the value.
-	HomomorphicHashDisabled(ctx context.Context) (bool, error)
+	HomomorphicHashDisabled() (bool, error)
 }

 // New creates a container contract processor instance.

View file

@@ -236,7 +236,7 @@ type testIRFetcher struct {
 	publicKeys keys.PublicKeys
 }

-func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) {
+func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) {
 	return f.publicKeys, nil
 }
@@ -266,7 +266,7 @@ type testMainnetClient struct {
 	designateHash util.Uint160
 }

-func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) {
+func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
 	return c.alphabetKeys, nil
 }

View file

@@ -25,7 +25,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
 		return true
 	}

-	mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx)
+	mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
 			zap.Error(err))
@@ -95,7 +95,7 @@ func prettyKeys(keys keys.PublicKeys) string {
 }

 func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
-	innerRing, err := gp.irFetcher.InnerRingKeys(ctx)
+	innerRing, err := gp.irFetcher.InnerRingKeys()
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
 			zap.Error(err))

View file

@@ -52,7 +52,7 @@ type (
 	// Implementation must take into account availability of
 	// the notary contract.
 	IRFetcher interface {
-		InnerRingKeys(ctx context.Context) (keys.PublicKeys, error)
+		InnerRingKeys() (keys.PublicKeys, error)
 	}

 	FrostFSClient interface {
@@ -64,7 +64,7 @@ type (
 	}

 	MainnetClient interface {
-		NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error)
+		NeoFSAlphabetList() (res keys.PublicKeys, err error)
 		GetDesignateHash() util.Uint160
 	}

View file

@@ -294,7 +294,7 @@ type testNodeStateSettings struct {
 	maintAllowed bool
 }

-func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error {
+func (s *testNodeStateSettings) MaintenanceModeAllowed() error {
 	if s.maintAllowed {
 		return nil
 	}
@@ -303,7 +303,7 @@ func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error {

 type testValidator struct{}

-func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error {
+func (v *testValidator) VerifyAndUpdate(*netmap.NodeInfo) error {
 	return nil
 }
@@ -381,7 +381,7 @@ func (c *testNetmapClient) ContractAddress() util.Uint160 {
 	return c.contractAddress
 }

-func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) {
+func (c *testNetmapClient) EpochDuration() (uint64, error) {
 	return c.epochDuration, nil
 }
@@ -392,7 +392,7 @@ func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) {
 	return 0, fmt.Errorf("not found")
 }

-func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) {
+func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) {
 	return c.netmap, nil
 }

View file

@@ -1,7 +1,6 @@
 package locode

 import (
-	"context"
 	"errors"
 	"fmt"

@@ -30,7 +29,7 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record
 //   - Continent: R.Continent().String().
 //
 // UN-LOCODE attribute remains untouched.
-func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
 	attrLocode := n.LOCODE()
 	if attrLocode == "" {
 		return nil

View file

@@ -1,7 +1,6 @@
 package locode_test

 import (
-	"context"
 	"errors"
 	"fmt"
 	"testing"
@@ -93,7 +92,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
 	t.Run("w/o locode", func(t *testing.T) {
 		n := nodeInfoWithSomeAttrs()

-		err := validator.VerifyAndUpdate(context.Background(), n)
+		err := validator.VerifyAndUpdate(n)
 		require.NoError(t, err)
 	})
@@ -103,7 +102,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
 		addLocodeAttrValue(n, "WRONG LOCODE")

-		err := validator.VerifyAndUpdate(context.Background(), n)
+		err := validator.VerifyAndUpdate(n)
 		require.Error(t, err)
 	})
@@ -112,7 +111,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
 		addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"})

-		err := validator.VerifyAndUpdate(context.Background(), n)
+		err := validator.VerifyAndUpdate(n)
 		require.Error(t, err)
 	})
@@ -120,7 +119,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
 		addLocodeAttr(n, r.LOCODE)

-		err := validator.VerifyAndUpdate(context.Background(), n)
+		err := validator.VerifyAndUpdate(n)
 		require.NoError(t, err)

 		require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode"))

View file

@@ -1,7 +1,6 @@
 package maddress

 import (
-	"context"
 	"fmt"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -9,7 +8,7 @@ import (
 )

 // VerifyAndUpdate calls network.VerifyAddress.
-func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error {
+func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
 	err := network.VerifyMultiAddress(*n)
 	if err != nil {
 		return fmt.Errorf("could not verify multiaddress: %w", err)

View file

@@ -7,7 +7,6 @@ map candidates.
 package state

 import (
-	"context"
 	"errors"

 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -24,7 +23,7 @@ type NetworkSettings interface {
 	//  no error if allowed;
 	//  ErrMaintenanceModeDisallowed if disallowed;
 	//  other error if there are any problems with the check.
-	MaintenanceModeAllowed(ctx context.Context) error
+	MaintenanceModeAllowed() error
 }

 // NetMapCandidateValidator represents tool which checks state of nodes which
@@ -56,13 +55,13 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting
 // MUST NOT be called before SetNetworkSettings.
 //
 // See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods.
-func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error {
+func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error {
 	if node.Status().IsOnline() {
 		return nil
 	}

 	if node.Status().IsMaintenance() {
-		return x.netSettings.MaintenanceModeAllowed(ctx)
+		return x.netSettings.MaintenanceModeAllowed()
 	}

 	return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE")

View file

@@ -1,7 +1,6 @@
 package state_test

 import (
-	"context"
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
@@ -14,7 +13,7 @@ type testNetworkSettings struct {
 	disallowed bool
 }

-func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error {
+func (x testNetworkSettings) MaintenanceModeAllowed() error {
 	if x.disallowed {
 		return state.ErrMaintenanceModeDisallowed
 	}
@@ -82,7 +81,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
 			testCase.validatorPreparer(&v)
 		}

-		err := v.VerifyAndUpdate(context.Background(), &node)
+		err := v.VerifyAndUpdate(&node)

 		if testCase.valid {
 			require.NoError(t, err, testCase.name)

View file

@@ -1,8 +1,6 @@
 package nodevalidation

 import (
-	"context"
-
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
 	apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 )
@@ -28,9 +26,9 @@ func New(validators ...netmap.NodeValidator) *CompositeValidator {
 // VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators.
 //
 // If error appears, returns it immediately.
-func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error {
+func (c *CompositeValidator) VerifyAndUpdate(ni *apinetmap.NodeInfo) error {
 	for _, v := range c.validators {
-		if err := v.VerifyAndUpdate(ctx, ni); err != nil {
+		if err := v.VerifyAndUpdate(ni); err != nil {
 			return err
 		}
 	}
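
`CompositeValidator` is a plain fan-out: it runs the wrapped validators in registration order and returns the first error. The same shape in isolation, with an illustrative interface instead of `netmap.NodeValidator`:

package main

import (
	"errors"
	"fmt"
)

type nodeInfo struct{ locode string }

// nodeValidator stands in for the netmap.NodeValidator interface.
type nodeValidator interface {
	VerifyAndUpdate(*nodeInfo) error
}

type compositeValidator struct{ validators []nodeValidator }

func newComposite(vs ...nodeValidator) *compositeValidator {
	return &compositeValidator{validators: vs}
}

// VerifyAndUpdate runs every wrapped validator in order and
// stops at the first failure.
func (c *compositeValidator) VerifyAndUpdate(ni *nodeInfo) error {
	for _, v := range c.validators {
		if err := v.VerifyAndUpdate(ni); err != nil {
			return err
		}
	}
	return nil
}

type locodeCheck struct{}

func (locodeCheck) VerifyAndUpdate(ni *nodeInfo) error {
	if ni.locode == "" {
		return errors.New("missing LOCODE")
	}
	return nil
}

func main() {
	v := newComposite(locodeCheck{})
	fmt.Println(v.VerifyAndUpdate(&nodeInfo{locode: "RU LED"})) // <nil>
	fmt.Println(v.VerifyAndUpdate(&nodeInfo{}))                 // missing LOCODE
}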

View file

@@ -14,7 +14,7 @@ import (
 func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool {
 	epoch := ev.EpochNumber()

-	epochDuration, err := np.netmapClient.EpochDuration(ctx)
+	epochDuration, err := np.netmapClient.EpochDuration()
 	if err != nil {
 		np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
 			zap.Error(err))
@@ -37,7 +37,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
 	}

 	// get new netmap snapshot
-	networkMap, err := np.netmapClient.NetMap(ctx)
+	networkMap, err := np.netmapClient.NetMap()
 	if err != nil {
 		np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
 			zap.Error(err))

View file

@@ -39,7 +39,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
 	}

 	// validate and update node info
-	err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo)
+	err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
 	if err != nil {
 		np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
 			zap.Error(err),
@@ -108,7 +108,7 @@ func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.Updat
 	var err error

 	if ev.Maintenance() {
-		err = np.nodeStateSettings.MaintenanceModeAllowed(ctx)
+		err = np.nodeStateSettings.MaintenanceModeAllowed()
 		if err != nil {
 			np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState,
 				zap.Error(err),

View file

@@ -49,15 +49,15 @@ type (
 		//
 		// If no error occurs, the parameter must point to the
 		// ready-made NodeInfo structure.
-		VerifyAndUpdate(context.Context, *netmap.NodeInfo) error
+		VerifyAndUpdate(*netmap.NodeInfo) error
 	}

 	Client interface {
 		MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error
 		ContractAddress() util.Uint160
-		EpochDuration(ctx context.Context) (uint64, error)
+		EpochDuration() (uint64, error)
 		MorphTxHeight(h util.Uint256) (res uint32, err error)
-		NetMap(ctx context.Context) (*netmap.NetMap, error)
+		NetMap() (*netmap.NetMap, error)
 		NewEpoch(ctx context.Context, epoch uint64) error
 		MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error)
 		MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error

View file

@@ -34,16 +34,16 @@ func (w *netmapClientWrapper) ContractAddress() util.Uint160 {
 	return w.netmapClient.ContractAddress()
 }

-func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) {
-	return w.netmapClient.EpochDuration(ctx)
+func (w *netmapClientWrapper) EpochDuration() (uint64, error) {
+	return w.netmapClient.EpochDuration()
 }

 func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) {
 	return w.netmapClient.Morph().TxHeight(h)
 }

-func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) {
-	return w.netmapClient.NetMap(ctx)
+func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) {
+	return w.netmapClient.NetMap()
 }

 func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error {

View file

@@ -60,7 +60,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool {
 // InnerRingIndex is a getter for a global index of node in inner ring list. Negative
 // index means that node is not in the inner ring list.
 func (s *Server) InnerRingIndex(ctx context.Context) int {
-	index, err := s.statusIndex.InnerRingIndex(ctx)
+	index, err := s.statusIndex.InnerRingIndex()
 	if err != nil {
 		s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
 		return -1
@@ -72,7 +72,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int {
 // InnerRingSize is a getter for a global size of inner ring list. This value
 // paired with inner ring index.
 func (s *Server) InnerRingSize(ctx context.Context) int {
-	size, err := s.statusIndex.InnerRingSize(ctx)
+	size, err := s.statusIndex.InnerRingSize()
 	if err != nil {
 		s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
 		return 0
@@ -84,7 +84,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
 // AlphabetIndex is a getter for a global index of node in alphabet list.
 // Negative index means that node is not in the alphabet list.
 func (s *Server) AlphabetIndex(ctx context.Context) int {
-	index, err := s.statusIndex.AlphabetIndex(ctx)
+	index, err := s.statusIndex.AlphabetIndex()
 	if err != nil {
 		s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
 		return -1
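
These getters deliberately swallow the indexer error: they log it and return a sentinel (-1 for "not in the list", 0 for a size), so callers branch on the value alone instead of handling errors at every call site. A compact sketch of that convention with illustrative names:

package main

import (
	"errors"
	"fmt"
	"log"
)

// lookupIndex may fail, e.g. when the cached index state cannot be refreshed.
func lookupIndex() (int32, error) {
	return 0, errors.New("can't update index state")
}

// innerRingIndex converts a lookup failure into the -1 sentinel,
// meaning "this node is not in the inner ring list".
func innerRingIndex() int {
	i, err := lookupIndex()
	if err != nil {
		log.Printf("can't get inner ring index: %v", err)
		return -1
	}
	return int(i)
}

func main() {
	fmt.Println(innerRingIndex()) // -1
}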

View file

@@ -6,6 +6,7 @@ import (
 	"syscall"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -93,6 +94,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
 		b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket,
 			zap.String("binary size", stringifyByteSize(dataSize)),
 			zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
+			zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
 		)
 		b.itemDeleted(recordSize)
 	}
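
The only functional change in this file is attaching the request's trace ID to an existing debug log entry, taken from the `context.Context` that already flows through `Delete`. A generic sketch of such a helper; the real `tracingPkg.GetTraceID` presumably derives the ID from the tracing span carried by the context, while this stand-in uses a plain context value:

package main

import (
	"context"
	"fmt"
)

type traceIDKey struct{}

// withTraceID stores a trace ID in the context, as tracing middleware would.
func withTraceID(ctx context.Context, id string) context.Context {
	return context.WithValue(ctx, traceIDKey{}, id)
}

// traceID extracts the ID for log correlation; it returns ""
// when the context carries no trace.
func traceID(ctx context.Context) string {
	id, _ := ctx.Value(traceIDKey{}).(string)
	return id
}

func main() {
	ctx := withTraceID(context.Background(), "4bf92f3577b34da6")
	// Every log entry emitted for this request can now carry the same ID:
	fmt.Printf("object removed trace_id=%s\n", traceID(ctx))
}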

Some files were not shown because too many files have changed in this diff.