Compare commits

3 commits

d42f67e053 [#1656] qos: Add tests for AdjustOutgoingIOTag Interceptors
    Change-Id: If534e756b26cf7f202039d48ecdf554b4283728b
    Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
    2025-03-26 09:24:25 +03:00

99340b2717 [#1656] qos: Add test for SetCriticalIOTag Interceptor
    Change-Id: I4a55fcb84e6f65408a1c0120ac917e49e23354a1
    Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
    2025-03-26 09:24:22 +03:00

c16788f9c6 [#1656] qos: Add tests for MaxActiveRPCLimiter Interceptors
    Change-Id: Ib65890ae5aec34c34e15d4ec1f05952f74f1ad26
    Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
    2025-03-26 09:24:19 +03:00
132 changed files with 1173 additions and 3162 deletions
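
The three commits above add unit tests for the qos gRPC interceptors named in their subjects. As a rough illustration of the shape such tests take (a generic sketch, not the frostfs-node code; all identifiers here are hypothetical), a concurrency-limiting unary interceptor can be driven directly with a stub handler:

package qos_test

import (
	"context"
	"errors"
	"testing"

	"google.golang.org/grpc"
)

// limiterInterceptor caps concurrent unary RPCs with a semaphore channel
// and rejects calls that arrive while every slot is taken.
func limiterInterceptor(sem chan struct{}) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		select {
		case sem <- struct{}{}: // acquire a slot
			defer func() { <-sem }()
			return handler(ctx, req)
		default:
			return nil, errors.New("active RPC limit reached")
		}
	}
}

func TestLimiterRejectsWhenFull(t *testing.T) {
	sem := make(chan struct{}, 1)
	sem <- struct{}{} // occupy the only slot
	intercept := limiterInterceptor(sem)
	_, err := intercept(context.Background(), nil,
		&grpc.UnaryServerInfo{FullMethod: "/test.Service/Method"},
		func(ctx context.Context, req any) (any, error) { return nil, nil })
	if err == nil {
		t.Fatal("expected an error once the limit is reached")
	}
}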

.ci/Jenkinsfile vendored
View file

@ -79,3 +79,5 @@ async {
}
}
}
+
+// TODO: dco check

View file

@ -1,103 +1,101 @@
-version: "2"
-output:
-  formats:
-    tab:
-      path: stdout
-      colors: false
-linters:
-  default: none
-  enable:
-    - bidichk
-    - containedctx
-    - contextcheck
-    - copyloopvar
-    - durationcheck
-    - errcheck
-    - exhaustive
-    - funlen
-    - gocognit
-    - godot
-    - importas
-    - ineffassign
-    - intrange
-    - misspell
-    - perfsprint
-    - predeclared
-    - protogetter
-    - reassign
-    - revive
-    - staticcheck
-    - testifylint
-    - truecloudlab-linters
-    - unconvert
-    - unparam
-    - unused
-    - usetesting
-    - whitespace
-  settings:
-    exhaustive:
-      default-signifies-exhaustive: true
-    funlen:
-      lines: 80
-      statements: 60
-    gocognit:
-      min-complexity: 40
-    importas:
-      alias:
-        - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
-          alias: objectSDK
-      no-unaliased: true
-      no-extra-aliases: false
-    staticcheck:
-      checks:
-        - all
-        - -QF1002
-    unused:
-      field-writes-are-uses: false
-      exported-fields-are-used: false
-      local-variables-are-used: false
-    custom:
-      truecloudlab-linters:
-        path: bin/linters/external_linters.so
-        original-url: git.frostfs.info/TrueCloudLab/linters.git
-        settings:
-          noliteral:
-            constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs
-            disable-packages:
-              - codes
-              - err
-              - res
-              - exec
-            target-methods:
-              - reportFlushError
-              - reportError
-  exclusions:
-    generated: lax
-    presets:
-      - comments
-      - common-false-positives
-      - legacy
-      - std-error-handling
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
-formatters:
-  enable:
-    - gci
-    - gofmt
-    - goimports
-  settings:
-    gci:
-      sections:
-        - standard
-        - default
-      custom-order: true
-  exclusions:
-    generated: lax
-    paths:
-      - third_party$
-      - builtin$
-      - examples$
+# This file contains all available configuration options
+# with their default values.
+
+# options for analysis running
+run:
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 20m
+
+  # include test files or not, default is true
+  tests: false
+
+# output configuration options
+output:
+  # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
+  formats:
+    - format: tab
+
+# all available settings of specific linters
+linters-settings:
+  exhaustive:
+    # indicates that switch statements are to be considered exhaustive if a
+    # 'default' case is present, even if all enum members aren't listed in the
+    # switch
+    default-signifies-exhaustive: true
+  gci:
+    sections:
+      - standard
+      - default
+    custom-order: true
+  govet:
+    # report about shadowed variables
+    check-shadowing: false
+  staticcheck:
+    checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed.
+  funlen:
+    lines: 80 # default 60
+    statements: 60 # default 40
+  gocognit:
+    min-complexity: 40 # default 30
+  importas:
+    no-unaliased: true
+    no-extra-aliases: false
+    alias:
+      pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
+      alias: objectSDK
+  unused:
+    field-writes-are-uses: false
+    exported-fields-are-used: false
+    local-variables-are-used: false
+  custom:
+    truecloudlab-linters:
+      path: bin/linters/external_linters.so
+      original-url: git.frostfs.info/TrueCloudLab/linters.git
+      settings:
+        noliteral:
+          target-methods : ["reportFlushError", "reportError"]
+          disable-packages: ["codes", "err", "res","exec"]
+          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+
+linters:
+  enable:
+    # mandatory linters
+    - govet
+    - revive
+
+    # some default golangci-lint linters
+    - errcheck
+    - gosimple
+    - godot
+    - ineffassign
+    - staticcheck
+    - typecheck
+    - unused
+
+    # extra linters
+    - bidichk
+    - durationcheck
+    - exhaustive
+    - copyloopvar
+    - gci
+    - gofmt
+    - goimports
+    - misspell
+    - predeclared
+    - reassign
+    - whitespace
+    - containedctx
+    - funlen
+    - gocognit
+    - contextcheck
+    - importas
+    - truecloudlab-linters
+    - perfsprint
+    - testifylint
+    - protogetter
+    - intrange
+    - tenv
+    - unconvert
+    - unparam
+  disable-all: true
+  fast: false

View file

@ -9,8 +9,8 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.23
-LINT_VERSION ?= 2.0.2
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
+LINT_VERSION ?= 1.62.2
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
PROTOC_OS_VERSION=osx-x86_64
@ -224,7 +224,7 @@ lint-install: $(BIN)
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
-@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION)
+@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
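
With the targets above, the pinned toolchain is installed by running make lint-install once, and the linters themselves run with make lint.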

View file

@ -1,15 +0,0 @@
package maintenance
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie"
"github.com/spf13/cobra"
)
var RootCmd = &cobra.Command{
Use: "maintenance",
Short: "Section for maintenance commands",
}
func init() {
RootCmd.AddCommand(zombie.Cmd)
}

View file

@ -1,70 +0,0 @@
package zombie
import (
"crypto/ecdsa"
"fmt"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey {
keyDesc := viper.GetString(walletFlag)
if keyDesc == "" {
return &nodeconfig.Key(appCfg).PrivateKey
}
data, err := os.ReadFile(keyDesc)
commonCmd.ExitOnErr(cmd, "open wallet file: %w", err)
priv, err := keys.NewPrivateKeyFromBytes(data)
if err != nil {
w, err := wallet.NewWalletFromFile(keyDesc)
commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err)
return fromWallet(cmd, w, viper.GetString(addressFlag))
}
return &priv.PrivateKey
}
func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey {
var (
addr util.Uint160
err error
)
if addrStr == "" {
addr = w.GetChangeAddress()
} else {
addr, err = flags.ParseAddress(addrStr)
commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err)
}
acc := w.GetAccount(addr)
if acc == nil {
commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr))
}
pass, err := getPassword()
commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err)
commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams()))
return &acc.PrivateKey().PrivateKey
}
func getPassword() (string, error) {
// this check allows empty passwords
if viper.IsSet("password") {
return viper.GetString("password"), nil
}
return input.ReadPassword("Enter password > ")
}

View file

@ -1,31 +0,0 @@
package zombie
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
func list(cmd *cobra.Command, _ []string) {
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
appCfg := config.New(configFile, configDir, config.EnvPrefix)
storageEngine := newEngine(cmd, appCfg)
q := createQuarantine(cmd, storageEngine.DumpInfo())
var containerID *cid.ID
if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" {
containerID = &cid.ID{}
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
}
commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error {
if containerID != nil && a.Container() != *containerID {
return nil
}
cmd.Println(a.EncodeToString())
return nil
}))
}

View file

@ -1,46 +0,0 @@
package zombie
import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/spf13/cobra"
)
func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client {
addresses := morphconfig.RPCEndpoint(appCfg)
if len(addresses) == 0 {
commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found"))
}
key := nodeconfig.Key(appCfg)
cli, err := client.New(cmd.Context(),
key,
client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
client.WithEndpoints(addresses...),
client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
)
commonCmd.ExitOnErr(cmd, "create morph client: %w", err)
return cli
}
func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client {
hs, err := morph.NNSContractAddress(client.NNSContainerContractName)
commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err)
cc, err := cntClient.NewFromMorph(morph, hs, 0)
commonCmd.ExitOnErr(cmd, "create morph container client: %w", err)
return cc
}
func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client {
hs, err := morph.NNSContractAddress(client.NNSNetmapContractName)
commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err)
cli, err := netmapClient.NewFromMorph(morph, hs, 0)
commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err)
return cli
}

View file

@ -1,154 +0,0 @@
package zombie
import (
"context"
"fmt"
"math"
"os"
"path/filepath"
"strings"
"sync"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
type quarantine struct {
// mtx protects current field.
mtx sync.Mutex
current int
trees []*fstree.FSTree
}
func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine {
var paths []string
for _, sh := range engineInfo.Shards {
var storagePaths []string
for _, st := range sh.BlobStorInfo.SubStorages {
storagePaths = append(storagePaths, st.Path)
}
if len(storagePaths) == 0 {
continue
}
paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine"))
}
q, err := newQuarantine(paths)
commonCmd.ExitOnErr(cmd, "create quarantine: %w", err)
return q
}
func commonPath(paths []string) string {
if len(paths) == 0 {
return ""
}
if len(paths) == 1 {
return paths[0]
}
minLen := math.MaxInt
for _, p := range paths {
if len(p) < minLen {
minLen = len(p)
}
}
var sb strings.Builder
for i := range minLen {
for _, path := range paths[1:] {
if paths[0][i] != path[i] {
return sb.String()
}
}
sb.WriteByte(paths[0][i])
}
return sb.String()
}
func newQuarantine(paths []string) (*quarantine, error) {
var q quarantine
for i := range paths {
f := fstree.New(
fstree.WithDepth(1),
fstree.WithDirNameLen(1),
fstree.WithPath(paths[i]),
fstree.WithPerm(os.ModePerm),
)
if err := f.Open(mode.ComponentReadWrite); err != nil {
return nil, fmt.Errorf("open fstree %s: %w", paths[i], err)
}
if err := f.Init(); err != nil {
return nil, fmt.Errorf("init fstree %s: %w", paths[i], err)
}
q.trees = append(q.trees, f)
}
return &q, nil
}
func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) {
for i := range q.trees {
res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a})
if err != nil {
continue
}
return res.Object, nil
}
return nil, &apistatus.ObjectNotFound{}
}
func (q *quarantine) Delete(ctx context.Context, a oid.Address) error {
for i := range q.trees {
_, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a})
if err != nil {
continue
}
return nil
}
return &apistatus.ObjectNotFound{}
}
func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error {
data, err := obj.Marshal()
if err != nil {
return err
}
var prm common.PutPrm
prm.Address = objectcore.AddressOf(obj)
prm.Object = obj
prm.RawData = data
q.mtx.Lock()
current := q.current
q.current = (q.current + 1) % len(q.trees)
q.mtx.Unlock()
_, err = q.trees[current].Put(ctx, prm)
return err
}
func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error {
var prm common.IteratePrm
prm.Handler = func(elem common.IterationElement) error {
return f(elem.Address)
}
for i := range q.trees {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
_, err := q.trees[i].Iterate(ctx, prm)
if err != nil {
return err
}
}
return nil
}
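
An aside on the helper above, with illustrative paths that are not from the diff: commonPath returns the byte-wise longest common prefix of a shard's substorage paths, so the quarantine fstree is created alongside them. For example:

commonPath([]string{"/srv/shard1/blobovnicza", "/srv/shard1/fstree"}) // "/srv/shard1/"
filepath.Join("/srv/shard1/", "quarantine")                           // "/srv/shard1/quarantine"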

View file

@ -1,55 +0,0 @@
package zombie
import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
func remove(cmd *cobra.Command, _ []string) {
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
appCfg := config.New(configFile, configDir, config.EnvPrefix)
storageEngine := newEngine(cmd, appCfg)
q := createQuarantine(cmd, storageEngine.DumpInfo())
var containerID cid.ID
cidStr, _ := cmd.Flags().GetString(cidFlag)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
var objectID *oid.ID
oidStr, _ := cmd.Flags().GetString(oidFlag)
if oidStr != "" {
objectID = &oid.ID{}
commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
}
if objectID != nil {
var addr oid.Address
addr.SetContainer(containerID)
addr.SetObject(*objectID)
removeObject(cmd, q, addr)
} else {
commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
if addr.Container() != containerID {
return nil
}
removeObject(cmd, q, addr)
return nil
}))
}
}
func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) {
err := q.Delete(cmd.Context(), addr)
if errors.Is(err, new(apistatus.ObjectNotFound)) {
return
}
commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err)
}

View file

@ -1,69 +0,0 @@
package zombie
import (
"crypto/sha256"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
func restore(cmd *cobra.Command, _ []string) {
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
appCfg := config.New(configFile, configDir, config.EnvPrefix)
storageEngine := newEngine(cmd, appCfg)
q := createQuarantine(cmd, storageEngine.DumpInfo())
morphClient := createMorphClient(cmd, appCfg)
cnrCli := createContainerClient(cmd, morphClient)
var containerID cid.ID
cidStr, _ := cmd.Flags().GetString(cidFlag)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr))
var objectID *oid.ID
oidStr, _ := cmd.Flags().GetString(oidFlag)
if oidStr != "" {
objectID = &oid.ID{}
commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr))
}
if objectID != nil {
var addr oid.Address
addr.SetContainer(containerID)
addr.SetObject(*objectID)
restoreObject(cmd, storageEngine, q, addr, cnrCli)
} else {
commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error {
if addr.Container() != containerID {
return nil
}
restoreObject(cmd, storageEngine, q, addr, cnrCli)
return nil
}))
}
}
func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) {
obj, err := q.Get(cmd.Context(), addr)
commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err)
rawCID := make([]byte, sha256.Size)
cid := addr.Container()
cid.Encode(rawCID)
cnr, err := cnrCli.Get(cmd.Context(), rawCID)
commonCmd.ExitOnErr(cmd, "get container: %w", err)
putPrm := engine.PutPrm{
Object: obj,
IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value),
}
commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm))
commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr))
}

View file

@ -1,123 +0,0 @@
package zombie
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
flagBatchSize = "batch-size"
flagBatchSizeUsage = "Objects iteration batch size"
cidFlag = "cid"
cidFlagUsage = "Container ID"
oidFlag = "oid"
oidFlagUsage = "Object ID"
walletFlag = "wallet"
walletFlagShorthand = "w"
walletFlagUsage = "Path to the wallet or binary key"
addressFlag = "address"
addressFlagUsage = "Address of wallet account"
moveFlag = "move"
moveFlagUsage = "Move objects from storage engine to quarantine"
)
var (
Cmd = &cobra.Command{
Use: "zombie",
Short: "Zombie objects related commands",
}
scanCmd = &cobra.Command{
Use: "scan",
Short: "Scan storage engine for zombie objects and move them to quarantine",
Long: "",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
_ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag))
_ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag))
_ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize))
_ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag))
},
Run: scan,
}
listCmd = &cobra.Command{
Use: "list",
Short: "List zombie objects from quarantine",
Long: "",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
},
Run: list,
}
restoreCmd = &cobra.Command{
Use: "restore",
Short: "Restore zombie objects from quarantine",
Long: "",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
_ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
},
Run: restore,
}
removeCmd = &cobra.Command{
Use: "remove",
Short: "Remove zombie objects from quarantine",
Long: "",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag))
_ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag))
_ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag))
_ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag))
},
Run: remove,
}
)
func init() {
initScanCmd()
initListCmd()
initRestoreCmd()
initRemoveCmd()
}
func initScanCmd() {
Cmd.AddCommand(scanCmd)
scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage)
scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage)
scanCmd.Flags().String(addressFlag, "", addressFlagUsage)
scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage)
}
func initListCmd() {
Cmd.AddCommand(listCmd)
listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
listCmd.Flags().String(cidFlag, "", cidFlagUsage)
}
func initRestoreCmd() {
Cmd.AddCommand(restoreCmd)
restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
restoreCmd.Flags().String(cidFlag, "", cidFlagUsage)
restoreCmd.Flags().String(oidFlag, "", oidFlagUsage)
}
func initRemoveCmd() {
Cmd.AddCommand(removeCmd)
removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage)
removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage)
removeCmd.Flags().String(cidFlag, "", cidFlagUsage)
removeCmd.Flags().String(oidFlag, "", oidFlagUsage)
}
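
On the base side of this compare, where the frostfs-adm root command still registers maintenance.RootCmd, the command tree defined above is reachable roughly as follows (paths and IDs are made up, and --config assumes that is the spelling behind commonflags.ConfigFlag):

frostfs-adm maintenance zombie scan --config /etc/frostfs/storage.yml --batch-size 1000 --move
frostfs-adm maintenance zombie list --config /etc/frostfs/storage.yml --cid <container-id>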

View file

@ -1,281 +0,0 @@
package zombie
import (
"context"
"crypto/ecdsa"
"crypto/sha256"
"errors"
"fmt"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
func scan(cmd *cobra.Command, _ []string) {
configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag)
configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag)
appCfg := config.New(configFile, configDir, config.EnvPrefix)
batchSize, _ := cmd.Flags().GetUint32(flagBatchSize)
if batchSize == 0 {
commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value"))
}
move, _ := cmd.Flags().GetBool(moveFlag)
storageEngine := newEngine(cmd, appCfg)
morphClient := createMorphClient(cmd, appCfg)
cnrCli := createContainerClient(cmd, morphClient)
nmCli := createNetmapClient(cmd, morphClient)
q := createQuarantine(cmd, storageEngine.DumpInfo())
pk := getPrivateKey(cmd, appCfg)
epoch, err := nmCli.Epoch(cmd.Context())
commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err)
nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch)
commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err)
cmd.Printf("Epoch: %d\n", nm.Epoch())
cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes()))
ps := &processStatus{
statusCount: make(map[status]uint64),
}
stopCh := make(chan struct{})
start := time.Now()
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
tick := time.NewTicker(time.Second)
defer tick.Stop()
for {
select {
case <-cmd.Context().Done():
return
case <-stopCh:
return
case <-tick.C:
fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start))
}
}
}()
go func() {
defer wg.Done()
err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move)
close(stopCh)
}()
wg.Wait()
commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err)
cmd.Println()
cmd.Println("Status description:")
cmd.Println("undefined -- nothing is clear")
cmd.Println("found -- object is found in cluster")
cmd.Println("quarantine -- object is not found in cluster")
cmd.Println()
for status, count := range ps.statusCount {
cmd.Printf("Status: %s, Count: %d\n", status, count)
}
}
type status string
const (
statusUndefined status = "undefined"
statusFound status = "found"
statusQuarantine status = "quarantine"
)
func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) {
rawCID := make([]byte, sha256.Size)
cid := obj.Address.Container()
cid.Encode(rawCID)
cnr, err := cnrCli.Get(ctx, rawCID)
if err != nil {
var errContainerNotFound *apistatus.ContainerNotFound
if errors.As(err, &errContainerNotFound) {
// Policer will deal with this object.
return statusFound, nil
}
return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err)
}
nm, err := nmCli.NetMap(ctx)
if err != nil {
return statusUndefined, fmt.Errorf("read netmap from morph: %w", err)
}
nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID)
if err != nil {
// Not enough nodes, check all netmap nodes.
nodes = append([][]netmap.NodeInfo{}, nm.Nodes())
}
objID := obj.Address.Object()
cnrID := obj.Address.Container()
local := true
raw := false
if obj.ECInfo != nil {
objID = obj.ECInfo.ParentID
local = false
raw = true
}
prm := clientSDK.PrmObjectHead{
ObjectID: &objID,
ContainerID: &cnrID,
Local: local,
Raw: raw,
}
var ni clientCore.NodeInfo
for i := range nodes {
for j := range nodes[i] {
if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil {
return statusUndefined, fmt.Errorf("parse node info: %w", err)
}
c, err := cc.Get(ni)
if err != nil {
continue
}
res, err := c.ObjectHead(ctx, prm)
if err != nil {
var errECInfo *objectSDK.ECInfoError
if raw && errors.As(err, &errECInfo) {
return statusFound, nil
}
continue
}
if err := apistatus.ErrFromStatus(res.Status()); err != nil {
continue
}
return statusFound, nil
}
}
if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 {
return statusFound, nil
}
return statusQuarantine, nil
}
func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus,
appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool,
) error {
cc := cache.NewSDKClientCache(cache.ClientCacheOpts{
DialTimeout: apiclientconfig.DialTimeout(appCfg),
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
Key: pk,
AllowExternal: apiclientconfig.AllowExternal(appCfg),
})
ctx := cmd.Context()
var cursor *engine.Cursor
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
var prm engine.ListWithCursorPrm
prm.WithCursor(cursor)
prm.WithCount(batchSize)
res, err := storageEngine.ListWithCursor(ctx, prm)
if err != nil {
if errors.Is(err, engine.ErrEndOfListing) {
return nil
}
return fmt.Errorf("list with cursor: %w", err)
}
cursor = res.Cursor()
addrList := res.AddressList()
eg, egCtx := errgroup.WithContext(ctx)
eg.SetLimit(int(batchSize))
for i := range addrList {
addr := addrList[i]
eg.Go(func() error {
result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr)
if err != nil {
return fmt.Errorf("check object %s status: %w", addr.Address, err)
}
ps.add(result)
if !move && result == statusQuarantine {
cmd.Println(addr)
return nil
}
if result == statusQuarantine {
return moveToQuarantine(egCtx, storageEngine, q, addr.Address)
}
return nil
})
}
if err := eg.Wait(); err != nil {
return fmt.Errorf("process objects batch: %w", err)
}
}
}
func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error {
var getPrm engine.GetPrm
getPrm.WithAddress(addr)
res, err := storageEngine.Get(ctx, getPrm)
if err != nil {
return fmt.Errorf("get object %s from storage engine: %w", addr, err)
}
if err := q.Put(ctx, res.Object()); err != nil {
return fmt.Errorf("put object %s to quarantine: %w", addr, err)
}
var delPrm engine.DeletePrm
delPrm.WithForceRemoval()
delPrm.WithAddress(addr)
if err = storageEngine.Delete(ctx, delPrm); err != nil {
return fmt.Errorf("delete object %s from storage engine: %w", addr, err)
}
return nil
}
type processStatus struct {
guard sync.RWMutex
statusCount map[status]uint64
count uint64
}
func (s *processStatus) add(st status) {
s.guard.Lock()
defer s.guard.Unlock()
s.statusCount[st]++
s.count++
}
func (s *processStatus) total() uint64 {
s.guard.RLock()
defer s.guard.RUnlock()
return s.count
}
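
The scan loop above drains the storage engine in cursor-delimited batches and fans each batch out on an errgroup capped at the batch size. The same pattern in isolation (a generic sketch, not frostfs-node code; batch, addrList, and check are stand-ins):

eg, egCtx := errgroup.WithContext(ctx)
eg.SetLimit(batch) // at most batch concurrent checks
for _, addr := range addrList {
	eg.Go(func() error {
		return check(egCtx, addr) // stand-in for checkAddr
	})
}
return eg.Wait() // the first error cancels egCtx for the rest

Capturing addr directly in the closure is safe on Go 1.22 and later, which the GO_VERSION ?= 1.23 in the Makefile above implies; the original code copies the loop variable explicitly anyway.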

View file

@ -1,203 +0,0 @@
package zombie
import (
"context"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/panjf2000/ants/v2"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
ngOpts := storageEngineOptions(c)
shardOpts := shardOptions(cmd, c)
e := engine.New(ngOpts...)
for _, opts := range shardOpts {
_, err := e.AddShard(cmd.Context(), opts...)
commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
}
commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
return e
}
func storageEngineOptions(c *config.Config) []engine.Option {
return []engine.Option{
engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
}
}
func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
var result [][]shard.Option
err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
result = append(result, getShardOpts(cmd, c, sh))
return nil
})
commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
return result
}
func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
wc, wcEnabled := getWriteCacheOpts(sh)
return []shard.Option{
shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
shard.WithRefillMetabase(sh.RefillMetabase()),
shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
shard.WithMode(sh.Mode()),
shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
shard.WithWriteCache(wcEnabled),
shard.WithWriteCacheOptions(wc),
shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
pool, err := ants.NewPool(sz)
commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
return pool
}),
shard.WithLimiter(qos.NewNoopLimiter()),
}
}
func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
var result []writecache.Option
result = append(result,
writecache.WithPath(wc.Path()),
writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
writecache.WithMaxObjectSize(wc.MaxObjectSize()),
writecache.WithFlushWorkersCount(wc.WorkerCount()),
writecache.WithMaxCacheSize(wc.SizeLimit()),
writecache.WithMaxCacheCount(wc.CountLimit()),
writecache.WithNoSync(wc.NoSync()),
writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
writecache.WithQoSLimiter(qos.NewNoopLimiter()),
)
return result, true
}
return nil, false
}
func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
var piloramaOpts []pilorama.Option
if config.BoolSafe(c.Sub("tree"), "enabled") {
pr := sh.Pilorama()
piloramaOpts = append(piloramaOpts,
pilorama.WithPath(pr.Path()),
pilorama.WithPerm(pr.Perm()),
pilorama.WithNoSync(pr.NoSync()),
pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
)
}
return piloramaOpts
}
func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
return []meta.Option{
meta.WithPath(sh.Metabase().Path()),
meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
meta.WithBoltDBOptions(&bbolt.Options{
Timeout: 100 * time.Millisecond,
}),
meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
meta.WithEpochState(&epochState{}),
}
}
func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
result := []blobstor.Option{
blobstor.WithCompressObjects(sh.Compress()),
blobstor.WithUncompressableContentTypes(sh.UncompressableContentTypes()),
blobstor.WithCompressibilityEstimate(sh.EstimateCompressibility()),
blobstor.WithCompressibilityEstimateThreshold(sh.EstimateCompressibilityThreshold()),
blobstor.WithStorages(getSubStorages(ctx, sh)),
blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
}
return result
}
func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
var ss []blobstor.SubStorage
for _, storage := range sh.BlobStor().Storages() {
switch storage.Type() {
case blobovniczatree.Type:
sub := blobovniczaconfig.From((*config.Config)(storage))
blobTreeOpts := []blobovniczatree.Option{
blobovniczatree.WithRootPath(storage.Path()),
blobovniczatree.WithPermissions(storage.Perm()),
blobovniczatree.WithBlobovniczaSize(sub.Size()),
blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
blobovniczatree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
}
ss = append(ss, blobstor.SubStorage{
Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < sh.SmallSizeLimit()
},
})
case fstree.Type:
sub := fstreeconfig.From((*config.Config)(storage))
fstreeOpts := []fstree.Option{
fstree.WithPath(storage.Path()),
fstree.WithPerm(storage.Perm()),
fstree.WithDepth(sub.Depth()),
fstree.WithNoSync(sub.NoSync()),
fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
}
ss = append(ss, blobstor.SubStorage{
Storage: fstree.New(fstreeOpts...),
Policy: func(_ *objectSDK.Object, _ []byte) bool {
return true
},
})
default:
// should never happen, that has already
// been handled: when the config was read
}
}
return ss
}
type epochState struct{}
func (epochState) CurrentEpoch() uint64 {
return 0
}

View file

@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@ -162,7 +161,9 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv
helper.GetAlphabetNNSDomain(i),
int64(nns.TXT))
}
-assert.NoError(w.Err)
+if w.Err != nil {
+panic(w.Err)
+}
alphaRes, err := c.InvokeScript(w.Bytes(), nil)
if err != nil {
@ -225,7 +226,9 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan
for i := range accounts {
emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash)
}
-assert.NoError(w.Err)
+if w.Err != nil {
+panic(w.Err)
+}
res, err := c.Run(w.Bytes())
if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) {
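
This hunk and the ones that follow replace calls into the dropped internal/assert package with inline panics. A plausible reconstruction of the removed helpers, inferred from the call sites in this diff (the actual frostfs-node code may differ):

package assert

import (
	"fmt"
	"strings"
)

func True(cond bool, details ...string) {
	if !cond {
		panic(strings.Join(details, " "))
	}
}

func False(cond bool, details ...string) {
	True(!cond, details...)
}

func NoError(err error, details ...string) {
	if err != nil {
		if len(details) == 0 {
			panic(err)
		}
		panic(fmt.Errorf("%s: %w", strings.Join(details, " "), err))
	}
}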

View file

@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/io"
@ -236,7 +235,9 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
putContainer(bw, ch, cnt)
-assert.NoError(bw.Err)
+if bw.Err != nil {
+panic(bw.Err)
+}
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err

View file

@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/cli/cmdargs"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
@ -121,7 +120,9 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
}
}
-assert.NoError(writer.Err, "can't create deployment script")
+if writer.Err != nil {
+panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
+}
if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil {
return err
@ -172,8 +173,9 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string
domain, int64(nns.TXT), address.Uint160ToString(cs.Hash))
}
assert.NoError(bw.Err, "can't create deployment script")
if bw.Len() != start {
if bw.Err != nil {
panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err))
} else if bw.Len() != start {
writer.WriteBytes(bw.Bytes())
emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All)

View file

@ -11,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
@ -237,17 +236,21 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
} else {
sub.Reset()
emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag)
-assert.NoError(sub.Err, "can't create version script")
+if sub.Err != nil {
+panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
+}
script := sub.Bytes()
emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
-bw.WriteBytes(script)
+bw.BinWriter.WriteBytes(script)
emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
emit.Opcodes(bw.BinWriter, opcode.PUSH0)
}
}
emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target
assert.NoError(bw.Err, "can't create version script")
if bw.Err != nil {
panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err))
}
res, err := c.InvokeScript(bw.Bytes(), nil)
if err != nil {

View file

@ -6,7 +6,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@ -14,7 +13,9 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
@ -186,9 +187,19 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*
}
func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
-inv := invoker.New(c, nil)
-reader := nns2.NewReader(inv, nnsHash)
-return reader.IsAvailable(name)
+switch c.(type) {
+case *rpcclient.Client:
+inv := invoker.New(c, nil)
+reader := nns2.NewReader(inv, nnsHash)
+return reader.IsAvailable(name)
+default:
+b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
+if err != nil {
+return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
+}
+return b, nil
+}
}
func CheckNotaryEnabled(c Client) error {

View file

@ -13,7 +13,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/core/state"
@ -22,7 +21,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
@ -30,6 +28,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@ -376,7 +375,9 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen
}
act, err = actor.New(c.Client, signers)
} else {
-assert.False(withConsensus, "BUG: should never happen")
+if withConsensus {
+panic("BUG: should never happen")
+}
act, err = c.CommitteeAct, nil
}
if err != nil {
@ -410,9 +411,11 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp
func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error {
version, err := c.Client.GetVersion()
-// error appears only if client
-// has not been initialized
-assert.NoError(err)
+if err != nil {
+// error appears only if client
+// has not been initialized
+panic(err)
+}
network := version.Protocol.Network
// Use parameter context to avoid dealing with signature order.
@ -444,12 +447,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin
for i := range tx.Signers {
if tx.Signers[i].Account == h {
-assert.True(i <= len(tx.Scripts), "BUG: invalid signing order")
if i < len(tx.Scripts) {
tx.Scripts[i] = *w
-}
-if i == len(tx.Scripts) {
+} else if i == len(tx.Scripts) {
tx.Scripts = append(tx.Scripts, *w)
+} else {
+panic("BUG: invalid signing order")
}
return nil
}
@ -507,7 +510,9 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
-assert.NoError(bw.Err)
+if bw.Err != nil {
+panic(bw.Err)
+}
return bw.Bytes(), false, nil
}
@ -519,8 +524,12 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U
}
func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) {
-avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone))
-return !avail, err
+res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone)
+if err != nil {
+return false, err
+}
+return res.State == vmstate.Halt.String(), nil
}
func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool {

View file

@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/config"
"github.com/nspcc-dev/neo-go/pkg/core"
@ -317,7 +316,9 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint
func (l *LocalClient) putTransactions() error {
// 1. Prepare new block.
lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
-assert.NoError(err)
+if err != nil {
+panic(err)
+}
defer func() { l.transactions = l.transactions[:0] }()
b := &block.Block{
@ -358,7 +359,9 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s
w := io.NewBufBinWriter()
emit.Array(w.BinWriter, parameters...)
emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All)
-assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
+if w.Err != nil {
+panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err))
+}
return c.InvokeScript(w.Bytes(), signers)
}

View file

@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@ -112,7 +111,9 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b
emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK)
emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All)
assert.NoError(w.Err, "can't wrap register script")
if w.Err != nil {
panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err))
}
}
func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error {

View file

@ -1,18 +1,21 @@
package initialize
import (
"errors"
"fmt"
"math/big"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/nspcc-dev/neo-go/pkg/core/native"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
@ -27,8 +30,7 @@ const (
)
func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
-reader := neo.NewReader(c.ReadOnlyInvoker)
-regPrice, err := reader.GetRegisterPrice()
+regPrice, err := getCandidateRegisterPrice(c)
if err != nil {
return fmt.Errorf("can't fetch registration price: %w", err)
}
@ -40,7 +42,9 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
emit.Opcodes(w.BinWriter, opcode.ASSERT)
}
emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice)
-assert.NoError(w.Err)
+if w.Err != nil {
+panic(fmt.Sprintf("BUG: %v", w.Err))
+}
signers := []actor.SignerAccount{{
Signer: c.GetSigner(false, c.CommitteeAcc),
@ -112,7 +116,7 @@ func registerCandidates(c *helper.InitializeContext) error {
func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
neoHash := neo.Hash
-ok, err := transferNEOFinished(c)
+ok, err := transferNEOFinished(c, neoHash)
if ok || err != nil {
return err
}
@ -135,8 +139,33 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
return c.AwaitTx()
}
-func transferNEOFinished(c *helper.InitializeContext) (bool, error) {
-r := neo.NewReader(c.ReadOnlyInvoker)
+func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) {
+r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
}
+var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
+func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) {
+switch c.Client.(type) {
+case *rpcclient.Client:
+inv := invoker.New(c.Client, nil)
+reader := neo.NewReader(inv)
+return reader.GetRegisterPrice()
+default:
+neoHash := neo.Hash
+res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
+if err != nil {
+return 0, err
+}
+if len(res.Stack) == 0 {
+return 0, errGetPriceInvalid
+}
+bi, err := res.Stack[0].TryInteger()
+if err != nil || !bi.IsInt64() {
+return 0, errGetPriceInvalid
+}
+return bi.Int64(), nil
+}
+}

View file

@ -5,7 +5,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
@ -42,7 +41,6 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
rootCmd.AddCommand(metabase.RootCmd)
-rootCmd.AddCommand(maintenance.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))

View file

@ -858,8 +858,6 @@ type PatchObjectPrm struct {
ReplaceAttribute bool
-NewSplitHeader *objectSDK.SplitHeader
PayloadPatches []PayloadPatch
}
@ -890,11 +888,7 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
return nil, fmt.Errorf("init payload reading: %w", err)
}
-if patcher.PatchHeader(ctx, client.PatchHeaderPrm{
-NewSplitHeader: prm.NewSplitHeader,
-NewAttributes: prm.NewAttributes,
-ReplaceAttributes: prm.ReplaceAttribute,
-}) {
+if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
for _, pp := range prm.PayloadPatches {
payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
if err != nil {

View file

@ -44,7 +44,6 @@ is set to current epoch + n.
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account))
-_ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC))
},
}
@ -82,7 +81,7 @@ func createToken(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err)
if iatRelative || expRelative || nvbRelative {
-endpoint := viper.GetString(commonflags.RPC)
+endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
if len(endpoint) == 0 {
commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC))
}

View file

@ -5,9 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
"maps"
"os"
"slices"
"strings"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@ -21,9 +19,8 @@ import (
)
type policyPlaygroundREPL struct {
-cmd *cobra.Command
-nodes map[string]netmap.NodeInfo
-console *readline.Instance
+cmd *cobra.Command
+nodes map[string]netmap.NodeInfo
}
func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
@ -43,7 +40,7 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error {
node.IterateAttributes(func(k, v string) {
attrs = append(attrs, fmt.Sprintf("%s:%q", k, v))
})
fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " "))
i++
}
return nil
@ -150,29 +147,12 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error {
for _, node := range ns {
ids = append(ids, hex.EncodeToString(node.PublicKey()))
}
fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids)
fmt.Printf("\t%2d: %v\n", i+1, ids)
}
return nil
}
-func (repl *policyPlaygroundREPL) handleHelp(args []string) error {
-if len(args) != 0 {
-if _, ok := commands[args[0]]; !ok {
-return fmt.Errorf("unknown command: %q", args[0])
-}
-fmt.Fprintln(repl.console, commands[args[0]].usage)
-return nil
-}
-commandList := slices.Collect(maps.Keys(commands))
-slices.Sort(commandList)
-for _, command := range commandList {
-fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].descriprion)
-}
-return nil
-}
func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
var nm netmap.NetMap
var nodes []netmap.NodeInfo
@ -183,104 +163,15 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap {
return nm
}
-type commandDescription struct {
-descriprion string
-usage string
-}
-var commands = map[string]commandDescription{
-"list": {
-descriprion: "Display all nodes in the netmap",
-usage: `Display all nodes in the netmap
-Example of usage:
-list
-1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
-2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
-`,
-},
-"ls": {
-descriprion: "Display all nodes in the netmap",
-usage: `Display all nodes in the netmap
-Example of usage:
-ls
-1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"}
-2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"}
-`,
-},
-"add": {
-descriprion: "Add a new node: add <node-hash> attr=value",
-usage: `Add a new node
-Example of usage:
-add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`,
-},
-"load": {
-descriprion: "Load netmap from file: load <path>",
-usage: `Load netmap from file
-Example of usage:
-load "netmap.json"
-File format (netmap.json):
-{
-"03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": {
-"continent": "Europe",
-"country": "Poland"
-},
-"02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": {
-"continent": "Antarctica",
-"country": "Heard Island"
-}
-}`,
-},
-"remove": {
-descriprion: "Remove a node: remove <node-hash>",
-usage: `Remove a node
-Example of usage:
-remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
-},
-"rm": {
-descriprion: "Remove a node: rm <node-hash>",
-usage: `Remove a node
-Example of usage:
-rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`,
-},
-"eval": {
-descriprion: "Evaluate a policy: eval <policy>",
-usage: `Evaluate a policy
-Example of usage:
-eval REP 2`,
-},
-"help": {
-descriprion: "Show available commands",
-},
-}
-func (repl *policyPlaygroundREPL) handleCommand(args []string) error {
-if len(args) == 0 {
-return nil
-}
-switch args[0] {
-case "list", "ls":
-return repl.handleLs(args[1:])
-case "add":
-return repl.handleAdd(args[1:])
-case "load":
-return repl.handleLoad(args[1:])
-case "remove", "rm":
-return repl.handleRemove(args[1:])
-case "eval":
-return repl.handleEval(args[1:])
-case "help":
-return repl.handleHelp(args[1:])
-}
-return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0])
-}
var policyPlaygroundCompleter = readline.NewPrefixCompleter(
readline.PcItem("list"),
readline.PcItem("ls"),
readline.PcItem("add"),
readline.PcItem("load"),
readline.PcItem("remove"),
readline.PcItem("rm"),
readline.PcItem("eval"),
)
func (repl *policyPlaygroundREPL) run() error {
if len(viper.GetString(commonflags.RPC)) > 0 {
@ -299,32 +190,24 @@ func (repl *policyPlaygroundREPL) run() error {
}
}
if len(viper.GetString(netmapConfigPath)) > 0 {
err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)})
commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err)
cmdHandlers := map[string]func([]string) error{
"list": repl.handleLs,
"ls": repl.handleLs,
"add": repl.handleAdd,
"load": repl.handleLoad,
"remove": repl.handleRemove,
"rm": repl.handleRemove,
"eval": repl.handleEval,
}
var cfgCompleter []readline.PrefixCompleterInterface
var helpSubItems []readline.PrefixCompleterInterface
for name := range commands {
if name != "help" {
cfgCompleter = append(cfgCompleter, readline.PcItem(name))
helpSubItems = append(helpSubItems, readline.PcItem(name))
}
}
cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...))
completer := readline.NewPrefixCompleter(cfgCompleter...)
rl, err := readline.NewEx(&readline.Config{
Prompt: "> ",
InterruptPrompt: "^C",
AutoComplete: completer,
AutoComplete: policyPlaygroundCompleter,
})
if err != nil {
return fmt.Errorf("error initializing readline: %w", err)
}
repl.console = rl
defer rl.Close()
var exit bool
@ -342,8 +225,17 @@ func (repl *policyPlaygroundREPL) run() error {
}
exit = false
if err := repl.handleCommand(strings.Fields(line)); err != nil {
fmt.Fprintf(repl.console, "error: %v\n", err)
parts := strings.Fields(line)
if len(parts) == 0 {
continue
}
cmd := parts[0]
if handler, exists := cmdHandlers[cmd]; exists {
if err := handler(parts[1:]); err != nil {
fmt.Printf("error: %v\n", err)
}
} else {
fmt.Printf("error: unknown command %q\n", cmd)
}
}
}
@ -359,14 +251,6 @@ If a wallet and endpoint is provided, the initial netmap data will be loaded fro
},
}
const (
netmapConfigPath = "netmap-config"
netmapConfigUsage = "Path to the netmap configuration file"
)
func initContainerPolicyPlaygroundCmd() {
commonflags.Init(policyPlaygroundCmd)
policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage)
_ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath))
}
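Note: the interesting move in this file is deriving both dispatch and tab completion from a single commands registry, instead of maintaining a handler map and a hand-written static completer side by side. A minimal sketch of building a readline completer from such a registry, assuming the chzyer/readline package already imported in this file; command names and descriptions are placeholders:

package main

import (
	"fmt"
	"sort"

	"github.com/chzyer/readline"
)

var commands = map[string]string{
	"list": "Display all nodes",
	"add":  "Add a new node",
	"eval": "Evaluate a policy",
	"help": "Show available commands",
}

// buildCompleter derives tab completion from the registry, nesting every
// other command under "help" so that "help <cmd>" also completes.
func buildCompleter() *readline.PrefixCompleter {
	names := make([]string, 0, len(commands))
	for name := range commands {
		names = append(names, name)
	}
	sort.Strings(names)
	var items, helpSub []readline.PrefixCompleterInterface
	for _, name := range names {
		if name == "help" {
			continue
		}
		items = append(items, readline.PcItem(name))
		helpSub = append(helpSub, readline.PcItem(name))
	}
	items = append(items, readline.PcItem("help", helpSub...))
	return readline.NewPrefixCompleter(items...)
}

func main() {
	rl, err := readline.NewEx(&readline.Config{Prompt: "> ", AutoComplete: buildCompleter()})
	if err != nil {
		fmt.Println(err)
		return
	}
	defer rl.Close()
}

With this shape, adding a command to the map updates help output, dispatch, and completion in one place.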

View file

@ -296,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu
leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft
leftMinutes := int(leftSeconds / 60)
fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes)
sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes))
}
func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@ -305,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
hour := int(duration.Seconds() / 3600)
minute := int(duration.Seconds()/60) % 60
second := int(duration.Seconds()) % 60
fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second)
sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second))
}
}
func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if resp.GetBody().GetStartedAt() != nil {
startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC()
fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339))
sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339)))
}
}
func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
if len(resp.GetBody().GetErrorMessage()) > 0 {
fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage())
sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage()))
}
}
@ -332,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes
default:
status = "undefined"
}
fmt.Fprintf(sb, " Status: %s.", status)
sb.WriteString(fmt.Sprintf(" Status: %s.", status))
}
func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
@ -350,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
}
func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.",
resp.GetBody().GetEvacuatedObjects(),
resp.GetBody().GetTotalObjects(),
resp.GetBody().GetFailedObjects(),
resp.GetBody().GetSkippedObjects(),
resp.GetBody().GetEvacuatedTrees(),
resp.GetBody().GetTotalTrees(),
resp.GetBody().GetFailedTrees())
resp.GetBody().GetFailedTrees()))
}
func initControlEvacuationShardCmd() {
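Note: every change in this file trades fmt.Fprintf(sb, ...) against sb.WriteString(fmt.Sprintf(...)). Both produce identical output; the difference is that strings.Builder implements io.Writer, so Fprintf formats directly into it, while Sprintf first allocates a temporary string that WriteString then copies. A self-contained illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	var sb strings.Builder

	// strings.Builder satisfies io.Writer, so Fprintf formats straight
	// into it with no intermediate string.
	fmt.Fprintf(&sb, " Duration: %02d:%02d:%02d.", 1, 2, 3)

	// Equivalent result, but Sprintf allocates a temporary string that
	// WriteString then copies into the builder.
	sb.WriteString(fmt.Sprintf(" Status: %s.", "completed"))

	fmt.Println(sb.String())
}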

View file

@ -18,7 +18,6 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// object lock command.
@ -79,7 +78,7 @@ var objectLockCmd = &cobra.Command{
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
endpoint := viper.GetString(commonflags.RPC)
endpoint, _ := cmd.Flags().GetString(commonflags.RPC)
currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint)
commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err)

View file

@ -48,12 +48,6 @@ type ecHeader struct {
parent oid.ID
}
type objectCounter struct {
sync.Mutex
total uint32
isECcounted bool
}
type objectPlacement struct {
requiredNodes []netmapSDK.NodeInfo
confirmedNodes []netmapSDK.NodeInfo
@ -62,7 +56,6 @@ type objectPlacement struct {
type objectNodesResult struct {
errors []error
placements map[oid.ID]objectPlacement
total uint32
}
type ObjNodesDataObject struct {
@ -113,18 +106,18 @@ func objectNodes(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk)
objects := getPhyObjects(cmd, cnrID, objID, cli, pk)
placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli)
result := getRequiredPlacement(cmd, objects, placementPolicy, netmap)
getActualPlacement(cmd, netmap, pk, objects, count, result)
getActualPlacement(cmd, netmap, pk, objects, result)
printPlacement(cmd, objID, objects, result)
}
func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) {
func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@ -152,7 +145,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
parent: res.Header().ECHeader().Parent(),
}
}
return []phyObject{obj}, 1
return []phyObject{obj}
}
var errSplitInfo *objectSDK.SplitInfoError
@ -162,34 +155,29 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) {
return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1
return getECObjectChunks(cmd, cnrID, objID, ecInfoError)
}
commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err)
return nil, 0
return nil
}
func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) {
members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total
func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject {
members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo)
return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead)
}
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) {
var total int
func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID {
splitInfo := errSplitInfo.SplitInfo()
if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok {
if total = len(members); total > 0 {
total-- // linking object is not data object
}
return members, total
return members
}
if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok {
return members, len(members)
return members
}
members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
return members, len(members)
return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID)
}
func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject {
@ -395,11 +383,8 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
}
}
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) {
func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) {
resultMtx := &sync.Mutex{}
counter := &objectCounter{
total: uint32(count),
}
candidates := getNodesToCheckObjectExistance(cmd, netmap, result)
@ -416,7 +401,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
for _, object := range objects {
eg.Go(func() error {
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter)
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
resultMtx.Lock()
defer resultMtx.Unlock()
if err == nil && stored {
@ -435,7 +420,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
}
commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait())
result.total = counter.total
}
func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo {
@ -494,7 +478,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N
return cli, nil
}
func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) {
func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) {
var addrObj oid.Address
addrObj.SetContainer(cnrID)
addrObj.SetObject(objID)
@ -509,14 +493,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
res, err := internalclient.HeadObject(ctx, prmHead)
if err == nil && res != nil {
if res.Header().ECHeader() != nil {
counter.Lock()
defer counter.Unlock()
if !counter.isECcounted {
counter.total *= res.Header().ECHeader().Total()
}
counter.isECcounted = true
}
return true, nil
}
var notFound *apistatus.ObjectNotFound
@ -536,8 +512,7 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
}
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total)
fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects))
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
for _, object := range objects {
fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID)
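Note: the objectCounter removed in this file existed to report the total number of physical data objects for erasure-coded payloads: when any placement check observed an EC header, the running total was multiplied by the EC group size exactly once, under a mutex, even with many goroutines racing. The surviving code simply prints len(objects). A reduced sketch of that once-only scaling pattern, with values invented for illustration:

package main

import (
	"fmt"
	"sync"
)

// objectCounter mirrors the removed helper: total is scaled by the EC
// group size at most once, however many goroutines observe an EC header.
type objectCounter struct {
	sync.Mutex
	total       uint32
	isECcounted bool
}

func (c *objectCounter) scaleOnceByECTotal(ecTotal uint32) {
	c.Lock()
	defer c.Unlock()
	if !c.isECcounted {
		c.total *= ecTotal
		c.isECcounted = true
	}
}

func main() {
	c := &objectCounter{total: 1}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ { // several placement checks may race here
		wg.Add(1)
		go func() { defer wg.Done(); c.scaleOnceByECTotal(3) }()
	}
	wg.Wait()
	fmt.Println(c.total) // always 3, never 3*3*3*3
}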

View file

@ -2,7 +2,6 @@ package object
import (
"fmt"
"os"
"strconv"
"strings"
@ -10,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -22,7 +20,6 @@ const (
replaceAttrsFlagName = "replace-attrs"
rangeFlagName = "range"
payloadFlagName = "payload"
splitHeaderFlagName = "split-header"
)
var objectPatchCmd = &cobra.Command{
@ -53,7 +50,6 @@ func initObjectPatchCmd() {
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
}
func patch(cmd *cobra.Command, _ []string) {
@ -88,8 +84,6 @@ func patch(cmd *cobra.Command, _ []string) {
prm.NewAttributes = newAttrs
prm.ReplaceAttribute = replaceAttrs
prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)
for i := range ranges {
prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
Range: ranges[i],
@ -153,22 +147,3 @@ func patchPayloadPaths(cmd *cobra.Command) []string {
v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
return v
}
func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
path, _ := cmd.Flags().GetString(splitHeaderFlagName)
if path == "" {
return nil
}
data, err := os.ReadFile(path)
commonCmd.ExitOnErr(cmd, "read file error: %w", err)
splitHdrV2 := new(objectV2.SplitHeader)
err = splitHdrV2.Unmarshal(data)
if err != nil {
err = splitHdrV2.UnmarshalJSON(data)
commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
}
return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
}
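Note: parseSplitHeaderBinaryOrJSON in this hunk accepts either encoding of the split header by attempting the binary protobuf form first and falling back to JSON only when that fails. A generic sketch of the same fallback shape; the splitHeader type and unmarshalBinary stand in for the SDK's objectV2.SplitHeader and its protobuf Unmarshal:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type splitHeader struct {
	Parent string `json:"parent"`
}

// decodeSplitHeader tries a compact binary decoder first and falls back
// to JSON, mirroring the binary-or-JSON parsing in the hunk above.
func decodeSplitHeader(data []byte, unmarshalBinary func([]byte, *splitHeader) error) (*splitHeader, error) {
	h := new(splitHeader)
	if err := unmarshalBinary(data, h); err == nil {
		return h, nil
	}
	if err := json.Unmarshal(data, h); err != nil {
		return nil, fmt.Errorf("unmarshal error: %w", err)
	}
	return h, nil
}

func main() {
	notBinary := func([]byte, *splitHeader) error { return errors.New("not binary") }
	h, err := decodeSplitHeader([]byte(`{"parent":"abc"}`), notBinary)
	fmt.Println(h.Parent, err) // abc <nil>
}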

View file

@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool {
if ok {
toJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
toProto, _ := cmd.Flags().GetBool("proto")
if !toJSON && !toProto {
if !(toJSON || toProto) {
cmd.PrintErrln("Object is erasure-encoded, ec information received.")
}
printECInfo(cmd, errECInfo.ECInfo())
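Note: !toJSON && !toProto and !(toJSON || toProto) are equivalent by De Morgan's laws, so this hunk only swaps two spellings of "neither output flag is set"; behavior is unchanged.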

View file

@ -2,19 +2,17 @@ package tree
import (
"context"
"crypto/tls"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@ -33,16 +31,6 @@ func _client() (tree.TreeServiceClient, error) {
return nil, err
}
host, isTLS, err := client.ParseURI(netAddr.URIAddr())
if err != nil {
return nil, err
}
creds := insecure.NewCredentials()
if isTLS {
creds = credentials.NewTLS(&tls.Config{})
}
opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
tracing.NewUnaryClientInterceptor(),
@ -52,10 +40,13 @@ func _client() (tree.TreeServiceClient, error) {
),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
grpc.WithDisableServiceConfig(),
grpc.WithTransportCredentials(creds),
}
cc, err := grpc.NewClient(host, opts...)
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
return tree.NewTreeServiceClient(cc), err
}
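Note: the two sides of this hunk differ in how transport security is chosen: one parses the URI via client.ParseURI and selects TLS or insecure credentials explicitly, the other appends insecure credentials only when the address lacks the grpcs: prefix. A reduced sketch of scheme-based credential selection; the pre-parsed host passed to grpc.NewClient is an assumption, not taken from the SDK:

package main

import (
	"crypto/tls"
	"strings"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// credsFor picks transport credentials from the address scheme: grpcs://
// gets TLS with default settings, anything else stays plaintext.
func credsFor(uri string) grpc.DialOption {
	if strings.HasPrefix(uri, "grpcs:") {
		return grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{}))
	}
	return grpc.WithTransportCredentials(insecure.NewCredentials())
}

func main() {
	uri := "grpcs://node.example:8802"
	// A full client would parse the host out of the URI first.
	cc, err := grpc.NewClient("node.example:8802", credsFor(uri))
	if err == nil {
		defer cc.Close()
	}
}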

View file

@ -4,14 +4,12 @@ import (
"context"
"os"
"os/signal"
"strconv"
"syscall"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/spf13/cast"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@ -46,30 +44,11 @@ func reloadConfig() error {
if err != nil {
return err
}
err = logPrm.SetTags(loggerTags())
if err != nil {
return err
}
logger.UpdateLevelForTags(logPrm)
log.Reload(logPrm)
return nil
}
func loggerTags() [][]string {
var res [][]string
for i := 0; ; i++ {
var item []string
index := strconv.FormatInt(int64(i), 10)
names := cast.ToString(cfg.Get("logger.tags." + index + ".names"))
if names == "" {
break
}
item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level")))
res = append(res, item)
}
return res
}
func watchForSignal(ctx context.Context, cancel func()) {
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)

View file

@ -80,14 +80,10 @@ func main() {
exitErr(err)
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
err = logPrm.SetTags(loggerTags())
exitErr(err)
log, err = logger.NewLogger(logPrm)
exitErr(err)
logger.UpdateLevelForTags(logPrm)
ctx, cancel := context.WithCancel(context.Background())
pprofCmp = newPprofComponent()

View file

@ -3,8 +3,6 @@ package common
import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
)
type FilterResult byte
@ -73,7 +71,11 @@ func (fp FallbackParser) ToParser() Parser {
func (p Parser) ToFallbackParser() FallbackParser {
return func(key, value []byte) (SchemaEntry, Parser) {
entry, next, err := p(key, value)
assert.NoError(err, "couldn't use that parser as a fallback parser")
if err != nil {
panic(fmt.Errorf(
"couldn't use that parser as a fallback parser, it returned an error: %w", err,
))
}
return entry, next
}
}

View file

@ -53,17 +53,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
f.historyPointer++
// Stop iterating over history.
if f.historyPointer == len(f.history) {
f.SetText(f.currentContent)
f.InputField.SetText(f.currentContent)
return
}
f.SetText(f.history[f.historyPointer])
f.InputField.SetText(f.history[f.historyPointer])
case tcell.KeyUp:
if len(f.history) == 0 {
return
}
// Start iterating over history.
if f.historyPointer == len(f.history) {
f.currentContent = f.GetText()
f.currentContent = f.InputField.GetText()
}
// End of history.
if f.historyPointer == 0 {
@ -71,7 +71,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
}
// Iterate to least recent prompts.
f.historyPointer--
f.SetText(f.history[f.historyPointer])
f.InputField.SetText(f.history[f.historyPointer])
default:
f.InputField.InputHandler()(event, func(tview.Primitive) {})
}
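Note: f.SetText and f.InputField.SetText are the same call through Go's embedded-field method promotion, as long as InputFieldWithHistory does not define its own SetText; the explicit form merely spells out the receiver. A minimal illustration of the promotion rule, with toy types standing in for the tview ones:

package main

import "fmt"

type InputField struct{ text string }

func (f *InputField) SetText(s string) { f.text = s }
func (f *InputField) GetText() string  { return f.text }

// InputFieldWithHistory embeds InputField, so its methods are promoted.
type InputFieldWithHistory struct {
	*InputField
	history []string
}

func main() {
	f := &InputFieldWithHistory{InputField: &InputField{}}
	f.SetText("hello")            // promoted: resolves to the line below
	f.InputField.SetText("hello") // explicit receiver, same method
	fmt.Println(f.GetText())
}

The spelling only starts to matter if the outer type later overrides SetText, at which point the promoted call changes meaning while the explicit one does not.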

View file

@ -8,7 +8,6 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
@ -95,7 +94,9 @@ func (v *RecordsView) Mount(ctx context.Context) error {
}
func (v *RecordsView) Unmount() {
assert.False(v.onUnmount == nil, "try to unmount not mounted component")
if v.onUnmount == nil {
panic("try to unmount not mounted component")
}
v.onUnmount()
v.onUnmount = nil
}

View file

@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
}
ui.MouseHandler()
ui.Box.MouseHandler()
}
func (ui *UI) WithPrompt(prompt string) error {

View file

@ -14,7 +14,7 @@ import (
func initAPEManagerService(c *cfg) {
contractStorage := ape_contract.NewProxyVerificationContractStorage(
morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
c.key,
c.shared.key,
c.cfgMorph.proxyScriptHash,
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)

View file

@ -1,27 +1,20 @@
package main
import (
"bytes"
"cmp"
"context"
"slices"
"sync"
"sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/hashicorp/golang-lru/v2/expirable"
"github.com/hashicorp/golang-lru/v2/simplelru"
"go.uber.org/zap"
)
type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error)
@ -117,6 +110,55 @@ func (c *ttlNetCache[K, V]) remove(key K) {
hit = c.cache.Remove(key)
}
// entity that provides LRU cache interface.
type lruNetCache struct {
cache *lru.Cache[uint64, *netmapSDK.NetMap]
netRdr netValueReader[uint64, *netmapSDK.NetMap]
metrics cacheMetrics
}
// newNetworkLRUCache returns wrapper over netValueReader with LRU cache.
func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache {
cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
fatalOnErr(err)
return &lruNetCache{
cache: cache,
netRdr: netRdr,
metrics: metrics,
}
}
// reads value by the key.
//
// updates the value from the network on cache miss.
//
// returned value should not be modified.
func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
hit := false
startedAt := time.Now()
defer func() {
c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
}()
val, ok := c.cache.Get(key)
if ok {
hit = true
return val, nil
}
val, err := c.netRdr(ctx, key)
if err != nil {
return nil, err
}
c.cache.Add(key, val)
return val, nil
}
// wrapper over TTL cache of values read from the network
// that implements container storage.
type ttlContainerStorage struct {
@ -158,236 +200,20 @@ func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*con
type lruNetmapSource struct {
netState netmap.State
client rawSource
cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]]
mtx sync.RWMutex
metrics cacheMetrics
log *logger.Logger
candidates atomic.Pointer[[]netmapSDK.NodeInfo]
cache *lruNetCache
}
type rawSource interface {
GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error)
GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error)
}
func newCachedNetmapStorage(ctx context.Context, log *logger.Logger,
netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration,
) netmap.Source {
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10
cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil)
fatalOnErr(err)
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
return v.GetNetMapByEpoch(ctx, key)
}, metrics.NewCacheMetrics("netmap"))
src := &lruNetmapSource{
netState: netState,
client: client,
cache: cache,
log: log,
metrics: metrics.NewCacheMetrics("netmap"),
return &lruNetmapSource{
netState: s,
cache: lruNetmapCache,
}
wg.Add(1)
go func() {
defer wg.Done()
src.updateCandidates(ctx, d)
}()
return src
}
// updateCandidates routine to merge netmap in cache with candidates list.
func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) {
timer := time.NewTimer(d)
defer timer.Stop()
for {
select {
case <-ctx.Done():
return
case <-timer.C:
newCandidates, err := s.client.GetCandidates(ctx)
if err != nil {
s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err))
timer.Reset(d)
break
}
if len(newCandidates) == 0 {
s.candidates.Store(&newCandidates)
timer.Reset(d)
break
}
slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
return cmp.Compare(n1.Hash(), n2.Hash())
})
// Check once state changed
v := s.candidates.Load()
if v == nil {
s.candidates.Store(&newCandidates)
s.mergeCacheWithCandidates(newCandidates)
timer.Reset(d)
break
}
ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int {
if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) ||
uint32(n1.Status()) != uint32(n2.Status()) ||
slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 {
return 1
}
var ne1 []string
n1.IterateNetworkEndpoints(func(s string) bool {
ne1 = append(ne1, s)
return false
})
var ne2 []string
n2.IterateNetworkEndpoints(func(s string) bool {
ne2 = append(ne2, s)
return false
})
return slices.Compare(ne1, ne2)
})
if ret != 0 {
s.candidates.Store(&newCandidates)
s.mergeCacheWithCandidates(newCandidates)
}
timer.Reset(d)
}
}
}
func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) {
s.mtx.Lock()
tmp := s.cache.Values()
s.mtx.Unlock()
for _, pointer := range tmp {
nm := pointer.Load()
updates := getNetMapNodesToUpdate(nm, candidates)
if len(updates) > 0 {
nm = nm.Clone()
mergeNetmapWithCandidates(updates, nm)
pointer.Store(nm)
}
}
}
// reads value by the key.
//
// updates the value from the network on cache miss.
//
// returned value should not be modified.
func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
hit := false
startedAt := time.Now()
defer func() {
s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit)
}()
s.mtx.RLock()
val, ok := s.cache.Get(key)
s.mtx.RUnlock()
if ok {
hit = true
return val.Load(), nil
}
s.mtx.Lock()
defer s.mtx.Unlock()
val, ok = s.cache.Get(key)
if ok {
hit = true
return val.Load(), nil
}
nm, err := s.client.GetNetMapByEpoch(ctx, key)
if err != nil {
return nil, err
}
v := s.candidates.Load()
if v != nil {
updates := getNetMapNodesToUpdate(nm, *v)
if len(updates) > 0 {
mergeNetmapWithCandidates(updates, nm)
}
}
p := atomic.Pointer[netmapSDK.NetMap]{}
p.Store(nm)
s.cache.Add(key, &p)
return nm, nil
}
// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates.
func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) {
for _, v := range updates {
if v.status != netmapSDK.UnspecifiedState {
nm.Nodes()[v.netmapIndex].SetStatus(v.status)
}
if v.externalAddresses != nil {
nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...)
}
if v.endpoints != nil {
nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...)
}
}
}
type nodeToUpdate struct {
netmapIndex int
status netmapSDK.NodeState
externalAddresses []string
endpoints []string
}
// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates.
func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate {
var res []nodeToUpdate
for i := range nm.Nodes() {
for _, cnd := range candidates {
if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) {
var tmp nodeToUpdate
var update bool
if cnd.Status() != nm.Nodes()[i].Status() &&
(cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) {
update = true
tmp.status = cnd.Status()
}
externalAddresses := cnd.ExternalAddresses()
if externalAddresses != nil &&
slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 {
update = true
tmp.externalAddresses = externalAddresses
}
nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints())
nm.Nodes()[i].IterateNetworkEndpoints(func(s string) bool {
nodeEndpoints = append(nodeEndpoints, s)
return false
})
candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints())
cnd.IterateNetworkEndpoints(func(s string) bool {
candidateEndpoints = append(candidateEndpoints, s)
return false
})
if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 {
update = true
tmp.endpoints = candidateEndpoints
}
if update {
tmp.netmapIndex = i
res = append(res, tmp)
}
break
}
}
}
return res
}
func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
@ -399,7 +225,7 @@ func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*
}
func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
val, err := s.get(ctx, epoch)
val, err := s.cache.get(ctx, epoch)
if err != nil {
return nil, err
}
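Note: this file contrasts two cache designs: a plain read-through LRU (lruNetCache) that loads on miss and stores the result, and a richer lruNetmapSource that keeps atomic pointers in the LRU and patches cached netmaps in place as the background candidates poller detects changes. The core read-through shape, made generic here purely for illustration, using the same hashicorp/golang-lru/v2 package imported above:

package main

import (
	"context"
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

// readThrough wraps a loader with an LRU: hits return the cached value,
// misses call the loader and populate the cache.
type readThrough[K comparable, V any] struct {
	cache *lru.Cache[K, V]
	load  func(ctx context.Context, key K) (V, error)
}

func newReadThrough[K comparable, V any](size int, load func(context.Context, K) (V, error)) (*readThrough[K, V], error) {
	c, err := lru.New[K, V](size)
	if err != nil {
		return nil, err
	}
	return &readThrough[K, V]{cache: c, load: load}, nil
}

func (r *readThrough[K, V]) get(ctx context.Context, key K) (V, error) {
	if v, ok := r.cache.Get(key); ok {
		return v, nil
	}
	v, err := r.load(ctx, key)
	if err != nil {
		var zero V
		return zero, err
	}
	r.cache.Add(key, v)
	return v, nil
}

func main() {
	rt, _ := newReadThrough(10, func(_ context.Context, epoch uint64) (string, error) {
		return fmt.Sprintf("netmap@%d", epoch), nil
	})
	v, _ := rt.get(context.Background(), 1)
	fmt.Println(v) // loaded once, served from cache afterwards
}

Unlike the double-checked locking in lruNetmapSource.get above, two concurrent misses in this sketch may both invoke the loader; that is the trade-off the simpler design accepts.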

View file

@ -3,11 +3,9 @@ package main
import (
"context"
"errors"
"sync"
"testing"
"time"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/stretchr/testify/require"
)
@ -61,75 +59,3 @@ func testNetValueReader(_ context.Context, key string) (time.Time, error) {
type noopCacheMetricts struct{}
func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {}
type rawSrc struct{}
func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) {
node0 := netmapSDK.NodeInfo{}
node0.SetPublicKey([]byte{byte(1)})
node0.SetStatus(netmapSDK.Online)
node0.SetExternalAddresses("1", "0")
node0.SetNetworkEndpoints("1", "0")
node1 := netmapSDK.NodeInfo{}
node1.SetPublicKey([]byte{byte(1)})
node1.SetStatus(netmapSDK.Online)
node1.SetExternalAddresses("1", "0")
node1.SetNetworkEndpoints("1", "0")
return []netmapSDK.NodeInfo{node0, node1}, nil
}
func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
nm := netmapSDK.NetMap{}
nm.SetEpoch(1)
node0 := netmapSDK.NodeInfo{}
node0.SetPublicKey([]byte{byte(1)})
node0.SetStatus(netmapSDK.Maintenance)
node0.SetExternalAddresses("0")
node0.SetNetworkEndpoints("0")
node1 := netmapSDK.NodeInfo{}
node1.SetPublicKey([]byte{byte(1)})
node1.SetStatus(netmapSDK.Maintenance)
node1.SetExternalAddresses("0")
node1.SetNetworkEndpoints("0")
nm.SetNodes([]netmapSDK.NodeInfo{node0, node1})
return &nm, nil
}
type st struct{}
func (s *st) CurrentEpoch() uint64 {
return 1
}
func TestNetmapStorage(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
wg := sync.WaitGroup{}
cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50)
nm, err := cache.GetNetMapByEpoch(ctx, 1)
require.NoError(t, err)
require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance)
require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1)
require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1)
require.Eventually(t, func() bool {
nm, err := cache.GetNetMapByEpoch(ctx, 1)
require.NoError(t, err)
for _, node := range nm.Nodes() {
if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 &&
node.NumberOfNetworkEndpoints() == 2) {
return false
}
}
return true
}, time.Second*5, time.Millisecond*10)
cancel()
wg.Wait()
}
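Note: TestNetmapStorage waits for the background candidates poller with require.Eventually rather than a fixed sleep, polling the assertion until it holds or a deadline expires. The idiom in isolation, with a goroutine standing in for the poller:

package main

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// TestEventuallyPattern waits for a background update with a deadline and
// a tick instead of sleeping a fixed amount and hoping the goroutine ran.
func TestEventuallyPattern(t *testing.T) {
	var merged atomic.Bool
	go func() {
		time.Sleep(50 * time.Millisecond) // stands in for the candidates poller
		merged.Store(true)
	}()
	require.Eventually(t, merged.Load, 5*time.Second, 10*time.Millisecond)
}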

View file

@ -108,8 +108,6 @@ type applicationConfiguration struct {
level string
destination string
timestamp bool
options []zap.Option
tags [][]string
}
ObjectCfg struct {
@ -234,15 +232,6 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.level = loggerconfig.Level(c)
a.LoggerCfg.destination = loggerconfig.Destination(c)
a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
var opts []zap.Option
if loggerconfig.ToLokiConfig(c).Enabled {
opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
return lokiCore
})}
}
a.LoggerCfg.options = opts
a.LoggerCfg.tags = loggerconfig.Tags(c)
// Object
@ -385,11 +374,14 @@ func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardco
}
func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
limitsConfig := source.Limits().ToConfig()
limitsConfig := source.Limits()
limiter, err := qos.NewLimiter(limitsConfig)
if err != nil {
return err
}
if target.limiter != nil {
target.limiter.Close()
}
target.limiter = limiter
return nil
}
@ -726,7 +718,12 @@ func initCfg(appCfg *config.Config) *cfg {
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
logger.UpdateLevelForTags(logPrm)
if loggerconfig.ToLokiConfig(appCfg).Enabled {
log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
return lokiCore
}))
}
c.internals = initInternals(appCfg, log)
@ -1093,12 +1090,6 @@ func (c *cfg) loggerPrm() (logger.Prm, error) {
return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
}
prm.PrependTimestamp = c.LoggerCfg.timestamp
prm.Options = c.LoggerCfg.options
err = prm.SetTags(c.LoggerCfg.tags)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination)
}
return prm, nil
}
@ -1386,7 +1377,7 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp {
if err != nil {
return err
}
logger.UpdateLevelForTags(prm)
c.log.Reload(prm)
return nil
}})
components = append(components, dCmp{"runtime", func() error {
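Note: the setLimiter change closes the previous limiter before installing its replacement, so a config reload does not leak whatever resources the old instance holds. The swap-and-close pattern in miniature, with a toy limiter interface standing in for the qos one:

package main

import "fmt"

type limiter interface {
	Close()
}

type noopLimiter struct{ name string }

func (l *noopLimiter) Close() { fmt.Println("closed", l.name) }

type shard struct{ limiter limiter }

// setLimiter mirrors the reconfiguration pattern above: release the old
// instance, if any, before storing its replacement.
func (s *shard) setLimiter(next limiter) {
	if s.limiter != nil {
		s.limiter.Close()
	}
	s.limiter = next
}

func main() {
	var s shard
	s.setLimiter(&noopLimiter{name: "v1"})
	s.setLimiter(&noopLimiter{name: "v2"}) // prints: closed v1
}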

View file

@ -11,10 +11,10 @@ import (
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"github.com/stretchr/testify/require"
)
@ -135,8 +135,8 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
readLimits := limits.ToConfig().Read
writeLimits := limits.ToConfig().Write
readLimits := limits.Read()
writeLimits := limits.Write()
require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
@ -144,7 +144,7 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
require.ElementsMatch(t, readLimits.Tags,
[]qos.IOTagConfig{
[]limitsconfig.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(20),
@ -168,19 +168,13 @@ func TestEngineSection(t *testing.T) {
LimitOps: toPtr(25000),
},
{
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25000),
Prohibited: true,
},
{
Tag: "treesync",
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25),
LimitOps: toPtr(25000),
},
})
require.ElementsMatch(t, writeLimits.Tags,
[]qos.IOTagConfig{
[]limitsconfig.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(200),
@ -208,11 +202,6 @@ func TestEngineSection(t *testing.T) {
Weight: toPtr(50),
LimitOps: toPtr(2500),
},
{
Tag: "treesync",
Weight: toPtr(50),
LimitOps: toPtr(100),
},
})
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
@ -269,14 +258,14 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
readLimits := limits.ToConfig().Read
writeLimits := limits.ToConfig().Write
require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout)
require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps)
require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps)
require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout)
require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps)
require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps)
readLimits := limits.Read()
writeLimits := limits.Write()
require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout)
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps)
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps)
require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout)
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps)
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps)
require.Equal(t, 0, len(readLimits.Tags))
require.Equal(t, 0, len(writeLimits.Tags))
}

View file

@ -1,13 +1,19 @@
package limits
import (
"math"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"github.com/spf13/cast"
)
const (
NoLimit int64 = math.MaxInt64
DefaultIdleTimeout = 5 * time.Minute
)
// From wraps config section into Config.
func From(c *config.Config) *Config {
return (*Config)(c)
@ -17,43 +23,36 @@ func From(c *config.Config) *Config {
// which provides access to Shard's limits configurations.
type Config config.Config
func (x *Config) ToConfig() qos.LimiterConfig {
result := qos.LimiterConfig{
Read: x.read(),
Write: x.write(),
}
panicOnErr(result.Validate())
return result
}
func (x *Config) read() qos.OpConfig {
// Read returns the value of "read" limits config section.
func (x *Config) Read() OpConfig {
return x.parse("read")
}
func (x *Config) write() qos.OpConfig {
// Write returns the value of "write" limits config section.
func (x *Config) Write() OpConfig {
return x.parse("write")
}
func (x *Config) parse(sub string) qos.OpConfig {
func (x *Config) parse(sub string) OpConfig {
c := (*config.Config)(x).Sub(sub)
var result qos.OpConfig
var result OpConfig
if s := config.Int(c, "max_waiting_ops"); s > 0 {
result.MaxWaitingOps = s
} else {
result.MaxWaitingOps = qos.NoLimit
result.MaxWaitingOps = NoLimit
}
if s := config.Int(c, "max_running_ops"); s > 0 {
result.MaxRunningOps = s
} else {
result.MaxRunningOps = qos.NoLimit
result.MaxRunningOps = NoLimit
}
if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
result.IdleTimeout = s
} else {
result.IdleTimeout = qos.DefaultIdleTimeout
result.IdleTimeout = DefaultIdleTimeout
}
result.Tags = tags(c)
@ -61,16 +60,42 @@ func (x *Config) parse(sub string) qos.OpConfig {
return result
}
func tags(c *config.Config) []qos.IOTagConfig {
type OpConfig struct {
// MaxWaitingOps returns the value of "max_waiting_ops" config parameter.
//
// Equals NoLimit if the value is not a positive number.
MaxWaitingOps int64
// MaxRunningOps returns the value of "max_running_ops" config parameter.
//
// Equals NoLimit if the value is not a positive number.
MaxRunningOps int64
// IdleTimeout returns the value of "idle_timeout" config parameter.
//
// Equals DefaultIdleTimeout if the value is not a valid duration.
IdleTimeout time.Duration
// Tags returns the value of "tags" config parameter.
//
// Equals nil if the value is not a valid tags config slice.
Tags []IOTagConfig
}
type IOTagConfig struct {
Tag string
Weight *float64
LimitOps *float64
ReservedOps *float64
}
func tags(c *config.Config) []IOTagConfig {
c = c.Sub("tags")
var result []qos.IOTagConfig
var result []IOTagConfig
for i := 0; ; i++ {
tag := config.String(c, strconv.Itoa(i)+".tag")
if tag == "" {
return result
}
var tagConfig qos.IOTagConfig
var tagConfig IOTagConfig
tagConfig.Tag = tag
v := c.Value(strconv.Itoa(i) + ".weight")
@ -94,13 +119,6 @@ func tags(c *config.Config) []qos.IOTagConfig {
tagConfig.ReservedOps = &r
}
v = c.Value(strconv.Itoa(i) + ".prohibited")
if v != nil {
r, err := cast.ToBoolE(v)
panicOnErr(err)
tagConfig.Prohibited = r
}
result = append(result, tagConfig)
}
}
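Note: tags() walks numbered subsections ("0.tag", "1.tag", ...) and stops at the first index whose tag is empty, which is how a variable-length list is read out of this flat config format. The walk reduced to its essentials over a plain map, purely for illustration:

package main

import (
	"fmt"
	"strconv"
)

// walkTags reads numbered entries until the first missing tag, mimicking
// the tags() loop above over a flat key-value view of the config.
func walkTags(cfg map[string]string) []string {
	var result []string
	for i := 0; ; i++ {
		tag, ok := cfg[strconv.Itoa(i)+".tag"]
		if !ok || tag == "" {
			return result
		}
		result = append(result, tag)
	}
}

func main() {
	cfg := map[string]string{"0.tag": "internal", "1.tag": "client"}
	fmt.Println(walkTags(cfg)) // [internal client]
}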

View file

@ -2,7 +2,6 @@ package loggerconfig
import (
"os"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
@ -61,21 +60,6 @@ func Timestamp(c *config.Config) bool {
return config.BoolSafe(c.Sub(subsection), "timestamp")
}
// Tags returns the value of "tags" config parameter from "logger" section.
func Tags(c *config.Config) [][]string {
var res [][]string
sub := c.Sub(subsection).Sub("tags")
for i := 0; ; i++ {
s := sub.Sub(strconv.FormatInt(int64(i), 10))
names := config.StringSafe(s, "names")
if names == "" {
break
}
res = append(res, []string{names, config.StringSafe(s, "level")})
}
return res
}
// ToLokiConfig extracts loki config.
func ToLokiConfig(c *config.Config) loki.Config {
hostname, _ := os.Hostname()

View file

@ -33,9 +33,6 @@ const (
// ContainerCacheSizeDefault represents the default size for the container cache.
ContainerCacheSizeDefault = 100
// PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates.
PollCandidatesTimeoutDefault = 20 * time.Second
)
var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
@ -157,17 +154,3 @@ func FrostfsIDCacheSize(c *config.Config) uint32 {
}
return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size")
}
// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter
// from "morph" section.
//
// Returns PollCandidatesTimeoutDefault if the value is not positive duration.
func NetmapCandidatesPollInterval(c *config.Config) time.Duration {
v := config.DurationSafe(c.Sub(subsection).
Sub("netmap").Sub("candidates"), "poll_interval")
if v > 0 {
return v
}
return PollCandidatesTimeoutDefault
}

View file

@ -32,7 +32,7 @@ func initContainerService(_ context.Context, c *cfg) {
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
fatalOnErr(err)
c.cnrClient = wrap
c.shared.cnrClient = wrap
cnrSrc := cntClient.AsContainerSource(wrap)
@ -47,7 +47,7 @@ func initContainerService(_ context.Context, c *cfg) {
frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
}
c.frostfsidClient = frostfsIDSubjectProvider
c.shared.frostfsidClient = frostfsIDSubjectProvider
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
@ -57,7 +57,7 @@ func initContainerService(_ context.Context, c *cfg) {
service := containerService.NewSignService(
&c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient,
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
containerService.NewSplitterService(
c.cfgContainer.containerBatchSize, c.respSvc,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),

View file

@ -8,38 +8,38 @@ import (
func metricsComponent(c *cfg) (*httpComponent, bool) {
var updated bool
// check if it has been inited before
if c.metrics == nil {
c.metrics = new(httpComponent)
c.metrics.cfg = c
c.metrics.name = "metrics"
c.metrics.handler = metrics.Handler()
if c.dynamicConfiguration.metrics == nil {
c.dynamicConfiguration.metrics = new(httpComponent)
c.dynamicConfiguration.metrics.cfg = c
c.dynamicConfiguration.metrics.name = "metrics"
c.dynamicConfiguration.metrics.handler = metrics.Handler()
updated = true
}
// (re)init read configuration
enabled := metricsconfig.Enabled(c.appCfg)
if enabled != c.metrics.enabled {
c.metrics.enabled = enabled
if enabled != c.dynamicConfiguration.metrics.enabled {
c.dynamicConfiguration.metrics.enabled = enabled
updated = true
}
address := metricsconfig.Address(c.appCfg)
if address != c.metrics.address {
c.metrics.address = address
if address != c.dynamicConfiguration.metrics.address {
c.dynamicConfiguration.metrics.address = address
updated = true
}
dur := metricsconfig.ShutdownTimeout(c.appCfg)
if dur != c.metrics.shutdownDur {
c.metrics.shutdownDur = dur
if dur != c.dynamicConfiguration.metrics.shutdownDur {
c.dynamicConfiguration.metrics.shutdownDur = dur
updated = true
}
return c.metrics, updated
return c.dynamicConfiguration.metrics, updated
}
func enableMetricsSvc(c *cfg) {
c.metricsSvc.Enable()
c.shared.metricsSvc.Enable()
}
func disableMetricsSvc(c *cfg) {
c.metricsSvc.Disable()
c.shared.metricsSvc.Disable()
}

View file

@ -60,11 +60,10 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
}
if c.cfgMorph.cacheTTL < 0 {
netmapSource = newRawNetmapStorage(wrap)
netmapSource = wrap
} else {
// use RPC node as source of netmap (with caching)
netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg,
morphconfig.NetmapCandidatesPollInterval(c.appCfg))
netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap)
}
c.netMapSource = netmapSource

View file

@ -1,55 +0,0 @@
package main
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
type rawNetmapSource struct {
client *netmapClient.Client
}
func newRawNetmapStorage(client *netmapClient.Client) netmap.Source {
return &rawNetmapSource{
client: client,
}
}
func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
nm, err := s.client.GetNetMap(ctx, diff)
if err != nil {
return nil, err
}
candidates, err := s.client.GetCandidates(ctx)
if err != nil {
return nil, err
}
updates := getNetMapNodesToUpdate(nm, candidates)
if len(updates) > 0 {
mergeNetmapWithCandidates(updates, nm)
}
return nm, nil
}
func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
nm, err := s.client.GetNetMapByEpoch(ctx, epoch)
if err != nil {
return nil, err
}
candidates, err := s.client.GetCandidates(ctx)
if err != nil {
return nil, err
}
updates := getNetMapNodesToUpdate(nm, candidates)
if len(updates) > 0 {
mergeNetmapWithCandidates(updates, nm)
}
return nm, nil
}
func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) {
return s.client.Epoch(ctx)
}

View file

@ -186,9 +186,9 @@ func initObjectService(c *cfg) {
respSvc,
)
c.metricsSvc = objectService.NewMetricCollector(
c.shared.metricsSvc = objectService.NewMetricCollector(
signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService)
qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService)
auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
server := objectTransportGRPC.New(auditSvc)
@ -432,7 +432,7 @@ func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectServic
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc),
c.frostfsidClient,
c.shared.frostfsidClient,
c.netMapSource,
c.cfgNetmap.state,
c.cfgObject.cnrSource,

View file

@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) {
func pprofComponent(c *cfg) (*httpComponent, bool) {
var updated bool
// check if it has been inited before
if c.pprof == nil {
c.pprof = new(httpComponent)
c.pprof.cfg = c
c.pprof.name = "pprof"
c.pprof.handler = httputil.Handler()
c.pprof.preReload = tuneProfilers
if c.dynamicConfiguration.pprof == nil {
c.dynamicConfiguration.pprof = new(httpComponent)
c.dynamicConfiguration.pprof.cfg = c
c.dynamicConfiguration.pprof.name = "pprof"
c.dynamicConfiguration.pprof.handler = httputil.Handler()
c.dynamicConfiguration.pprof.preReload = tuneProfilers
updated = true
}
// (re)init read configuration
enabled := profilerconfig.Enabled(c.appCfg)
if enabled != c.pprof.enabled {
c.pprof.enabled = enabled
if enabled != c.dynamicConfiguration.pprof.enabled {
c.dynamicConfiguration.pprof.enabled = enabled
updated = true
}
address := profilerconfig.Address(c.appCfg)
if address != c.pprof.address {
c.pprof.address = address
if address != c.dynamicConfiguration.pprof.address {
c.dynamicConfiguration.pprof.address = address
updated = true
}
dur := profilerconfig.ShutdownTimeout(c.appCfg)
if dur != c.pprof.shutdownDur {
c.pprof.shutdownDur = dur
if dur != c.dynamicConfiguration.pprof.shutdownDur {
c.dynamicConfiguration.pprof.shutdownDur = dur
updated = true
}
return c.pprof, updated
return c.dynamicConfiguration.pprof, updated
}
func tuneProfilers(c *cfg) {

View file

@ -43,9 +43,6 @@ func initQoSService(c *cfg) {
func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
rawTag, defined := qosTagging.IOTagFromContext(ctx)
if !defined {
if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String())
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
ioTag, err := qos.FromRawString(rawTag)
@ -76,8 +73,20 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal:
if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
return ctx
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
@ -86,23 +95,3 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
}
func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool {
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, publicKey) {
return true
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return false
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), publicKey) {
return true
}
}
return false
}
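Note: this hunk is a pure extraction: the duplicated "allowed internal key or current netmap member" checks become a single isInternalIOTagPublicKey helper shared by the undefined-tag and internal-tag branches, with netmap lookup failures treated as "not internal" so callers downgrade to the client tag. The factored shape, with getNetmapKeys standing in for the netmap source:

package main

import (
	"bytes"
	"fmt"
)

// isInternalKey checks a static allow-list first, then the current netmap,
// mirroring the extracted helper. Lookup errors report false, so callers
// fall back to the client IO tag.
func isInternalKey(key []byte, allowed [][]byte, getNetmapKeys func() ([][]byte, error)) bool {
	for _, pk := range allowed {
		if bytes.Equal(pk, key) {
			return true
		}
	}
	nodes, err := getNetmapKeys()
	if err != nil {
		return false
	}
	for _, pk := range nodes {
		if bytes.Equal(pk, key) {
			return true
		}
	}
	return false
}

func main() {
	allowed := [][]byte{[]byte("internal-key")}
	ok := isInternalKey([]byte("node-key"), allowed, func() ([][]byte, error) {
		return [][]byte{[]byte("node-key")}, nil
	})
	fmt.Println(ok) // true: the key belongs to a netmap node
}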

View file

@ -1,226 +0,0 @@
package main
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestQoSService_Client(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag client defined", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
}
func TestQoSService_Internal(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
}
func TestQoSService_Critical(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagCritical.String(), tag)
})
t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagCritical.String(), tag)
})
}
func TestQoSService_NetmapGetError(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
s.netmapSource = &utilTesting.TestNetmapSource{}
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
}
func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) {
nmSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
reqSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
allowedCritSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
allowedIntSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
var node netmap.NodeInfo
node.SetPublicKey(nmSigner.PublicKey().Bytes())
nm := &netmap.NetMap{}
nm.SetEpoch(100)
nm.SetNodes([]netmap.NodeInfo{node})
return &cfgQoSService{
logger: test.NewLogger(t),
netmapSource: &utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
100: nm,
},
CurrentEpoch: 100,
},
allowedCriticalPubs: [][]byte{
allowedCritSigner.PublicKey().Bytes(),
},
allowedInternalPubs: [][]byte{
allowedIntSigner.PublicKey().Bytes(),
},
},
&testQoSServicePublicKeys{
NetmapNode: nmSigner.PublicKey().Bytes(),
Request: reqSigner.PublicKey().Bytes(),
Internal: allowedIntSigner.PublicKey().Bytes(),
Critical: allowedCritSigner.PublicKey().Bytes(),
}
}
type testQoSServicePublicKeys struct {
NetmapNode []byte
Request []byte
Internal []byte
Critical []byte
}
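Taken together, these cases pin down a single trust policy: a request keeps its IO tag only when its signer is trusted for that tag, and everything else degrades to `client`. Below is a minimal restatement of that policy as the tests assert it — not the node's actual implementation; the `adjustTag` helper and its boolean parameters are invented for illustration:

```go
// adjustTag restates the decision table asserted by the tests above.
// hasTag is false when the incoming context carries no IO tag at all.
func adjustTag(tag string, hasTag, signerIsNetmapNode, signerAllowedInternal, signerAllowedCritical bool) string {
	if !hasTag {
		// Untagged requests signed by node keys default to internal traffic.
		if signerIsNetmapNode || signerAllowedInternal {
			return qos.IOTagInternal.String()
		}
		return qos.IOTagClient.String()
	}
	switch tag {
	case qos.IOTagInternal.String():
		if signerIsNetmapNode || signerAllowedInternal {
			return tag
		}
	case qos.IOTagCritical.String():
		if signerIsNetmapNode || signerAllowedCritical {
			return tag
		}
	}
	// Unknown tags, client tags, and tags from untrusted signers all
	// collapse to the client tag.
	return qos.IOTagClient.String()
}
```

When the netmap cannot be fetched (the `TestQoSService_NetmapGetError` cases), netmap trust cannot be established, so `signerIsNetmapNode` is effectively false and such requests degrade to `client` as well.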

View file

@ -51,9 +51,9 @@ func initTreeService(c *cfg) {
c.treeService = tree.New(
tree.WithContainerSource(cnrSource{
src: c.cfgObject.cnrSource,
cli: c.cnrClient,
cli: c.shared.cnrClient,
}),
tree.WithFrostfsidSubjectProvider(c.frostfsidClient),
tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
tree.WithNetmapSource(c.netMapSource),
tree.WithPrivateKey(&c.key.PrivateKey),
tree.WithLogger(c.log),

View file

@ -30,11 +30,6 @@ func validateConfig(c *config.Config) error {
return fmt.Errorf("invalid logger destination: %w", err)
}
err = loggerPrm.SetTags(loggerconfig.Tags(c))
if err != nil {
return fmt.Errorf("invalid list of allowed tags: %w", err)
}
// shard configuration validation
shardNum := 0

View file

@ -1,7 +1,5 @@
FROSTFS_IR_LOGGER_LEVEL=info
FROSTFS_IR_LOGGER_TIMESTAMP=true
FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph"
FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug"
FROSTFS_IR_WALLET_PATH=/path/to/wallet.json
FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX

View file

@ -3,9 +3,6 @@
logger:
level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
timestamp: true
tags:
- names: "main, morph" # Possible values: `main`, `morph`, `grpc_svc`, `ir`, `processor`.
level: debug
wallet:
path: /path/to/wallet.json # Path to NEP-6 NEO wallet file

View file

@ -180,10 +180,6 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
@ -201,9 +197,6 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100
## 1 shard
### Flag to refill Metabase from BlobStor

View file

@ -252,13 +252,7 @@
{
"tag": "policer",
"weight": 5,
"limit_ops": 25000,
"prohibited": true
},
{
"tag": "treesync",
"weight": 5,
"limit_ops": 25
"limit_ops": 25000
}
]
},
@ -293,11 +287,6 @@
"tag": "policer",
"weight": 50,
"limit_ops": 2500
},
{
"tag": "treesync",
"weight": 50,
"limit_ops": 100
}
]
}

View file

@ -95,9 +95,6 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
ape_chain_cache_size: 100000
netmap:
candidates:
poll_interval: 20s
apiclient:
dial_timeout: 15s # timeout for FrostFS API client connection
@ -151,7 +148,7 @@ storage:
flush_worker_count: 30 # number of write-cache flusher threads
metabase:
perm: 0o644 # permissions for metabase files(directories: +x for current user and group)
perm: 0644 # permissions for metabase files(directories: +x for current user and group)
max_batch_size: 200
max_batch_delay: 20ms
@ -164,13 +161,13 @@ storage:
blobstor:
- size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
- perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
- perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 5 # max depth of object tree storage in FS
gc:
@ -252,10 +249,6 @@ storage:
- tag: policer
weight: 5
limit_ops: 25000
prohibited: true
- tag: treesync
weight: 5
limit_ops: 25
write:
max_running_ops: 1000
max_waiting_ops: 100
@ -278,9 +271,6 @@ storage:
- tag: policer
weight: 50
limit_ops: 2500
- tag: treesync
weight: 50
limit_ops: 100
1:
writecache:
@ -300,7 +290,7 @@ storage:
pilorama:
path: tmp/1/blob/pilorama.db
no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
perm: 0o644 # permission to use for the database file and intermediate directories
perm: 0644 # permission to use for the database file and intermediate directories
tracing:
enabled: true

View file

@ -148,19 +148,15 @@ morph:
- address: wss://rpc2.morph.frostfs.info:40341/ws
priority: 2
switch_interval: 2m
netmap:
candidates:
poll_interval: 20s
```
| Parameter | Type | Default value | Description |
|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
| `netmap.candidates.poll_interval` | `duration` | `20s` | Interval at which netmap candidates are polled and merged into the locally cached netmap. |
| Parameter | Type | Default value | Description |
| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. |
| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).<br/>Negative value disables caching.<br/>Cached entities: containers, container lists, eACL tables. |
| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
## `rpc_endpoint` subsection
| Parameter | Type | Default value | Description |
@ -213,7 +209,7 @@ blobstor:
width: 4
- type: fstree
path: /path/to/blobstor/blobovnicza
perm: 0o644
perm: 0644
size: 4194304
depth: 1
width: 4
@ -273,7 +269,7 @@ gc:
```yaml
metabase:
path: /path/to/meta.db
perm: 0o644
perm: 0644
max_batch_size: 200
max_batch_delay: 20ms
```
@ -363,7 +359,6 @@ limits:
| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. |
| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. |
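For orientation, here is a minimal `limits` fragment consistent with the parameters in this table; the tag names match the ones used elsewhere in this config, but all values are illustrative:

```yaml
limits:
  read:
    max_running_ops: 1000   # concurrent read operations allowed
    max_waiting_ops: 100    # queued reads before requests are rejected
    tags:
      - tag: client
        weight: 70          # share of scheduler capacity
      - tag: policer
        weight: 5
        limit_ops: 25000    # hard ops/s cap for this tag
```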
# `node` section

8
go.mod
View file

@ -6,13 +6,13 @@ require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/VictoriaMetrics/easyproto v0.1.4

16
go.sum
View file

@ -4,22 +4,22 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d h1:ZLKDupw362Ciing7kdIZhDYGMyo2QZyJ6sS/8X9QWJ0=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d/go.mod h1:2PWt5GwJTnhjHp+mankcfCeAJBMn7puxPm+RS+lliVk=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 h1:WqWxCnCl2ekfjWja/CpGeF2rf4h0x199xhdnsm/j+E8=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 h1:V0a7ia84ZpSM2YxpJq1SKLQfeYmsqFWqcxwweBHJIzc=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=

View file

@ -1,25 +1,9 @@
package assert
import (
"fmt"
"strings"
)
import "strings"
func True(cond bool, details ...string) {
if !cond {
panic(strings.Join(details, " "))
}
}
func False(cond bool, details ...string) {
if cond {
panic(strings.Join(details, " "))
}
}
func NoError(err error, details ...string) {
if err != nil {
content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " "))
panic(content)
}
}

View file

@ -512,8 +512,7 @@ const (
FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
FailedToUpdateNetmapCandidates = "update netmap candidates failed"
)

View file

@ -1,31 +0,0 @@
package qos
import (
"math"
"time"
)
const (
NoLimit int64 = math.MaxInt64
DefaultIdleTimeout = 5 * time.Minute
)
type LimiterConfig struct {
Read OpConfig
Write OpConfig
}
type OpConfig struct {
MaxWaitingOps int64
MaxRunningOps int64
IdleTimeout time.Duration
Tags []IOTagConfig
}
type IOTagConfig struct {
Tag string
Weight *float64
LimitOps *float64
ReservedOps *float64
Prohibited bool
}

View file

@ -22,7 +22,7 @@ var (
errTest = errors.New("mock")
errWrongTag = errors.New("wrong tag")
errNoTag = errors.New("failed to get tag from context")
errResExhausted *apistatus.ResourceExhausted
errResExhausted = new(apistatus.ResourceExhausted)
tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync}
)
@ -37,36 +37,38 @@ func (m *mockGRPCServerStream) Context() context.Context {
}
type limiter struct {
acquired bool
released bool
}
func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) {
l.acquired = true
if key != okKey {
return nil, false
}
return func() { l.released = true }, true
}
func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) (bool, error) {
interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })
called := false
handler := func(ctx context.Context, req any) (any, error) {
called = true
return nil, errTest
}
_, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler)
return err
return called, err
}
func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) (bool, error) {
interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })
called := false
handler := func(srv any, stream grpc.ServerStream) error {
called = true
return errTest
}
err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{
FullMethod: methodName,
}, handler)
return err
return called, err
}
func Test_MaxActiveRPCLimiter(t *testing.T) {
@ -74,61 +76,55 @@ func Test_MaxActiveRPCLimiter(t *testing.T) {
t.Run("unary fail", func(t *testing.T) {
var lim limiter
err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
require.ErrorAs(t, err, &errResExhausted)
require.True(t, lim.acquired)
require.False(t, lim.released)
called, err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
require.EqualError(t, err, errResExhausted.Error())
require.False(t, called)
})
t.Run("unary pass critical", func(t *testing.T) {
var lim limiter
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
require.ErrorIs(t, err, errTest)
require.False(t, lim.acquired)
called, err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
require.EqualError(t, err, errTest.Error())
require.True(t, called)
require.False(t, lim.released)
})
t.Run("unary pass", func(t *testing.T) {
var lim limiter
err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
require.ErrorIs(t, err, errTest)
require.True(t, lim.acquired)
require.True(t, lim.released)
called, err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
require.EqualError(t, err, errTest.Error())
require.True(t, called && lim.released)
})
// StreamServerInterceptor
t.Run("stream fail", func(t *testing.T) {
var lim limiter
err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
require.ErrorAs(t, err, &errResExhausted)
require.True(t, lim.acquired)
require.False(t, lim.released)
called, err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
require.EqualError(t, err, errResExhausted.Error())
require.False(t, called)
})
t.Run("stream pass critical", func(t *testing.T) {
var lim limiter
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
err := streamMaxActiveRPCLimiter(ctx, &lim, "")
require.ErrorIs(t, err, errTest)
require.False(t, lim.acquired)
called, err := streamMaxActiveRPCLimiter(ctx, &lim, "")
require.EqualError(t, err, errTest.Error())
require.True(t, called)
require.False(t, lim.released)
})
t.Run("stream pass", func(t *testing.T) {
var lim limiter
err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
require.ErrorIs(t, err, errTest)
require.True(t, lim.acquired)
require.True(t, lim.released)
called, err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
require.EqualError(t, err, errTest.Error())
require.True(t, called && lim.released)
})
}
func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor()
called := false
handler := func(ctx context.Context, req any) (any, error) {
called = true
if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() {
return nil, nil
}
@ -136,7 +132,6 @@ func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
}
_, err := interceptor(context.Background(), nil, nil, handler)
require.NoError(t, err)
require.True(t, called)
}
func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) {

View file

@ -8,6 +8,7 @@ import (
"sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
"git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@ -36,15 +37,15 @@ type scheduler interface {
Close()
}
func NewLimiter(c LimiterConfig) (Limiter, error) {
if err := c.Validate(); err != nil {
func NewLimiter(c *limits.Config) (Limiter, error) {
if err := validateConfig(c); err != nil {
return nil, err
}
readScheduler, err := createScheduler(c.Read)
readScheduler, err := createScheduler(c.Read())
if err != nil {
return nil, fmt.Errorf("create read scheduler: %w", err)
}
writeScheduler, err := createScheduler(c.Write)
writeScheduler, err := createScheduler(c.Write())
if err != nil {
return nil, fmt.Errorf("create write scheduler: %w", err)
}
@ -62,8 +63,8 @@ func NewLimiter(c LimiterConfig) (Limiter, error) {
return l, nil
}
func createScheduler(config OpConfig) (scheduler, error) {
if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit {
func createScheduler(config limits.OpConfig) (scheduler, error) {
if len(config.Tags) == 0 && config.MaxWaitingOps == limits.NoLimit {
return newSemaphoreScheduler(config.MaxRunningOps), nil
}
return scheduling.NewMClock(
@ -71,7 +72,7 @@ func createScheduler(config OpConfig) (scheduler, error) {
converToSchedulingTags(config.Tags), config.IdleTimeout)
}
func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo {
func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
result := make(map[string]scheduling.TagInfo)
for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
result[tag.String()] = scheduling.TagInfo{
@ -89,7 +90,6 @@ func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo
if l.ReservedOps != nil && *l.ReservedOps != 0 {
v.ReservedIOPS = l.ReservedOps
}
v.Prohibited = l.Prohibited
result[l.Tag] = v
}
return result
@ -164,7 +164,8 @@ func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (R
rel, err := s.RequestArrival(ctx, tag)
stat.inProgress.Add(1)
if err != nil {
if isResourceExhaustedErr(err) {
if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
errors.Is(err, errSemaphoreLimitExceeded) {
stat.resourceExhausted.Add(1)
return nil, &apistatus.ResourceExhausted{}
}
@ -233,9 +234,3 @@ func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation s
metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
}
}
func isResourceExhaustedErr(err error) bool {
return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
errors.Is(err, errSemaphoreLimitExceeded) ||
errors.Is(err, scheduling.ErrTagRequestsProhibited)
}
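As a usage sketch (not part of this change), the acquire/release discipline the limiter expects around a single operation looks like the following; `do` stands in for the actual storage work, and the function is assumed to live in the same `qos` package as `Limiter`:

```go
// serveRead acquires a read slot from the limiter, runs do, and releases
// the slot. When the scheduler is over its limits, ReadRequest returns
// apistatus.ResourceExhausted and the operation is rejected.
func serveRead(ctx context.Context, l Limiter, do func() error) error {
	release, err := l.ReadRequest(ctx)
	if err != nil {
		return err
	}
	defer release()
	return do()
}
```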

View file

@ -4,6 +4,8 @@ import (
"errors"
"fmt"
"math"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
)
var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any")
@ -12,17 +14,17 @@ type tagConfig struct {
Shares, Limit, Reserved *float64
}
func (c *LimiterConfig) Validate() error {
if err := validateOpConfig(c.Read); err != nil {
func validateConfig(c *limits.Config) error {
if err := validateOpConfig(c.Read()); err != nil {
return fmt.Errorf("limits 'read' section validation error: %w", err)
}
if err := validateOpConfig(c.Write); err != nil {
if err := validateOpConfig(c.Write()); err != nil {
return fmt.Errorf("limits 'write' section validation error: %w", err)
}
return nil
}
func validateOpConfig(c OpConfig) error {
func validateOpConfig(c limits.OpConfig) error {
if c.MaxRunningOps <= 0 {
return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps)
}
@ -38,7 +40,7 @@ func validateOpConfig(c OpConfig) error {
return nil
}
func validateTags(configTags []IOTagConfig) error {
func validateTags(configTags []limits.IOTagConfig) error {
tags := map[IOTag]tagConfig{
IOTagBackground: {},
IOTagClient: {},

View file

@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -411,11 +410,11 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
&utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
},
CurrentEpoch: curEpoch,
currentEpoch: curEpoch,
},
),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@ -484,12 +483,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
&utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
CurrentEpoch: curEpoch,
currentEpoch: curEpoch,
},
),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@ -560,12 +559,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
&utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
CurrentEpoch: curEpoch,
currentEpoch: curEpoch,
},
),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@ -597,3 +596,26 @@ func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container
func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
return nil, nil
}
type testNetmapSource struct {
netmaps map[uint64]*netmap.NetMap
currentEpoch uint64
}
func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
if diff >= s.currentEpoch {
return nil, fmt.Errorf("invalid diff")
}
return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
}
func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, fmt.Errorf("netmap not found")
}
func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) {
return s.currentEpoch, nil
}

View file

@ -13,13 +13,6 @@ type ECInfo struct {
Total uint32
}
func (v *ECInfo) String() string {
if v == nil {
return "<nil>"
}
return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total)
}
// Info groups object address with its FrostFS
// object info.
type Info struct {
@ -30,5 +23,5 @@ type Info struct {
}
func (v Info) String() string {
return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo)
return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject)
}

View file

@ -50,7 +50,7 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper,
var err error
s.netmapProcessor, err = netmap.New(&netmap.Params{
Log: s.log.WithTag(logger.TagProcessor),
Log: s.log,
Metrics: s.irMetrics,
PoolSize: poolSize,
NetmapClient: netmap.NewNetmapClient(s.netmapClient),
@ -159,7 +159,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
} else {
// create governance processor
governanceProcessor, err := governance.New(&governance.Params{
Log: s.log.WithTag(logger.TagProcessor),
Log: s.log,
Metrics: s.irMetrics,
FrostFSClient: frostfsCli,
AlphabetState: s,
@ -225,7 +225,7 @@ func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) er
// create alphabet processor
s.alphabetProcessor, err = alphabet.New(&alphabet.Params{
ParsedWallets: parsedWallets,
Log: s.log.WithTag(logger.TagProcessor),
Log: s.log,
Metrics: s.irMetrics,
PoolSize: poolSize,
AlphabetContracts: s.contracts.alphabet,
@ -247,7 +247,7 @@ func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, c
s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize))
// container processor
containerProcessor, err := cont.New(&cont.Params{
Log: s.log.WithTag(logger.TagProcessor),
Log: s.log,
Metrics: s.irMetrics,
PoolSize: poolSize,
AlphabetState: s,
@ -268,7 +268,7 @@ func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, fro
s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize))
// create balance processor
balanceProcessor, err := balance.New(&balance.Params{
Log: s.log.WithTag(logger.TagProcessor),
Log: s.log,
Metrics: s.irMetrics,
PoolSize: poolSize,
FrostFSClient: frostfsCli,
@ -291,7 +291,7 @@ func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Vip
s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize))
frostfsProcessor, err := frostfs.New(&frostfs.Params{
Log: s.log.WithTag(logger.TagProcessor),
Log: s.log,
Metrics: s.irMetrics,
PoolSize: poolSize,
FrostFSContract: s.contracts.frostfs,
@ -342,7 +342,7 @@ func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logg
controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient,
controlsrv.WithAllowedKeys(authKeys),
), log.WithTag(logger.TagGrpcSvc), audit)
), log, audit)
grpcControlSrv := grpc.NewServer()
control.RegisterControlServiceServer(grpcControlSrv, controlSvc)
@ -458,7 +458,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
}
morphChain := &chainParams{
log: s.log.WithTag(logger.TagMorph),
log: s.log,
cfg: cfg,
key: s.key,
name: morphPrefix,

View file

@ -339,7 +339,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
) (*Server, error) {
var err error
server := &Server{
log: log.WithTag(logger.TagIr),
log: log,
irMetrics: metrics,
cmode: cmode,
}

View file

@ -141,8 +141,8 @@ func (b *sharedDB) SystemPath() string {
return b.path
}
// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
type levelDBManager struct {
// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree.
type levelDbManager struct {
dbMtx *sync.RWMutex
databases map[uint64]*sharedDB
@ -157,8 +157,8 @@ type levelDBManager struct {
func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string,
readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger,
) *levelDBManager {
result := &levelDBManager{
) *levelDbManager {
result := &levelDbManager{
databases: make(map[uint64]*sharedDB),
dbMtx: &sync.RWMutex{},
@ -173,7 +173,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st
return result
}
func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB {
func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
res := m.getDBIfExists(idx)
if res != nil {
return res
@ -181,14 +181,14 @@ func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB {
return m.getOrCreateDB(idx)
}
func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB {
func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB {
m.dbMtx.RLock()
defer m.dbMtx.RUnlock()
return m.databases[idx]
}
func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB {
func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB {
m.dbMtx.Lock()
defer m.dbMtx.Unlock()
@ -202,7 +202,7 @@ func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB {
return db
}
func (m *levelDBManager) hasAnyDB() bool {
func (m *levelDbManager) hasAnyDB() bool {
m.dbMtx.RLock()
defer m.dbMtx.RUnlock()
@ -213,7 +213,7 @@ func (m *levelDBManager) hasAnyDB() bool {
//
// The blobovnicza opens at the first request, closes after the last request.
type dbManager struct {
levelToManager map[string]*levelDBManager
levelToManager map[string]*levelDbManager
levelToManagerGuard *sync.RWMutex
closedFlag *atomic.Bool
dbCounter *openDBCounter
@ -231,7 +231,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool,
options: options,
readOnly: readOnly,
metrics: metrics,
levelToManager: make(map[string]*levelDBManager),
levelToManager: make(map[string]*levelDbManager),
levelToManagerGuard: &sync.RWMutex{},
log: log,
closedFlag: &atomic.Bool{},
@ -266,7 +266,7 @@ func (m *dbManager) Close() {
m.dbCounter.WaitUntilAllClosed()
}
func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
result := m.getLevelManagerIfExists(lvlPath)
if result != nil {
return result
@ -274,14 +274,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager {
return m.getOrCreateLevelManager(lvlPath)
}
func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager {
func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager {
m.levelToManagerGuard.RLock()
defer m.levelToManagerGuard.RUnlock()
return m.levelToManager[lvlPath]
}
func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager {
func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager {
m.levelToManagerGuard.Lock()
defer m.levelToManagerGuard.Unlock()

View file

@ -328,7 +328,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo
return nil
}
func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) {
func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
@ -341,7 +341,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB)
b.dbFilesGuard.Lock()
defer b.dbFilesGuard.Unlock()
if err := shDB.CloseAndRemoveFile(ctx); err != nil {
if err := shDb.CloseAndRemoveFile(ctx); err != nil {
return false, err
}
b.commondbManager.CleanResources(path)

View file

@ -153,5 +153,5 @@ func WithMetrics(m Metrics) Option {
}
func (b *BlobStor) Compressor() *compression.Config {
return &b.compression
return &b.cfg.compression
}

View file

@ -50,7 +50,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc)
_, err := s.Iterate(context.Background(), iterPrm)
require.NoError(t, err)
require.Len(t, objects, len(seen))
require.Equal(t, len(objects), len(seen))
for i := range objects {
d, ok := seen[objects[i].addr.String()]
require.True(t, ok)

View file

@ -74,7 +74,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm)
var csPrm shard.ContainerSizePrm
csPrm.SetContainerID(prm.cnr)
csRes, err := sh.ContainerSize(ctx, csPrm)
csRes, err := sh.Shard.ContainerSize(ctx, csPrm)
if err != nil {
e.reportShardError(ctx, sh, "can't get container size", err,
zap.Stringer("container_id", prm.cnr))
@ -119,7 +119,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes {
uniqueIDs := make(map[string]cid.ID)
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
res, err := sh.ListContainers(ctx, shard.ListContainersPrm{})
res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{})
if err != nil {
e.reportShardError(ctx, sh, "can't get list of containers", err)
return false

View file

@ -22,6 +22,10 @@ type shardInitError struct {
// Open opens all StorageEngine's components.
func (e *StorageEngine) Open(ctx context.Context) error {
return e.open(ctx)
}
func (e *StorageEngine) open(ctx context.Context) error {
e.mtx.Lock()
defer e.mtx.Unlock()
@ -73,7 +77,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
errCh := make(chan shardInitError, len(e.shards))
var eg errgroup.Group
if e.lowMem && e.anyShardRequiresRefill() {
if e.cfg.lowMem && e.anyShardRequiresRefill() {
eg.SetLimit(1)
}
@ -145,11 +149,11 @@ var errClosed = errors.New("storage engine is closed")
func (e *StorageEngine) Close(ctx context.Context) error {
close(e.closeCh)
defer e.wg.Wait()
return e.closeEngine(ctx)
return e.setBlockExecErr(ctx, errClosed)
}
// closes all shards. Never returns an error; shard errors are logged.
func (e *StorageEngine) closeAllShards(ctx context.Context) error {
func (e *StorageEngine) close(ctx context.Context) error {
e.mtx.RLock()
defer e.mtx.RUnlock()
@ -172,23 +176,70 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error {
e.blockExec.mtx.RLock()
defer e.blockExec.mtx.RUnlock()
if e.blockExec.closed {
return errClosed
if e.blockExec.err != nil {
return e.blockExec.err
}
return op()
}
func (e *StorageEngine) closeEngine(ctx context.Context) error {
// setBlockExecErr sets the blocking-execution flag for all data operations according to err:
// - if err != nil, blocks execution. If execution wasn't blocked before, calls the close method
// (if err == errClosed, it additionally releases pools and executions can no longer be resumed).
// - if err == nil, resumes execution. If execution was blocked before, calls the open method.
//
// Can be called concurrently with exec. In this case it waits for all executions to complete.
func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error {
e.blockExec.mtx.Lock()
defer e.blockExec.mtx.Unlock()
if e.blockExec.closed {
prevErr := e.blockExec.err
wasClosed := errors.Is(prevErr, errClosed)
if wasClosed {
return errClosed
}
e.blockExec.closed = true
return e.closeAllShards(ctx)
e.blockExec.err = err
if err == nil {
if prevErr != nil { // block -> ok
return e.open(ctx)
}
} else if prevErr == nil { // ok -> block
return e.close(ctx)
}
// otherwise do nothing
return nil
}
// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err.
// To resume the execution, use ResumeExecution method.
//
// Can be called regardless of previous blocking. If execution wasn't blocked, releases all resources
// similarly to Close. Can be called concurrently with Close and any data-related method (waits for all executions
// to complete). Returns an error if Close has been called before.
//
// Must not be called concurrently with either Open or Init.
//
// Note: technically, passing a nil error resumes execution, but it is recommended to call ResumeExecution
// for that.
func (e *StorageEngine) BlockExecution(err error) error {
return e.setBlockExecErr(context.Background(), err)
}
// ResumeExecution resumes the execution of any data-related operation.
// To block the execution, use BlockExecution method.
//
// Can be called regardless of previous blocking. If execution was blocked, prepares all resources
// similarly to Open. Can be called concurrently with Close and any data-related method (waits for all executions
// to complete). Returns an error if Close has been called before.
//
// Must not be called concurrently with either Open or Init.
func (e *StorageEngine) ResumeExecution() error {
return e.setBlockExecErr(context.Background(), nil)
}
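A minimal lifecycle sketch of this API under the semantics documented above (TestExecBlocks in the next file exercises the same sequence):

```go
// maintenanceCycle illustrates the documented block/resume semantics;
// it is a sketch, not code from this change.
func maintenanceCycle(ctx context.Context, e *StorageEngine) error {
	errMaintenance := errors.New("node is under maintenance")
	if err := e.BlockExecution(errMaintenance); err != nil {
		return err // Close was already called
	}
	// Every data-related operation now fails with errMaintenance.
	if err := e.ResumeExecution(); err != nil {
		return err
	}
	// Data operations are served again. After Close, both data operations
	// and ResumeExecution return errors.
	return e.Close(ctx)
}
```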
type ReConfiguration struct {

View file

@ -2,6 +2,7 @@ package engine
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
@ -11,14 +12,17 @@ import (
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
)
@ -159,6 +163,42 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O
require.Equal(t, 1, shardCount)
}
func TestExecBlocks(t *testing.T) {
e := testNewEngine(t).setShardsNum(t, 2).prepare(t).engine // number doesn't matter in this test, 2 is several but not many
// put some object
obj := testutil.GenerateObjectWithCID(cidtest.ID())
addr := object.AddressOf(obj)
require.NoError(t, Put(context.Background(), e, obj, false))
// block executions
errBlock := errors.New("block exec err")
require.NoError(t, e.BlockExecution(errBlock))
// try to exec some op
_, err := Head(context.Background(), e, addr)
require.ErrorIs(t, err, errBlock)
// resume executions
require.NoError(t, e.ResumeExecution())
_, err = Head(context.Background(), e, addr) // can be any data-related op
require.NoError(t, err)
// close
require.NoError(t, e.Close(context.Background()))
// try exec after close
_, err = Head(context.Background(), e, addr)
require.Error(t, err)
// try to resume
require.Error(t, e.ResumeExecution())
}
func TestPersistentShardID(t *testing.T) {
dir := t.TempDir()

View file

@ -33,8 +33,9 @@ type StorageEngine struct {
wg sync.WaitGroup
blockExec struct {
mtx sync.RWMutex
closed bool
mtx sync.RWMutex
err error
}
evacuateLimiter *evacuationLimiter
}
@ -211,18 +212,12 @@ func New(opts ...Option) *StorageEngine {
opts[i](c)
}
evLimMtx := &sync.RWMutex{}
evLimCond := sync.NewCond(evLimMtx)
return &StorageEngine{
cfg: c,
shards: make(map[string]hashedShard),
closeCh: make(chan struct{}),
setModeCh: make(chan setModeRequest),
evacuateLimiter: &evacuationLimiter{
guard: evLimMtx,
statusCond: evLimCond,
},
cfg: c,
shards: make(map[string]hashedShard),
closeCh: make(chan struct{}),
setModeCh: make(chan setModeRequest),
evacuateLimiter: &evacuationLimiter{},
}
}

View file

@ -2,11 +2,8 @@ package engine
import (
"context"
"fmt"
"path/filepath"
"runtime/debug"
"strings"
"sync"
"sync/atomic"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
@ -160,74 +157,26 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
var _ qos.Limiter = (*testQoSLimiter)(nil)
type testQoSLimiter struct {
t testing.TB
quard sync.Mutex
id int64
readStacks map[int64][]byte
writeStacks map[int64][]byte
t testing.TB
read atomic.Int64
write atomic.Int64
}
func (t *testQoSLimiter) SetMetrics(qos.Metrics) {}
func (t *testQoSLimiter) Close() {
t.quard.Lock()
defer t.quard.Unlock()
var sb strings.Builder
var seqN int
for _, stack := range t.readStacks {
seqN++
sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack)))
}
for _, stack := range t.writeStacks {
seqN++
sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack)))
}
require.True(t.t, seqN == 0, sb.String())
require.Equal(t.t, int64(0), t.read.Load(), "read requests count after limiter close must be 0")
require.Equal(t.t, int64(0), t.write.Load(), "write requests count after limiter close must be 0")
}
func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
t.quard.Lock()
defer t.quard.Unlock()
stack := debug.Stack()
t.id++
id := t.id
if t.readStacks == nil {
t.readStacks = make(map[int64][]byte)
}
t.readStacks[id] = stack
return func() {
t.quard.Lock()
defer t.quard.Unlock()
delete(t.readStacks, id)
}, nil
t.read.Add(1)
return func() { t.read.Add(-1) }, nil
}
func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
t.quard.Lock()
defer t.quard.Unlock()
stack := debug.Stack()
t.id++
id := t.id
if t.writeStacks == nil {
t.writeStacks = make(map[int64][]byte)
}
t.writeStacks[id] = stack
return func() {
t.quard.Lock()
defer t.quard.Unlock()
delete(t.writeStacks, id)
}, nil
t.write.Add(1)
return func() { t.write.Add(-1) }, nil
}
func (t *testQoSLimiter) SetParentID(string) {}

View file

@ -95,7 +95,8 @@ func (s *EvacuationState) StartedAt() *time.Time {
if s == nil {
return nil
}
if s.startedAt.IsZero() {
defaultTime := time.Time{}
if s.startedAt == defaultTime {
return nil
}
return &s.startedAt
@ -105,7 +106,8 @@ func (s *EvacuationState) FinishedAt() *time.Time {
if s == nil {
return nil
}
if s.finishedAt.IsZero() {
defaultTime := time.Time{}
if s.finishedAt == defaultTime {
return nil
}
return &s.finishedAt
@ -139,8 +141,7 @@ type evacuationLimiter struct {
eg *errgroup.Group
cancel context.CancelFunc
guard *sync.RWMutex
statusCond *sync.Cond // used in unit tests
guard sync.RWMutex
}
func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) {
@ -166,7 +167,6 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res
startedAt: time.Now().UTC(),
result: result,
}
l.statusCond.Broadcast()
return l.eg, egCtx, nil
}
@ -182,7 +182,6 @@ func (l *evacuationLimiter) Complete(err error) {
l.state.processState = EvacuateProcessStateCompleted
l.state.errMessage = errMsq
l.state.finishedAt = time.Now().UTC()
l.statusCond.Broadcast()
l.eg = nil
}
@ -217,7 +216,6 @@ func (l *evacuationLimiter) ResetEvacuationStatus() error {
l.state = EvacuationState{}
l.eg = nil
l.cancel = nil
l.statusCond.Broadcast()
return nil
}

View file

@ -204,10 +204,11 @@ func TestEvacuateShardObjects(t *testing.T) {
func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState {
var st *EvacuationState
var err error
e.evacuateLimiter.waitForCompleted()
st, err = e.GetEvacuationState(context.Background())
require.NoError(t, err)
require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
require.Eventually(t, func() bool {
st, err = e.GetEvacuationState(context.Background())
require.NoError(t, err)
return st.ProcessingStatus() == EvacuateProcessStateCompleted
}, 3*time.Second, 10*time.Millisecond)
return st
}
@ -816,12 +817,3 @@ func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) {
t.Logf("evacuate took %v\n", time.Since(start))
require.NoError(t, err)
}
func (l *evacuationLimiter) waitForCompleted() {
l.guard.Lock()
defer l.guard.Unlock()
for l.state.processState != EvacuateProcessStateCompleted {
l.statusCond.Wait()
}
}

View file

@ -227,7 +227,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
var outErr error
e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
locked, err = h.IsLocked(ctx, addr)
locked, err = h.Shard.IsLocked(ctx, addr)
if err != nil {
e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr))
outErr = err
@ -256,7 +256,7 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I
var outErr error
e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
locks, err := h.GetLocks(ctx, addr)
locks, err := h.Shard.GetLocks(ctx, addr)
if err != nil {
e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr))
outErr = err

View file

@ -84,11 +84,17 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
var siErr *objectSDK.SplitInfoError
var eiErr *objectSDK.ECInfoError
if errors.As(err, &eiErr) {
eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr)
if !ok {
return false
eclocked := []oid.ID{locked}
for _, chunk := range eiErr.ECInfo().Chunks {
var objID oid.ID
err = objID.ReadFromV2(chunk.ID)
if err != nil {
e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return false
}
eclocked = append(eclocked, objID)
}
err = sh.Lock(ctx, idCnr, locker, eclocked)
if err != nil {
e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
@ -131,18 +137,3 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo
})
return
}
func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) {
eclocked := []oid.ID{locked}
for _, chunk := range eiErr.ECInfo().Chunks {
var objID oid.ID
err := objID.ReadFromV2(chunk.ID)
if err != nil {
e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr),
zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked))
return nil, false
}
eclocked = append(eclocked, objID)
}
return eclocked, true
}

View file

@ -118,7 +118,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh
return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
}
e.metrics.SetMode(sh.ID().String(), sh.GetMode())
e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
return sh.ID(), nil
}
@ -318,6 +318,8 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M
// HandleNewEpoch notifies every shard about NewEpoch event.
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
ev := shard.EventNewEpoch(epoch)
e.mtx.RLock()
defer e.mtx.RUnlock()
@ -325,7 +327,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
select {
case <-ctx.Done():
return
case sh.NotificationChannel() <- epoch:
case sh.NotificationChannel() <- ev:
default:
e.log.Debug(ctx, logs.ShardEventProcessingInProgress,
zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID()))

View file

@ -376,12 +376,11 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int {
return len(lst)
}
func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) {
bkt := tx.Bucket(item.name)
if bkt != nil {
return bkt.Delete(item.key)
_ = bkt.Delete(item.key) // ignore error, best effort there
}
return nil
}
func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
@ -406,16 +405,19 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
// if list empty, remove the key from <list> bucket
if len(lst) == 0 {
return bkt.Delete(item.key)
_ = bkt.Delete(item.key) // ignore error, best effort there
return nil
}
// if list is not empty, then update it
encodedLst, err := encodeList(lst)
if err != nil {
return err
return nil // ignore error, best effort there
}
return bkt.Put(item.key, encodedLst)
_ = bkt.Put(item.key, encodedLst) // ignore error, best effort there
return nil
}
func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
@ -478,47 +480,35 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error
return ErrUnknownObjectType
}
if err := delUniqueIndexItem(tx, namedBucketItem{
delUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
key: objKey,
}); err != nil {
return err
}
})
} else {
if err := delUniqueIndexItem(tx, namedBucketItem{
delUniqueIndexItem(tx, namedBucketItem{
name: parentBucketName(cnr, bucketName),
key: objKey,
}); err != nil {
return err
}
})
}
if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index
name: smallBucketName(cnr, bucketName),
key: objKey,
}); err != nil {
return err
}
if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
})
delUniqueIndexItem(tx, namedBucketItem{ // remove from root index
name: rootBucketName(cnr, bucketName),
key: objKey,
}); err != nil {
return err
}
})
if expEpoch, ok := hasExpirationEpoch(obj); ok {
if err := delUniqueIndexItem(tx, namedBucketItem{
delUniqueIndexItem(tx, namedBucketItem{
name: expEpochToObjectBucketName,
key: expirationEpochKey(expEpoch, cnr, addr.Object()),
}); err != nil {
return err
}
if err := delUniqueIndexItem(tx, namedBucketItem{
})
delUniqueIndexItem(tx, namedBucketItem{
name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)),
key: objKey,
}); err != nil {
return err
}
})
}
return nil
@ -545,12 +535,10 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
// also drop EC parent root info if current EC chunk is the last one
if !hasAnyChunks {
if err := delUniqueIndexItem(tx, namedBucketItem{
delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(ech.Parent(), make([]byte, objectKeySize)),
}); err != nil {
return err
}
})
}
if ech.ParentSplitParentID() == nil {
@ -584,10 +572,11 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
}
// drop split info
return delUniqueIndexItem(tx, namedBucketItem{
delUniqueIndexItem(tx, namedBucketItem{
name: rootBucketName(cnr, make([]byte, bucketKeySize)),
key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)),
})
return nil
}
func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool {

View file

@ -139,7 +139,8 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,
var containerID cid.ID
var offset []byte
bc := newBucketCache()
graveyardBkt := tx.Bucket(graveyardBucketName)
garbageBkt := tx.Bucket(garbageBucketName)
rawAddr := make([]byte, cidSize, addressKeySize)
@ -168,7 +169,7 @@ loop:
bkt := tx.Bucket(name)
if bkt != nil {
copy(rawAddr, cidRaw)
result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
result, count, cursor, threshold, currEpoch)
if err != nil {
return nil, nil, err
@ -203,10 +204,9 @@ loop:
// selectNFromBucket is similar to selectAllFromBucket but uses a cursor to find
// the object to start selecting from. Ignores inhumed objects.
func selectNFromBucket(
bc *bucketCache,
bkt *bbolt.Bucket, // main bucket
func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
objType objectSDK.Type, // type of the objects stored in the main bucket
graveyardBkt, garbageBkt *bbolt.Bucket, // graveyard and garbage buckets, fetched once by the caller
cidRaw []byte, // container ID prefix, optimization
cnt cid.ID, // container ID
to []objectcore.Info, // listing result
@ -219,6 +219,7 @@ func selectNFromBucket(
cursor = new(Cursor)
}
count := len(to)
c := bkt.Cursor()
k, v := c.First()
@ -230,7 +231,7 @@ func selectNFromBucket(
}
for ; k != nil; k, v = c.Next() {
if len(to) >= limit {
if count >= limit {
break
}
@ -240,8 +241,6 @@ func selectNFromBucket(
}
offset = k
graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
garbageBkt := getGarbageBucket(bc, bkt.Tx())
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
continue
}
@ -252,7 +251,7 @@ func selectNFromBucket(
}
expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
if hasExpEpoch && expEpoch < currEpoch && !objectLocked(bkt.Tx(), cnt, obj) {
continue
}
@ -274,6 +273,7 @@ func selectNFromBucket(
a.SetContainer(cnt)
a.SetObject(obj)
to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
count++
}
return to, offset, cursor, nil
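This side passes the graveyard and garbage buckets in explicitly again and bounds the result with a dedicated count seeded from len(to). The underlying cursor-pagination idiom, sketched independently of the metabase types (pageAfter is a hypothetical helper):

```go
package meta

import (
	"bytes"

	bolt "go.etcd.io/bbolt"
)

// pageAfter returns up to limit keys strictly after 'after' (nil means
// start from the beginning) and the last key visited, which the caller
// persists as the cursor for the next call.
func pageAfter(bkt *bolt.Bucket, after []byte, limit int) (keys [][]byte, last []byte) {
	c := bkt.Cursor()
	k, _ := c.First()
	if after != nil {
		k, _ = c.Seek(after)
		if bytes.Equal(k, after) {
			k, _ = c.Next() // resume strictly after the cursor key
		}
	}
	for ; k != nil && len(keys) < limit; k, _ = c.Next() {
		keys = append(keys, append([]byte(nil), k...)) // copy: k is valid only inside the tx
		last = keys[len(keys)-1]
	}
	return keys, last
}
```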

View file

@ -1582,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
func (t *boltForest) logFromBytes(lm *Move, data []byte) error {
lm.Child = binary.LittleEndian.Uint64(data)
lm.Parent = binary.LittleEndian.Uint64(data[8:])
return lm.FromBytes(data[16:])
return lm.Meta.FromBytes(data[16:])
}
func (t *boltForest) logToBytes(lm *Move) []byte {
w := io.NewBufBinWriter()
size := 8 + 8 + lm.Size() + 1
size := 8 + 8 + lm.Meta.Size() + 1
// if lm.HasOld {
// size += 8 + lm.Old.Meta.Size()
// }
@ -1595,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte {
w.Grow(size)
w.WriteU64LE(lm.Child)
w.WriteU64LE(lm.Parent)
lm.EncodeBinary(w.BinWriter)
lm.Meta.EncodeBinary(w.BinWriter)
// w.WriteBool(lm.HasOld)
// if lm.HasOld {
// w.WriteU64LE(lm.Old.Parent)

View file

@ -177,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
var res []NodeInfo
for _, nodeID := range nodeIDs {
children := s.getChildren(nodeID)
children := s.tree.getChildren(nodeID)
for _, childID := range children {
var found bool
for _, kv := range s.infoMap[childID].Meta.Items {
@ -222,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str
return nil, ErrTreeNotFound
}
children := s.getChildren(nodeID)
children := s.tree.getChildren(nodeID)
res := make([]NodeInfo, 0, len(children))
for _, childID := range children {
res = append(res, NodeInfo{

View file

@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree {
// undo reverts op and changes s in-place.
func (s *memoryTree) undo(op *move) {
if op.HasOld {
s.infoMap[op.Child] = op.Old
s.tree.infoMap[op.Child] = op.Old
} else {
delete(s.infoMap, op.Child)
delete(s.tree.infoMap, op.Child)
}
}
@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move {
},
}
shouldPut := !s.isAncestor(op.Child, op.Parent)
p, ok := s.infoMap[op.Child]
shouldPut := !s.tree.isAncestor(op.Child, op.Parent)
p, ok := s.tree.infoMap[op.Child]
if ok {
lm.HasOld = true
lm.Old = p
@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move {
p.Meta = m
p.Parent = op.Parent
s.infoMap[op.Child] = p
s.tree.infoMap[op.Child] = p
return lm
}
@ -192,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
}
var nodes []Node
var lastTS Timestamp
var lastTs Timestamp
children := t.getChildren(curNode)
for i := range children {
@ -200,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node {
fileName := string(info.Meta.GetAttr(attr))
if fileName == path[len(path)-1] {
if latest {
if info.Meta.Time >= lastTS {
if info.Meta.Time >= lastTs {
nodes = append(nodes[:0], children[i])
}
} else {
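With latest set, the loop keeps only the child whose meta timestamp is greatest for the requested filename; append(nodes[:0], ...) truncates and replaces in place so at most one survivor remains. The same selection, restated as a hypothetical generic helper:

```go
package pilorama

// keepLatest returns the item with the greatest timestamp, preferring
// later entries on ties — mirroring the `>=` comparison in getByPath.
func keepLatest[T any](items []T, ts func(T) uint64) (best T, ok bool) {
	var last uint64
	for _, it := range items {
		if t := ts(it); !ok || t >= last {
			best, last, ok = it, t, true
		}
	}
	return best, ok
}
```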

View file

@ -108,17 +108,19 @@ func (s *Shard) Init(ctx context.Context) error {
s.updateMetrics(ctx)
s.gc = &gc{
gcCfg: &s.gcCfg,
remover: s.removeGarbage,
stopChannel: make(chan struct{}),
newEpochChan: make(chan uint64),
newEpochHandlers: &newEpochHandlers{
cancelFunc: func() {},
handlers: []newEpochHandler{
s.collectExpiredLocks,
s.collectExpiredObjects,
s.collectExpiredTombstones,
s.collectExpiredMetrics,
gcCfg: &s.gcCfg,
remover: s.removeGarbage,
stopChannel: make(chan struct{}),
eventChan: make(chan Event),
mEventHandler: map[eventType]*eventHandlers{
eventNewEpoch: {
cancelFunc: func() {},
handlers: []eventHandler{
s.collectExpiredLocks,
s.collectExpiredObjects,
s.collectExpiredTombstones,
s.collectExpiredMetrics,
},
},
},
}
@ -214,8 +216,8 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
}
eg, egCtx := errgroup.WithContext(ctx)
if s.refillMetabaseWorkersCount > 0 {
eg.SetLimit(s.refillMetabaseWorkersCount)
if s.cfg.refillMetabaseWorkersCount > 0 {
eg.SetLimit(s.cfg.refillMetabaseWorkersCount)
}
var completedCount uint64
@ -363,7 +365,6 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
// Close releases all Shard's components.
func (s *Shard) Close(ctx context.Context) error {
unlock := s.lockExclusive()
if s.rb != nil {
s.rb.Stop(ctx, s.log)
}
@ -389,19 +390,15 @@ func (s *Shard) Close(ctx context.Context) error {
}
}
if s.opsLimiter != nil {
s.opsLimiter.Close()
}
unlock()
// GC waits for handlers and the remover to complete. Handlers may try to take the shard's lock,
// so GC is stopped outside of the exclusive lock to prevent a deadlock.
// If Init/Open was unsuccessful, gc can be nil.
if s.gc != nil {
s.gc.stop(ctx)
}
if s.opsLimiter != nil {
s.opsLimiter.Close()
}
return lastErr
}
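The comment spells out the ordering constraint: GC handlers may take the shard's lock, so gc.stop must run after unlock(). A runnable toy model of why the inverse order deadlocks:

```go
package main

import (
	"fmt"
	"sync"
)

type shard struct {
	mu sync.Mutex
	wg sync.WaitGroup
}

// worker simulates a GC handler that needs the shard lock to finish.
func (s *shard) worker() {
	defer s.wg.Done()
	s.mu.Lock()
	defer s.mu.Unlock()
	fmt.Println("handler finished")
}

func (s *shard) close() {
	s.mu.Lock()
	// ... release components under the lock ...
	s.mu.Unlock() // must come BEFORE waiting for workers
	s.wg.Wait()   // waiting while holding mu would deadlock
}

func main() {
	s := &shard{}
	s.wg.Add(1)
	go s.worker()
	s.close()
}
```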

View file

@ -33,14 +33,41 @@ type TombstoneSource interface {
IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool
}
type newEpochHandler func(context.Context, uint64)
// Event represents a class of external events.
type Event interface {
typ() eventType
}
type newEpochHandlers struct {
type eventType int
const (
_ eventType = iota
eventNewEpoch
)
type newEpoch struct {
epoch uint64
}
func (e newEpoch) typ() eventType {
return eventNewEpoch
}
// EventNewEpoch returns a new epoch event.
func EventNewEpoch(e uint64) Event {
return newEpoch{
epoch: e,
}
}
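The typ() method is what lets the GC route events without reflection; adding a new event kind means a new constant, a new struct, and one more typ() implementation. A sketch with a second, purely hypothetical event kind:

```go
package main

import "fmt"

type eventType int

const (
	_ eventType = iota
	eventNewEpoch
	eventLowSpace // hypothetical second event kind
)

type Event interface{ typ() eventType }

type newEpoch struct{ epoch uint64 }

func (newEpoch) typ() eventType { return eventNewEpoch }

type lowSpace struct{ freeBytes uint64 }

func (lowSpace) typ() eventType { return eventLowSpace }

// describe routes on the concrete event type, the same way a
// per-type handler table would.
func describe(e Event) string {
	switch ev := e.(type) {
	case newEpoch:
		return fmt.Sprintf("new epoch %d", ev.epoch)
	case lowSpace:
		return fmt.Sprintf("low space: %d bytes free", ev.freeBytes)
	default:
		return "unknown event"
	}
}

func main() {
	fmt.Println(describe(newEpoch{epoch: 105}))
	fmt.Println(describe(lowSpace{freeBytes: 1 << 20}))
}
```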
type eventHandler func(context.Context, Event)
type eventHandlers struct {
prevGroup sync.WaitGroup
cancelFunc context.CancelFunc
handlers []newEpochHandler
handlers []eventHandler
}
type gcRunResult struct {
@ -82,10 +109,10 @@ type gc struct {
remover func(context.Context) gcRunResult
// newEpochChan is used only for listening for the new epoch event.
// eventChan is used only for listening for shard events.
// It is ok to keep it open: writers check for context cancellation when sending into it.
newEpochChan chan uint64
newEpochHandlers *newEpochHandlers
eventChan chan Event
mEventHandler map[eventType]*eventHandlers
}
type gcCfg struct {
@ -115,7 +142,15 @@ func defaultGCCfg() gcCfg {
}
func (gc *gc) init(ctx context.Context) {
gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers))
sz := 0
for _, v := range gc.mEventHandler {
sz += len(v.handlers)
}
if sz > 0 {
gc.workerPool = gc.workerPoolInit(sz)
}
ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
gc.wg.Add(2)
go gc.tickRemover(ctx)
@ -133,7 +168,7 @@ func (gc *gc) listenEvents(ctx context.Context) {
case <-ctx.Done():
gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
return
case event, ok := <-gc.newEpochChan:
case event, ok := <-gc.eventChan:
if !ok {
gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
return
@ -144,33 +179,38 @@ func (gc *gc) listenEvents(ctx context.Context) {
}
}
func (gc *gc) handleEvent(ctx context.Context, epoch uint64) {
gc.newEpochHandlers.cancelFunc()
gc.newEpochHandlers.prevGroup.Wait()
func (gc *gc) handleEvent(ctx context.Context, event Event) {
v, ok := gc.mEventHandler[event.typ()]
if !ok {
return
}
v.cancelFunc()
v.prevGroup.Wait()
var runCtx context.Context
runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx)
runCtx, v.cancelFunc = context.WithCancel(ctx)
gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers))
v.prevGroup.Add(len(v.handlers))
for i := range gc.newEpochHandlers.handlers {
for i := range v.handlers {
select {
case <-ctx.Done():
return
default:
}
h := gc.newEpochHandlers.handlers[i]
h := v.handlers[i]
err := gc.workerPool.Submit(func() {
defer gc.newEpochHandlers.prevGroup.Done()
h(runCtx, epoch)
defer v.prevGroup.Done()
h(runCtx, event)
})
if err != nil {
gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
zap.Error(err),
)
gc.newEpochHandlers.prevGroup.Done()
v.prevGroup.Done()
}
}
}
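handleEvent cancels the previous run of the same event type and waits for it to drain before starting handlers under a fresh context. The cancel-and-restart core, extracted into a self-contained sketch:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type restartable struct {
	cancel context.CancelFunc
	prev   sync.WaitGroup
}

// run cancels the previous invocation, waits for it to drain,
// then starts handlers under a fresh context.
func (r *restartable) run(ctx context.Context, handlers []func(context.Context)) {
	r.cancel()
	r.prev.Wait()

	var runCtx context.Context
	runCtx, r.cancel = context.WithCancel(ctx)

	r.prev.Add(len(handlers))
	for _, h := range handlers {
		go func(h func(context.Context)) {
			defer r.prev.Done()
			h(runCtx)
		}(h)
	}
}

func main() {
	r := &restartable{cancel: func() {}}
	slow := func(ctx context.Context) {
		select {
		case <-ctx.Done():
			fmt.Println("previous run cancelled")
		case <-time.After(time.Second):
			fmt.Println("finished")
		}
	}
	r.run(context.Background(), []func(context.Context){slow})
	r.run(context.Background(), []func(context.Context){slow}) // cancels the first
	r.prev.Wait()
}
```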
@ -227,9 +267,6 @@ func (gc *gc) stop(ctx context.Context) {
gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
gc.newEpochHandlers.cancelFunc()
gc.newEpochHandlers.prevGroup.Wait()
}
// iterates over metabase and deletes objects
@ -320,12 +357,12 @@ func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
}
func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount)
batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize)
workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
return
}
func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@ -333,8 +370,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
}()
s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch))
s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@ -343,7 +380,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock {
batch = append(batch, o.Address())
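The handlers rely on the bare e.(newEpoch) assertion, which panics if any other event kind is ever routed to them; the mEventHandler map is what upholds that invariant. A defensive alternative, shown only as a sketch (epochOf is hypothetical):

```go
package shard

type Event interface{ typ() int }

type newEpoch struct{ epoch uint64 }

func (newEpoch) typ() int { return 1 }

// epochOf extracts the epoch defensively; the bare e.(newEpoch)
// assertion panics if another event kind is ever routed here.
func epochOf(e Event) (uint64, bool) {
	ne, ok := e.(newEpoch)
	return ne.epoch, ok
}
```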
@ -449,7 +486,7 @@ func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeR
return s.metaBase.Inhume(ctx, inhumePrm)
}
func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@ -457,6 +494,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone)
}()
epoch := e.(newEpoch).epoch
log := s.log.With(zap.Uint64("epoch", epoch))
log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
@ -489,8 +527,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
return
}
var release qos.ReleaseFunc
release, err = s.opsLimiter.ReadRequest(ctx)
release, err := s.opsLimiter.ReadRequest(ctx)
if err != nil {
log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
@ -528,7 +565,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
}
}
func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@ -536,8 +573,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
}()
s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch))
s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@ -547,14 +584,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() == objectSDK.TypeLock {
batch = append(batch, o.Address())
if len(batch) == batchSize {
expired := batch
errGroup.Go(func() error {
s.expiredLocksCallback(egCtx, epoch, expired)
s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
batch = make([]oid.Address, 0, batchSize)
@ -568,7 +605,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
if len(batch) > 0 {
expired := batch
errGroup.Go(func() error {
s.expiredLocksCallback(egCtx, epoch, expired)
s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
}
@ -667,10 +704,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
// HandleExpiredLocks unlocks all objects which were locked by lockers.
// If successful, marks lockers themselves as garbage.
func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
s.m.RLock()
defer s.m.RUnlock()
if s.info.Mode.NoMetabase() {
if s.GetMode().NoMetabase() {
return
}
@ -733,10 +767,7 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc
// HandleDeletedLocks unlocks all objects which were locked by lockers.
func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
s.m.RLock()
defer s.m.RUnlock()
if s.info.Mode.NoMetabase() {
if s.GetMode().NoMetabase() {
return
}
@ -753,15 +784,17 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
}
}
// NotificationChannel returns channel for new epoch events.
func (s *Shard) NotificationChannel() chan<- uint64 {
return s.gc.newEpochChan
// NotificationChannel returns channel for shard events.
func (s *Shard) NotificationChannel() chan<- Event {
return s.gc.eventChan
}
func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) {
func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics")
defer span.End()
epoch := e.(newEpoch).epoch
s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))

View file

@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
require.NoError(t, err)
epoch.Value = 105
sh.gc.handleEvent(context.Background(), epoch.Value)
sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
var getPrm GetPrm
getPrm.SetAddress(objectCore.AddressOf(obj))
@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
require.True(t, errors.As(err, &splitInfoError), "split info must be provided")
epoch.Value = 105
sh.gc.handleEvent(context.Background(), epoch.Value)
sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires")

View file

@ -45,7 +45,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
}
shardID := s.info.ID.String()
s.metricsWriter.SetShardID(shardID)
s.cfg.metricsWriter.SetShardID(shardID)
if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
s.writeCache.GetMetrics().SetShardID(shardID)
}

View file

@ -218,7 +218,7 @@ func WithWriteCache(use bool) Option {
// hasWriteCache returns bool if write cache exists on shards.
func (s *Shard) hasWriteCache() bool {
return s.useWriteCache
return s.cfg.useWriteCache
}
// NeedRefillMetabase returns true if metabase is needed to be refilled.
@ -379,15 +379,15 @@ func WithLimiter(l qos.Limiter) Option {
}
func (s *Shard) fillInfo() {
s.info.MetaBaseInfo = s.metaBase.DumpInfo()
s.info.BlobStorInfo = s.blobStor.DumpInfo()
s.info.Mode = s.GetMode()
s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
s.cfg.info.Mode = s.GetMode()
if s.useWriteCache {
s.info.WriteCacheInfo = s.writeCache.DumpInfo()
if s.cfg.useWriteCache {
s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo()
}
if s.pilorama != nil {
s.info.PiloramaInfo = s.pilorama.DumpInfo()
s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo()
}
}
@ -454,57 +454,57 @@ func (s *Shard) updateMetrics(ctx context.Context) {
s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
}
s.metricsWriter.SetMode(s.info.Mode)
s.cfg.metricsWriter.SetMode(s.info.Mode)
}
// incObjectCounter increment both physical and logical object
// counters.
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
s.metricsWriter.IncObjectCounter(physical)
s.metricsWriter.IncObjectCounter(logical)
s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
s.cfg.metricsWriter.IncObjectCounter(physical)
s.cfg.metricsWriter.IncObjectCounter(logical)
s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
if isUser {
s.metricsWriter.IncObjectCounter(user)
s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
s.cfg.metricsWriter.IncObjectCounter(user)
s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
}
}
func (s *Shard) decObjectCounterBy(typ string, v uint64) {
if v > 0 {
s.metricsWriter.AddToObjectCounter(typ, -int(v))
s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
}
}
func (s *Shard) setObjectCounterBy(typ string, v uint64) {
if v > 0 {
s.metricsWriter.SetObjectCounter(typ, v)
s.cfg.metricsWriter.SetObjectCounter(typ, v)
}
}
func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
for cnrID, count := range byCnr {
if count.Phy > 0 {
s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
}
if count.Logic > 0 {
s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
}
if count.User > 0 {
s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
}
}
}
func (s *Shard) addToContainerSize(cnr string, size int64) {
if size != 0 {
s.metricsWriter.AddToContainerSize(cnr, size)
s.cfg.metricsWriter.AddToContainerSize(cnr, size)
}
}
func (s *Shard) addToPayloadSize(size int64) {
if size != 0 {
s.metricsWriter.AddToPayloadSize(size)
s.cfg.metricsWriter.AddToPayloadSize(size)
}
}
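These are behavior-neutral rewrites: cfg is embedded in Shard, so s.metricsWriter and s.cfg.metricsWriter name the same field, and the explicit form just makes the indirection visible. A minimal demonstration of that field promotion:

```go
package main

import "fmt"

type cfg struct {
	useWriteCache bool
}

type Shard struct {
	cfg // embedded: fields are promoted
}

func main() {
	s := Shard{cfg{useWriteCache: true}}
	// Both forms read the same field; the explicit one survives
	// refactors that turn the embedded struct into a named field.
	fmt.Println(s.useWriteCache, s.cfg.useWriteCache)
}
```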

View file

@ -151,6 +151,20 @@ func (e *notHaltStateError) Error() string {
)
}
// implementation of error interface for FrostFS-specific errors.
type frostfsError struct {
err error
}
func (e frostfsError) Error() string {
return fmt.Sprintf("frostfs error: %v", e.err)
}
// wraps a FrostFS-specific error into frostfsError. The argument must not be nil.
func wrapFrostFSError(err error) error {
return frostfsError{err}
}
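One property of this wrapper worth noting (an observation, not part of the diff): frostfsError implements only Error(), not Unwrap(), so errors.Is and errors.As stop at the wrapper. A self-contained check:

```go
package main

import (
	"errors"
	"fmt"
)

type frostfsError struct{ err error }

func (e frostfsError) Error() string { return fmt.Sprintf("frostfs error: %v", e.err) }

type notHaltStateError struct{ state string }

func (e *notHaltStateError) Error() string { return "invocation state: " + e.state }

func main() {
	err := frostfsError{err: &notHaltStateError{state: "FAULT"}}

	// Without an Unwrap method the error chain stops at the wrapper,
	// so errors.As cannot reach the inner *notHaltStateError.
	var nh *notHaltStateError
	fmt.Println(errors.As(err, &nh)) // false

	// The wrapper type itself is still matchable.
	var fe frostfsError
	fmt.Println(errors.As(err, &fe)) // true
}
```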
// Invoke invokes contract method by sending transaction into blockchain.
// Returns valid until block value.
// Supported args types: int64, string, util.Uint160, []byte and bool.
@ -214,7 +228,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
if err != nil {
return err
} else if val.State != HaltState {
return &notHaltStateError{state: val.State, exception: val.FaultException}
return wrapFrostFSError(&notHaltStateError{state: val.State, exception: val.FaultException})
}
arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err)
@ -278,7 +292,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) (
}
if val.State != HaltState {
return nil, &notHaltStateError{state: val.State, exception: val.FaultException}
return nil, wrapFrostFSError(&notHaltStateError{state: val.State, exception: val.FaultException})
}
success = true

View file

@ -2,6 +2,7 @@ package netmap
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
@ -25,24 +26,44 @@ const (
// MaxObjectSize receives max object size configuration
// value through the Netmap contract call.
func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) {
return c.readUInt64Config(ctx, MaxObjectSizeConfig)
objectSize, err := c.readUInt64Config(ctx, MaxObjectSizeConfig)
if err != nil {
return 0, err
}
return objectSize, nil
}
// EpochDuration returns number of sidechain blocks per one FrostFS epoch.
func (c *Client) EpochDuration(ctx context.Context) (uint64, error) {
return c.readUInt64Config(ctx, EpochDurationConfig)
epochDuration, err := c.readUInt64Config(ctx, EpochDurationConfig)
if err != nil {
return 0, err
}
return epochDuration, nil
}
// ContainerFee returns fee paid by container owner to each alphabet node
// for container registration.
func (c *Client) ContainerFee(ctx context.Context) (uint64, error) {
return c.readUInt64Config(ctx, ContainerFeeConfig)
fee, err := c.readUInt64Config(ctx, ContainerFeeConfig)
if err != nil {
return 0, err
}
return fee, nil
}
// ContainerAliasFee returns additional fee paid by container owner to each
// alphabet node for container nice name registration.
func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) {
return c.readUInt64Config(ctx, ContainerAliasFeeConfig)
fee, err := c.readUInt64Config(ctx, ContainerAliasFeeConfig)
if err != nil {
return 0, err
}
return fee, nil
}
// HomomorphicHashDisabled returns global configuration value of homomorphic hashing
@ -56,13 +77,23 @@ func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) {
// InnerRingCandidateFee returns global configuration value of fee paid by
// node to be in inner ring candidates list.
func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) {
return c.readUInt64Config(ctx, IrCandidateFeeConfig)
fee, err := c.readUInt64Config(ctx, IrCandidateFeeConfig)
if err != nil {
return 0, err
}
return fee, nil
}
// WithdrawFee returns global configuration value of fee paid by user to
// withdraw assets from FrostFS contract.
func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) {
return c.readUInt64Config(ctx, WithdrawFeeConfig)
fee, err := c.readUInt64Config(ctx, WithdrawFeeConfig)
if err != nil {
return 0, err
}
return fee, nil
}
// MaintenanceModeAllowed reads admission of "maintenance" state from the
@ -75,27 +106,29 @@ func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) {
}
func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) {
v, err := c.config(ctx, []byte(key))
v, err := c.config(ctx, []byte(key), IntegerAssert)
if err != nil {
return 0, fmt.Errorf("read netconfig value '%s': %w", key, err)
}
bi, err := v.TryInteger()
if err != nil {
return 0, err
}
return bi.Uint64(), nil
// IntegerAssert is guaranteed to return int64 if the error is nil.
return uint64(v.(int64)), nil
}
// reads boolean value by the given key from the FrostFS network configuration
// stored in the Sidechain. Returns false if the key is not present.
func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) {
v, err := c.config(ctx, []byte(key))
v, err := c.config(ctx, []byte(key), BoolAssert)
if err != nil {
if errors.Is(err, ErrConfigNotFound) {
return false, nil
}
return false, fmt.Errorf("read netconfig value '%s': %w", key, err)
}
return v.TryBool()
// BoolAssert is guaranteed to return bool if the error is nil.
return v.(bool), nil
}
// SetConfigPrm groups parameters of SetConfig operation.
@ -244,11 +277,15 @@ func bytesToBool(val []byte) bool {
return false
}
// ErrConfigNotFound is returned when the requested key was not found
// in the network config (returned value is `Null`).
var ErrConfigNotFound = errors.New("config value not found")
// config performs a test invocation of the get-config-value
// method of the FrostFS Netmap contract.
//
// Returns ErrConfigNotFound if config key is not found in the contract.
func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) {
func (c *Client) config(ctx context.Context, key []byte, assert func(stackitem.Item) (any, error)) (any, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(configMethod)
prm.SetArgs(key)
@ -264,7 +301,26 @@ func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error)
configMethod, ln)
}
return items[0], nil
if _, ok := items[0].(stackitem.Null); ok {
return nil, ErrConfigNotFound
}
return assert(items[0])
}
// IntegerAssert converts stack item to int64.
func IntegerAssert(item stackitem.Item) (any, error) {
return client.IntFromStackItem(item)
}
// StringAssert converts stack item to string.
func StringAssert(item stackitem.Item) (any, error) {
return client.StringFromStackItem(item)
}
// BoolAssert converts stack item to bool.
func BoolAssert(item stackitem.Item) (any, error) {
return client.BoolFromStackItem(item)
}
// iterateRecords iterates over all config records and passes them to f.
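config now maps a Null stack item to ErrConfigNotFound and delegates type conversion to a caller-supplied assert callback, so each reader picks the conversion matching its expected type. The same callback pattern, reduced to a runnable sketch with hypothetical names:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("config value not found")

// fetch simulates a contract read: a missing key maps to errNotFound,
// and type conversion is delegated to the assert callback.
func fetch(store map[string]any, key string, assert func(any) (any, error)) (any, error) {
	v, ok := store[key]
	if !ok {
		return nil, errNotFound
	}
	return assert(v)
}

// intAssert is guaranteed to return int64 if the error is nil.
func intAssert(v any) (any, error) {
	n, ok := v.(int64)
	if !ok {
		return nil, fmt.Errorf("expected integer, got %T", v)
	}
	return n, nil
}

func main() {
	store := map[string]any{"MaxObjectSize": int64(67108864)}

	v, err := fetch(store, "MaxObjectSize", intAssert)
	fmt.Println(v, err) // 67108864 <nil>

	_, err = fetch(store, "MissingKey", intAssert)
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```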

Some files were not shown because too many files have changed in this diff.