Compare commits


2 commits

Author SHA1 Message Date
a0c261104e [#1512] object: Fix writePart for EC-container
* Immediately return after `ObjectAlreadyRemoved` error.

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-25 11:23:28 +03:00
213298c53f [#1512] object: Make raw PutSingle check status within response
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-24 21:26:31 +03:00
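
The two commits above adjust error handling around PutSingle: the raw call now checks the status carried inside the response, and writePart for EC containers returns immediately once it sees an `ObjectAlreadyRemoved` error. A minimal sketch of that control flow, under the assumption that writePart iterates over candidate nodes; every name here (putSingleRaw, errObjectAlreadyRemoved, the writePart signature) is a hypothetical stand-in rather than the actual frostfs-node code:

```go
package object

import (
	"errors"
	"fmt"
)

// errObjectAlreadyRemoved is a hypothetical stand-in for the SDK status
// error that the raw PutSingle call now surfaces from the response body.
var errObjectAlreadyRemoved = errors.New("object already removed")

// putSingleRaw stands in for the raw PutSingle call; after the change it
// converts the status inside the response into an error and returns it.
func putSingleRaw(node string) error {
	// ... perform the request and map the response status to an error ...
	_ = node
	return nil
}

// writePart mirrors the described behaviour: try candidate nodes one by one,
// but return immediately once the object is reported as already removed,
// since retrying the remaining nodes cannot succeed.
func writePart(nodes []string) error {
	var lastErr error
	for _, n := range nodes {
		err := putSingleRaw(n)
		if err == nil {
			return nil
		}
		if errors.Is(err, errObjectAlreadyRemoved) {
			return err
		}
		lastErr = err
	}
	return fmt.Errorf("write part failed on all nodes: %w", lastErr)
}
```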
352 changed files with 2230 additions and 6054 deletions


@ -1,28 +0,0 @@
name: OCI image
on:
push:
workflow_dispatch:
jobs:
image:
name: Build container images
runs-on: docker
container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
steps:
- name: Clone git repo
uses: actions/checkout@v3
- name: Build OCI image
run: make images
- name: Push image to OCI registry
run: |
echo "$REGISTRY_PASSWORD" \
| docker login --username truecloudlab --password-stdin git.frostfs.info
make push-images
if: >-
startsWith(github.ref, 'refs/tags/v') &&
(github.event_name == 'workflow_dispatch' || github.event_name == 'push')
env:
REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}


@ -19,7 +19,6 @@ jobs:
uses: actions/setup-go@v3
with:
go-version: '1.23'
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest


@ -22,11 +22,6 @@ linters-settings:
# 'default' case is present, even if all enum members aren't listed in the
# switch
default-signifies-exhaustive: true
gci:
sections:
- standard
- default
custom-order: true
govet:
# report about shadowed variables
check-shadowing: false
@ -77,7 +72,6 @@ linters:
- durationcheck
- exhaustive
- copyloopvar
- gci
- gofmt
- goimports
- misspell
@ -95,7 +89,5 @@ linters:
- protogetter
- intrange
- tenv
- unconvert
- unparam
disable-all: true
fast: false


@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.62.2
LINT_VERSION ?= 1.62.0
TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
@ -139,15 +139,6 @@ images: image-storage image-ir image-cli image-adm
# Build dirty local Docker images
dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
# Push FrostFS components' docker image to the registry
push-image-%:
@echo "⇒ Publish FrostFS $* docker image "
@docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
# Push all Docker images to the registry
.PHONY: push-images
push-images: push-image-storage push-image-ir push-image-cli push-image-adm
# Run `make %` in Golang container
docker/%:
docker run --rm -t \


@ -16,16 +16,9 @@ const (
EndpointFlagDesc = "N3 RPC node endpoint"
EndpointFlagShort = "r"
WalletPath = "wallet"
WalletPathShorthand = "w"
WalletPathUsage = "Path to the wallet"
AlphabetWalletsFlag = "alphabet-wallets"
AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
AdminWalletPath = "wallet-admin"
AdminWalletUsage = "Path to the admin wallet"
LocalDumpFlag = "local-dump"
ProtoConfigPath = "protocol"
ContractsInitFlag = "contracts"


@ -28,7 +28,6 @@ const (
var (
errNoPathsFound = errors.New("no metabase paths found")
errNoMorphEndpointsFound = errors.New("no morph endpoints found")
errUpgradeFailed = errors.New("upgrade failed")
)
var UpgradeCmd = &cobra.Command{
@ -92,19 +91,14 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err := eg.Wait(); err != nil {
return err
}
allSuccess := true
for mb, ok := range result {
if ok {
cmd.Println(mb, ": success")
} else {
cmd.Println(mb, ": failed")
allSuccess = false
}
}
if allSuccess {
return nil
}
return errUpgradeFailed
return nil
}
func getMetabasePaths(appCfg *config.Config) ([]string, error) {


@ -3,8 +3,6 @@ package ape
import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
@ -78,8 +76,7 @@ func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *he
c, err := helper.NewRemoteClient(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName)
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
var ch util.Uint160


@ -65,14 +65,14 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
nbuf := make([]byte, 8)
copy(nbuf[:], v)
n := binary.LittleEndian.Uint64(nbuf)
_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
return helper.InvalidConfigValueErr(k)
}
_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
default:
_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
}
}
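
The only difference in this hunk is how the formatted line is produced: fmt.Appendf(nil, ...) (available since Go 1.19) and []byte(fmt.Sprintf(...)) yield identical bytes, the former just skips the intermediate string allocation. A small self-contained check (the key and value below are sample data, not taken from the code):

```go
package main

import (
	"bytes"
	"fmt"
	"text/tabwriter"
)

func main() {
	a := fmt.Appendf(nil, "%s:\t%d (int)\n", "MaxObjectSize", 67108864)
	b := []byte(fmt.Sprintf("%s:\t%d (int)\n", "MaxObjectSize", 67108864))
	fmt.Println(bytes.Equal(a, b)) // true: both forms yield identical bytes

	// Either form can be fed to a tabwriter, as in dumpNetworkConfig above.
	buf := bytes.NewBuffer(nil)
	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
	_, _ = tw.Write(a)
	_ = tw.Flush()
	fmt.Print(buf.String())
}
```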


@ -219,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
if info.version == "" {
info.version = "unknown"
}
_, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
info.name, info.version, info.hash.StringLE()))
_, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
info.name, info.version, info.hash.StringLE())))
}
_ = tw.Flush()


@ -1,7 +1,6 @@
package frostfsid
import (
"errors"
"fmt"
"math/big"
"sort"
@ -39,11 +38,6 @@ const (
groupIDFlag = "group-id"
rootNamespacePlaceholder = "<root>"
keyFlag = "key"
keyDescFlag = "Key for storing a value in the subject's KV storage"
valueFlag = "value"
valueDescFlag = "Value to be stored in the subject's KV storage"
)
var (
@ -157,23 +151,6 @@ var (
},
Run: frostfsidListGroupSubjects,
}
frostfsidSetKVCmd = &cobra.Command{
Use: "set-kv",
Short: "Store a key-value pair in the subject's KV storage",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidSetKV,
}
frostfsidDeleteKVCmd = &cobra.Command{
Use: "delete-kv",
Short: "Delete a value from the subject's KV storage",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidDeleteKV,
}
)
func initFrostfsIDCreateNamespaceCmd() {
@ -259,21 +236,6 @@ func initFrostfsIDListGroupSubjectsCmd() {
frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
}
func initFrostfsIDSetKVCmd() {
Cmd.AddCommand(frostfsidSetKVCmd)
frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag)
frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag)
}
func initFrostfsIDDeleteKVCmd() {
Cmd.AddCommand(frostfsidDeleteKVCmd)
frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag)
}
func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
ns := getFrostfsIDNamespace(cmd)
@ -291,7 +253,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaces()
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
namespaces, err := frostfsidclient.ParseNamespaces(items)
@ -343,7 +305,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListNamespaceSubjects(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
@ -357,7 +319,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListSubjects()
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subj, err := frostfsidclient.ParseSubject(items)
@ -403,7 +365,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroups(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
groups, err := frostfsidclient.ParseGroups(items)
commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
@ -441,45 +403,6 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err)
}
func frostfsidSetKV(cmd *cobra.Command, _ []string) {
subjectAddress := getFrostfsIDSubjectAddress(cmd)
key, _ := cmd.Flags().GetString(keyFlag)
value, _ := cmd.Flags().GetString(valueFlag)
if key == "" {
commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
}
ffsid, err := newFrostfsIDClient(cmd)
commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value)
ffsid.addCall(method, args)
err = ffsid.sendWait()
commonCmd.ExitOnErr(cmd, "set KV: %w", err)
}
func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
subjectAddress := getFrostfsIDSubjectAddress(cmd)
key, _ := cmd.Flags().GetString(keyFlag)
if key == "" {
commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
}
ffsid, err := newFrostfsIDClient(cmd)
commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key)
ffsid.addCall(method, args)
err = ffsid.sendWait()
commonCmd.ExitOnErr(cmd, "delete KV: %w", err)
}
func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
ns := getFrostfsIDNamespace(cmd)
groupID := getFrostfsIDGroupID(cmd)
@ -492,7 +415,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
items, err := readIterator(inv, &it, sessionID)
items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
@ -569,17 +492,17 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
}
func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
var shouldStop bool
res := make([]stackitem.Item, 0)
for !shouldStop {
items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
items, err := inv.TraverseIterator(sessionID, iter, batchSize)
if err != nil {
return nil, err
}
res = append(res, items...)
shouldStop = len(items) < iteratorBatchSize
shouldStop = len(items) < batchSize
}
return res, nil
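
readIterator in this hunk drains a NEO session iterator in fixed-size batches and stops on the first batch shorter than the batch size; the only change is whether that size is the iteratorBatchSize constant or a parameter. A toy sketch of the same termination logic, with the invoker's TraverseIterator replaced by a hypothetical fetch closure:

```go
package main

import "fmt"

// drain keeps requesting batches until a short (or empty) batch signals
// that the iterator is exhausted — the same condition readIterator uses
// (len(items) < batchSize).
func drain(fetch func(n int) []int, batchSize int) []int {
	var res []int
	for {
		items := fetch(batchSize)
		res = append(res, items...)
		if len(items) < batchSize {
			return res
		}
	}
}

func main() {
	data := make([]int, 0, 250)
	for i := range 250 {
		data = append(data, i)
	}
	pos := 0
	fetch := func(n int) []int { // stands in for inv.TraverseIterator
		end := min(pos+n, len(data))
		items := data[pos:end]
		pos = end
		return items
	}
	fmt.Println(len(drain(fetch, 100))) // 250, fetched in batches of 100, 100, 50
}
```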


@ -12,8 +12,6 @@ func init() {
initFrostfsIDAddSubjectToGroupCmd()
initFrostfsIDRemoveSubjectFromGroupCmd()
initFrostfsIDListGroupSubjectsCmd()
initFrostfsIDSetKVCmd()
initFrostfsIDDeleteKVCmd()
initFrostfsIDAddSubjectKeyCmd()
initFrostfsIDRemoveSubjectKeyCmd()
}


@ -3,6 +3,9 @@ package helper
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@ -13,6 +16,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@ -24,86 +28,32 @@ type LocalActor struct {
rpcInvoker invoker.RPCInvoke
}
type AlphabetWallets struct {
Label string
Path string
}
func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) {
w, err := GetAlphabetWallets(v, a.Path)
if err != nil {
return nil, err
}
var accounts []*wallet.Account
for _, wall := range w {
acc, err := GetWalletAccount(wall, a.Label)
if err != nil {
return nil, err
}
accounts = append(accounts, acc)
}
return accounts, nil
}
type RegularWallets struct{ Path string }
func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) {
w, err := getRegularWallet(r.Path)
if err != nil {
return nil, err
}
return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil
}
// NewLocalActor create LocalActor with accounts form provided wallets.
// In case of empty wallets provided created actor with dummy account only for read operation.
//
// If wallets are provided, the contract client will use accounts with accName name from these wallets.
// To determine which account name should be used in a contract client, refer to how the contract
// verifies the transaction signature.
func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) {
func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) {
walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
var act *actor.Actor
var accounts []*wallet.Account
var signers []actor.SignerAccount
if alphabet != nil {
account, err := alphabet.GetAccount(viper.GetViper())
if err != nil {
return nil, err
}
wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
accounts = append(accounts, account...)
signers = append(signers, actor.SignerAccount{
Signer: transaction.Signer{
Account: account[0].Contract.ScriptHash(),
Scopes: transaction.Global,
},
Account: account[0],
})
for _, w := range wallets {
acc, err := GetWalletAccount(w, accName)
commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err)
accounts = append(accounts, acc)
}
for _, w := range regularWallets {
if w == nil {
continue
}
account, err := w.GetAccount()
if err != nil {
return nil, err
}
accounts = append(accounts, account...)
signers = append(signers, actor.SignerAccount{
Signer: transaction.Signer{
Account: account[0].Contract.ScriptHash(),
Scopes: transaction.Global,
},
Account: account[0],
})
}
act, err := actor.New(c, signers)
act, err = actor.New(c, []actor.SignerAccount{{
Signer: transaction.Signer{
Account: accounts[0].Contract.ScriptHash(),
Scopes: transaction.Global,
},
Account: accounts[0],
}})
if err != nil {
return nil, err
}


@ -3,7 +3,6 @@ package helper
import (
"errors"
"fmt"
"slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@ -119,8 +118,11 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
return err
}
for k, v := range m {
if slices.Contains(NetmapConfigKeys, k) {
md[k] = v
for _, key := range NetmapConfigKeys {
if k == key {
md[k] = v
break
}
}
}
return nil
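
The MergeNetmapConfig change is a pure rewrite of a membership test: checking a key against NetmapConfigKeys with an explicit inner loop and with slices.Contains (standard library, Go 1.21+) selects exactly the same entries. A minimal demonstration with sample key names:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	netmapConfigKeys := []string{"MaxObjectSize", "EpochDuration", "ContainerFee"} // sample keys
	m := map[string]any{"EpochDuration": 240, "SomethingElse": 1}

	md := map[string]any{}
	for k, v := range m {
		// One-line membership check.
		if slices.Contains(netmapConfigKeys, k) {
			md[k] = v
		}
	}

	md2 := map[string]any{}
	for k, v := range m {
		// Equivalent explicit inner loop.
		for _, key := range netmapConfigKeys {
			if k == key {
				md2[k] = v
				break
			}
		}
	}
	fmt.Println(len(md) == len(md2), md["EpochDuration"]) // true 240
}
```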


@ -14,7 +14,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
@ -23,27 +22,6 @@ import (
"github.com/spf13/viper"
)
func getRegularWallet(walletPath string) (*wallet.Wallet, error) {
w, err := wallet.NewWalletFromFile(walletPath)
if err != nil {
return nil, err
}
password, err := input.ReadPassword("Enter password for wallet:")
if err != nil {
return nil, fmt.Errorf("can't fetch password: %w", err)
}
for i := range w.Accounts {
if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
err = fmt.Errorf("can't unlock wallet: %w", err)
break
}
}
return w, err
}
func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
wallets, err := openAlphabetWallets(v, walletDir)
if err != nil {
@ -73,7 +51,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
if errors.Is(err, os.ErrNotExist) {
err = nil
} else {
err = fmt.Errorf("can't open alphabet wallet: %w", err)
err = fmt.Errorf("can't open wallet: %w", err)
}
break
}


@ -22,14 +22,15 @@ import (
)
const (
gasInitialTotalSupply = 30000000 * native.GASFactor
// initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
initialAlphabetGASAmount = 10_000 * native.GASFactor
// initialProxyGASAmount represents the amount of GAS given to a proxy contract.
initialProxyGASAmount = 50_000 * native.GASFactor
)
func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 {
return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
func initialCommitteeGASAmount(c *helper.InitializeContext) int64 {
return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
}
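
Both variants of initialCommitteeGASAmount apply the same split: the initial GAS total, minus 10,000 GAS per alphabet wallet, halved for the committee; they differ only in whether the total comes from the chain's reported InitialGasDistribution or from the hard-coded gasInitialTotalSupply above. A worked example, assuming 7 alphabet wallets purely for illustration:

```go
package main

import "fmt"

const (
	gasFactor                = 100_000_000 // native.GASFactor: 1 GAS = 10^8 fixed8 units
	gasInitialTotalSupply    = 30_000_000 * gasFactor
	initialAlphabetGASAmount = 10_000 * gasFactor
)

func main() {
	wallets := int64(7) // assumed alphabet size, for illustration only
	committee := (int64(gasInitialTotalSupply) - initialAlphabetGASAmount*wallets) / 2
	fmt.Println(committee / gasFactor) // 14965000 GAS: (30,000,000 - 7*10,000) / 2
}
```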
func transferFunds(c *helper.InitializeContext) error {
@ -41,11 +42,6 @@ func transferFunds(c *helper.InitializeContext) error {
return err
}
version, err := c.Client.GetVersion()
if err != nil {
return err
}
var transfers []transferTarget
for _, acc := range c.Accounts {
to := acc.Contract.ScriptHash()
@ -63,7 +59,7 @@ func transferFunds(c *helper.InitializeContext) error {
transferTarget{
Token: gas.Hash,
Address: c.CommitteeAcc.Contract.ScriptHash(),
Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)),
Amount: initialCommitteeGASAmount(c),
},
transferTarget{
Token: neo.Hash,
@ -87,23 +83,16 @@ func transferFunds(c *helper.InitializeContext) error {
// transferFundsFinished checks balances of accounts we transfer GAS to.
// The stage is considered finished if the balance is greater than the half of what we need to transfer.
func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
if err != nil {
return false, err
}
acc := c.Accounts[0]
version, err := c.Client.GetVersion()
if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 {
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
res, err := r.BalanceOf(acc.Contract.ScriptHash())
if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 {
return false, err
}
res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
if err != nil {
return false, err
}
return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err
}
func transferGASToProxy(c *helper.InitializeContext) error {


@ -6,9 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
func initRegisterCmd() {
@ -21,7 +19,6 @@ func initRegisterCmd() {
registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
}
@ -51,7 +48,6 @@ func initDeleteCmd() {
deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
}
@ -66,28 +62,3 @@ func deleteDomain(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
cmd.Println("Domain deleted successfully")
}
func initSetAdminCmd() {
Cmd.AddCommand(setAdminCmd)
setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage)
_ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath)
_ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag)
}
func setAdmin(cmd *cobra.Command, _ []string) {
c, actor := nnsWriter(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath))
commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err)
h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash())
_, err = actor.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "Set admin error: %w", err)
cmd.Println("Set admin successfully")
}


@ -1,11 +1,7 @@
package nns
import (
"errors"
client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
@ -20,32 +16,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
c, err := helper.NewRemoteClient(v)
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag))
walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath))
adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath))
var (
alphabet *helper.AlphabetWallets
regularWallets []*helper.RegularWallets
)
if alphabetWalletPath != "" {
alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName}
}
if walletPath != "" {
regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath})
}
if adminWalletPath != "" {
regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath})
}
if alphabet == nil && regularWallets == nil {
commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided"))
}
ac, err := helper.NewLocalActor(c, alphabet, regularWallets...)
ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName)
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
r := management.NewReader(ac.Invoker)


@ -19,7 +19,6 @@ func initAddRecordCmd() {
addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag)
@ -41,7 +40,6 @@ func initDelRecordsCmd() {
delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
@ -54,7 +52,6 @@ func initDelRecordCmd() {
delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)


@ -39,7 +39,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: registerDomain,
}
@ -49,7 +48,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: deleteDomain,
}
@ -77,7 +75,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: addRecord,
}
@ -95,7 +92,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: delRecords,
}
@ -105,21 +101,9 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
},
Run: delRecord,
}
setAdminCmd = &cobra.Command{
Use: "set-admin",
Short: "Sets admin for domain",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath))
},
Run: setAdmin,
}
)
func init() {
@ -132,5 +116,4 @@ func init() {
initGetRecordsCmd()
initDelRecordsCmd()
initDelRecordCmd()
initSetAdminCmd()
}


@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"math/big"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@ -40,8 +41,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
}
accHash := w.GetChangeAddress()
addr, _ := cmd.Flags().GetString(walletAccountFlag)
if addr != "" {
if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
accHash, err = address.StringToUint160(addr)
if err != nil {
return fmt.Errorf("invalid address: %s", addr)
@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't find account for %s", accHash)
}
prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
pass, err := input.ReadPassword(prompt)
if err != nil {
return fmt.Errorf("can't get password: %v", err)
@ -73,9 +73,16 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return err
}
till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
if till <= 0 {
return errInvalidNotaryDepositLifetime
till := int64(defaultNotaryDepositLifetime)
tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
if err != nil {
return err
}
if tillStr != "" {
till, err = strconv.ParseInt(tillStr, 10, 64)
if err != nil || till <= 0 {
return errInvalidNotaryDepositLifetime
}
}
return transferGas(cmd, acc, accHash, gasAmount, till)
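
The two guards around walletAccountFlag in this hunk are not equivalent: for a registered string flag, Flags().GetString returns an empty string and a nil error when the flag is simply not passed, so an `err == nil` check always enters the branch, while `addr != ""` skips it for an omitted flag. A small cobra sketch of that behaviour (command and flag names are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{Use: "deposit", Run: func(cmd *cobra.Command, _ []string) {
		addr, err := cmd.Flags().GetString("account")
		// With no --account on the command line: err == nil and addr == "",
		// so only the emptiness check can tell that the flag was omitted.
		fmt.Printf("addr=%q err=%v\n", addr, err)
		if addr != "" {
			fmt.Println("would parse address:", addr)
		}
	}}
	cmd.Flags().String("account", "", "Wallet account address")
	cmd.SetArgs([]string{}) // simulate running without --account
	_ = cmd.Execute()
}
```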


@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
}
func init() {


@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
buf := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
_, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
_, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
_, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))
_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
_, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))
_ = tw.Flush()
cmd.Print(buf.String())


@ -20,32 +20,23 @@ const (
accountAddressFlag = "account"
)
func parseAddresses(cmd *cobra.Command) []util.Uint160 {
var addrs []util.Uint160
accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
for _, acc := range accs {
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
addrs = append(addrs, addr)
}
return addrs
}
func addProxyAccount(cmd *cobra.Command, _ []string) {
addrs := parseAddresses(cmd)
err := processAccount(cmd, addrs, "addAccount")
acc, _ := cmd.Flags().GetString(accountAddressFlag)
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
err = processAccount(cmd, addr, "addAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func removeProxyAccount(cmd *cobra.Command, _ []string) {
addrs := parseAddresses(cmd)
err := processAccount(cmd, addrs, "removeAccount")
acc, _ := cmd.Flags().GetString(accountAddressFlag)
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
err = processAccount(cmd, addr, "removeAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
@ -63,9 +54,7 @@ func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) err
}
bw := io.NewBufBinWriter()
for _, addr := range addrs {
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
}
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err


@ -29,15 +29,13 @@ var (
func initProxyAddAccount() {
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initProxyRemoveAccount() {
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}


@ -40,6 +40,8 @@ morph:
- address: wss://{{.}}/ws{{end}}
{{if not .Relay }}
storage:
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard:
default: # section with the default shard parameters
metabase:


@ -11,7 +11,6 @@ import (
"net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"text/template"
@ -31,6 +30,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
)
@ -105,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
fatalOnErr(errors.New("can't find account in wallet"))
}
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
fatalOnErr(err)
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
@ -410,7 +410,8 @@ func initClient(rpc []string) *rpcclient.Client {
var c *rpcclient.Client
var err error
shuffled := slices.Clone(rpc)
shuffled := make([]string, len(rpc))
copy(shuffled, rpc)
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
for _, endpoint := range shuffled {
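
In initClient the only difference is how the endpoint list is copied before shuffling: slices.Clone(rpc) and make-plus-copy produce equal, independent copies, so rand.Shuffle never touches the caller's slice either way. A quick check of the equivalence with sample endpoints:

```go
package main

import (
	"fmt"
	"math/rand"
	"slices"
)

func main() {
	rpc := []string{"node1:30333", "node2:30333", "node3:30333"} // sample endpoints

	a := slices.Clone(rpc)

	b := make([]string, len(rpc))
	copy(b, rpc)

	fmt.Println(slices.Equal(a, b)) // true: same contents, separate backing arrays

	// Shuffling the copy leaves the original slice untouched.
	rand.Shuffle(len(a), func(i, j int) { a[i], a[j] = a[j], a[i] })
	fmt.Println(rpc[0]) // still "node1:30333"
}
```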


@ -9,6 +9,7 @@ import (
"io"
"os"
"slices"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@ -76,7 +77,9 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
slices.SortFunc(list, cid.ID.Cmp)
slices.SortFunc(list, func(lhs, rhs cid.ID) int {
return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
})
return list
}
@ -684,7 +687,9 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
return nil, fmt.Errorf("read object list: %w", err)
}
slices.SortFunc(list, oid.ID.Cmp)
slices.SortFunc(list, func(a, b oid.ID) int {
return strings.Compare(a.EncodeToString(), b.EncodeToString())
})
return &SearchObjectsRes{
ids: list,
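
Both sorts in this file (SortedIDList and the SearchObjects result list just above) go through slices.SortFunc; the variants differ only in whether the comparator is the ID type's own Cmp method or strings.Compare over the string encodings. A generic illustration of slices.SortFunc with a strings.Compare-based comparator, using a stand-in ID type:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

type id string // stand-in for cid.ID / oid.ID

func (x id) EncodeToString() string { return string(x) }

func main() {
	list := []id{"zz", "aa", "mm"}
	// The comparator must return <0, 0, >0 — exactly what strings.Compare gives.
	slices.SortFunc(list, func(lhs, rhs id) int {
		return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
	})
	fmt.Println(list) // [aa mm zz]
}
```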


@ -28,7 +28,7 @@ const (
RPC = "rpc-endpoint"
RPCShorthand = "r"
RPCDefault = ""
RPCUsage = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')"
RPCUsage = "Remote node address (as 'multiaddr' or '<host>:<port>')"
Timeout = "timeout"
TimeoutShorthand = "t"


@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
outputPath, _ := cmd.Flags().GetString(outputFlag)
if outputPath != "" {
err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
commonCmd.ExitOnErr(cmd, "dump error: %w", err)
} else {
fmt.Print("\n")


@ -23,11 +23,11 @@ type policyPlaygroundREPL struct {
nodes map[string]netmap.NodeInfo
}
func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
return &policyPlaygroundREPL{
cmd: cmd,
nodes: map[string]netmap.NodeInfo{},
}
}, nil
}
func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@ -246,7 +246,8 @@ var policyPlaygroundCmd = &cobra.Command{
Long: `A REPL for testing placement policies.
If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
Run: func(cmd *cobra.Command, _ []string) {
repl := newPolicyPlaygroundREPL(cmd)
repl, err := newPolicyPlaygroundREPL(cmd)
commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
},
}


@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
_, _ = tw.Write([]byte("#\tName\tType\n"))
for i, t := range targets {
_, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
_, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
}
_ = tw.Flush()
cmd.Print(buf.String())


@ -1,117 +0,0 @@
package control
import (
"bytes"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/mr-tron/base58"
"github.com/spf13/cobra"
)
const (
FullInfoFlag = "full"
FullInfoFlagUsage = "Print full ShardInfo."
)
var locateObjectCmd = &cobra.Command{
Use: "locate-object",
Short: "List shards storing the object",
Long: "List shards storing the object",
Run: locateObject,
}
func initControlLocateObjectCmd() {
initControlFlags(locateObjectCmd)
flags := locateObjectCmd.Flags()
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
_ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag)
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. Requires --full flag.")
flags.Bool(FullInfoFlag, false, FullInfoFlagUsage)
}
func locateObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
_ = object.ReadObjectAddress(cmd, &cnr, &obj)
pk := key.Get(cmd)
body := new(control.ListShardsForObjectRequest_Body)
body.SetContainerId(cnr.EncodeToString())
body.SetObjectId(obj.EncodeToString())
req := new(control.ListShardsForObjectRequest)
req.SetBody(body)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var err error
var resp *control.ListShardsForObjectResponse
err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.ListShardsForObject(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
shardIDs := resp.GetBody().GetShard_ID()
isFull, _ := cmd.Flags().GetBool(FullInfoFlag)
if !isFull {
for _, id := range shardIDs {
cmd.Println(base58.Encode(id))
}
return
}
// get full shard info
listShardsReq := new(control.ListShardsRequest)
listShardsReq.SetBody(new(control.ListShardsRequest_Body))
signRequest(cmd, pk, listShardsReq)
var listShardsResp *control.ListShardsResponse
err = cli.ExecRaw(func(client *rawclient.Client) error {
listShardsResp, err = control.ListShards(client, listShardsReq)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody())
shards := listShardsResp.GetBody().GetShards()
sortShardsByID(shards)
shards = filterShards(shards, shardIDs)
isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
if isJSON {
prettyPrintShardsJSON(cmd, shards)
} else {
prettyPrintShards(cmd, shards)
}
}
func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo {
var res []control.ShardInfo
for _, id := range ids {
for _, inf := range info {
if bytes.Equal(inf.Shard_ID, id) {
res = append(res, inf)
}
}
}
return res
}


@ -39,7 +39,6 @@ func init() {
listRulesCmd,
getRuleCmd,
listTargetsCmd,
locateObjectCmd,
)
initControlHealthCheckCmd()
@ -53,5 +52,4 @@ func init() {
initControlListRulesCmd()
initControGetRuleCmd()
initControlListTargetsCmd()
initControlLocateObjectCmd()
}


@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.
var resp *control.GetNetmapStatusResponse
var err error
err = cli.ExecRaw(func(client *rawclient.Client) error {
resp, err = control.GetNetmapStatus(cmd.Context(), client, req)
resp, err = control.GetNetmapStatus(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)


@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag))
}
objAddr = ReadObjectAddress(cmd, &cnr, &obj)
objAddr = readObjectAddress(cmd, &cnr, &obj)
}
pk := key.GetOrGenerate(cmd)


@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
filename := cmd.Flag(fileFlag).Value.String()
out, closer := createOutWriter(cmd, filename)


@ -41,7 +41,7 @@ func initObjectHashCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
_ = objectHashCmd.MarkFlagRequired("range")
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
@ -52,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)


@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)


@ -101,7 +101,7 @@ func initObjectNodesCmd() {
func objectNodes(cmd *cobra.Command, _ []string) {
var cnrID cid.ID
var objID oid.ID
ReadObjectAddress(cmd, &cnrID, &objID)
readObjectAddress(cmd, &cnrID, &objID)
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
@ -320,7 +320,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem
}
placementBuilder := placement.NewNetworkMapBuilder(netmap)
for _, object := range objects {
placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy)
placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
for repIdx, rep := range placement {
numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
@ -358,7 +358,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
placementObjectID = object.ecHeader.parent
}
placementBuilder := placement.NewNetworkMapBuilder(netmap)
placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy)
placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy)
commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
for _, vector := range placement {


@ -46,7 +46,7 @@ func initObjectPatchCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
@ -56,7 +56,7 @@ func patch(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeSlice(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@ -99,9 +99,11 @@ func patch(cmd *cobra.Command, _ []string) {
}
func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
if err != nil {
return nil, err
var rawAttrs []string
raw := cmd.Flag(newAttrsFlagName).Value.String()
if len(raw) != 0 {
rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
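
The two variants of parseNewObjectAttrs read the same user input differently: a StringSlice flag lets pflag split comma-separated values itself and naturally yields an empty slice when the flag is unset, while a plain String flag has to be split manually behind an emptiness check. A small sketch contrasting the two, using illustrative flag names:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.StringSlice("attrs-slice", nil, "attributes as a slice flag")
	fs.String("attrs-string", "", "attributes as a plain string flag")
	_ = fs.Parse([]string{
		"--attrs-slice=Key1=Value1,Key2=Value2",
		"--attrs-string=Key1=Value1,Key2=Value2",
	})

	// pflag splits the slice flag on commas for us.
	fromSlice, _ := fs.GetStringSlice("attrs-slice")

	// The plain string flag needs an explicit split, guarded for emptiness.
	var fromString []string
	if raw, _ := fs.GetString("attrs-string"); raw != "" {
		fromString = strings.Split(raw, ",")
	}

	fmt.Println(fromSlice)  // [Key1=Value1 Key2=Value2]
	fmt.Println(fromString) // [Key1=Value1 Key2=Value2]
}
```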


@ -50,7 +50,7 @@ func initObjectPutCmd() {
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@ -214,9 +214,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
}
func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
if err != nil {
return nil, err
var rawAttrs []string
raw := cmd.Flag("attributes").Value.String()
if len(raw) != 0 {
rawAttrs = strings.Split(raw, ",")
}
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes


@ -38,7 +38,7 @@ func initObjectRangeCmd() {
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
flags.String("range", "", "Range to take data from in the form offset:length")
flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
flags.Bool(rawFlag, false, rawFlagDesc)
}
@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) {
var cnr cid.ID
var obj oid.ID
objAddr := ReadObjectAddress(cmd, &cnr, &obj)
objAddr := readObjectAddress(cmd, &cnr, &obj)
ranges, err := getRangeList(cmd)
commonCmd.ExitOnErr(cmd, "", err)
@ -195,10 +195,11 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
}
func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
vs, err := cmd.Flags().GetStringSlice("range")
if len(vs) == 0 || err != nil {
return nil, err
v := cmd.Flag("range").Value.String()
if len(v) == 0 {
return nil, nil
}
vs := strings.Split(v, ",")
rs := make([]objectSDK.Range, len(vs))
for i := range vs {
before, after, found := strings.Cut(vs[i], rangeSep)
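
getRangeList parses each "offset:length" pair with strings.Cut, which splits on the first separator and reports whether it was found, so a missing colon can be rejected before the numbers are parsed. A self-contained sketch of that parsing step (the error messages are illustrative, not the CLI's actual wording):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type objRange struct{ offset, length uint64 }

func parseRange(s string) (objRange, error) {
	before, after, found := strings.Cut(s, ":")
	if !found {
		return objRange{}, fmt.Errorf("invalid range specifier: %q", s)
	}
	off, err := strconv.ParseUint(before, 10, 64)
	if err != nil {
		return objRange{}, fmt.Errorf("invalid offset: %w", err)
	}
	ln, err := strconv.ParseUint(after, 10, 64)
	if err != nil {
		return objRange{}, fmt.Errorf("invalid length: %w", err)
	}
	return objRange{offset: off, length: ln}, nil
}

func main() {
	r, err := parseRange("1024:512")
	fmt.Println(r, err) // {1024 512} <nil>
	_, err = parseRange("1024")
	fmt.Println(err) // invalid range specifier: "1024"
}
```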


@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string {
return xs
}
func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address {
readCID(cmd, cnr)
readOID(cmd, obj)


@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@ -33,13 +34,14 @@ func _client() (tree.TreeServiceClient, error) {
opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(),
),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
grpc.WithDisableServiceConfig(),
}
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {


@ -2,17 +2,13 @@ package meta
import (
"context"
"encoding/binary"
"errors"
"fmt"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@ -31,11 +27,6 @@ Available search filters:
var initialPrompt string
var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
2: schema.MetabaseParserV2,
3: schema.MetabaseParserV3,
}
func init() {
common.AddComponentPathFlag(tuiCMD, &vPath)
@ -58,22 +49,12 @@ func runTUI(cmd *cobra.Command) error {
}
defer db.Close()
schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
if !hasVersion {
return errors.New("couldn't detect schema version")
}
metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
if !ok {
return fmt.Errorf("unknown schema version %d", schemaVersion)
}
// Need if app was stopped with Ctrl-C.
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
app := tview.NewApplication()
ui := tui.NewUI(ctx, app, db, metabaseParser, nil)
ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
_ = ui.AddFilter("cid", tui.CIDParser, "CID")
_ = ui.AddFilter("oid", tui.OIDParser, "OID")
@ -88,31 +69,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
var (
shardInfoBucket = []byte{5}
versionRecord = []byte("version")
)
func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
err := db.View(func(tx *bbolt.Tx) error {
bkt := tx.Bucket(shardInfoBucket)
if bkt == nil {
return nil
}
rec := bkt.Get(versionRecord)
if rec == nil {
return nil
}
version = binary.LittleEndian.Uint64(rec)
ok = true
return nil
})
if err != nil {
common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
}
return
}


@ -80,15 +80,10 @@ var (
},
)
UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
UserAttributeParser = NewUserAttributeKeyBucketParser(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
)
UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
[]string{"FilePath", "S3-Access-Box-CRDT-Name"},
)
PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: StrictResolver,
@ -113,14 +108,4 @@ var (
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
cidResolver: LenientResolver,
oidResolver: LenientResolver,
})
ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
)


@ -22,31 +22,27 @@ const (
Split
ContainerCounters
ECInfo
ExpirationEpochToObject
ObjectToExpirationEpoch
)
var x = map[Prefix]string{
Graveyard: "Graveyard",
Garbage: "Garbage",
ToMoveIt: "To Move It",
ContainerVolume: "Container Volume",
Locked: "Locked",
ShardInfo: "Shard Info",
Primary: "Primary",
Lockers: "Lockers",
Tombstone: "Tombstone",
Small: "Small",
Root: "Root",
Owner: "Owner",
UserAttribute: "User Attribute",
PayloadHash: "Payload Hash",
Parent: "Parent",
Split: "Split",
ContainerCounters: "Container Counters",
ECInfo: "EC Info",
ExpirationEpochToObject: "Exp. Epoch to Object",
ObjectToExpirationEpoch: "Object to Exp. Epoch",
Graveyard: "Graveyard",
Garbage: "Garbage",
ToMoveIt: "To Move It",
ContainerVolume: "Container Volume",
Locked: "Locked",
ShardInfo: "Shard Info",
Primary: "Primary",
Lockers: "Lockers",
Tombstone: "Tombstone",
Small: "Small",
Root: "Root",
Owner: "Owner",
UserAttribute: "User Attribute",
PayloadHash: "Payload Hash",
Parent: "Parent",
Split: "Split",
ContainerCounters: "Container Counters",
ECInfo: "EC Info",
}
func (p Prefix) String() string {


@ -9,7 +9,7 @@ import (
func (b *PrefixBucket) String() string {
return common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
)
}
@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
return fmt.Sprintf(
"%s CID %s",
common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(b.id.String(), tcell.ColorAqua),
)
@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
func (b *UserAttributeKeyBucket) String() string {
return fmt.Sprintf("%s CID %s ATTR-KEY %s",
common.FormatSimple(
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
),
common.FormatSimple(
fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,


@ -2,7 +2,6 @@ package buckets
import (
"errors"
"slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -58,11 +57,10 @@ var (
)
var (
ErrNotBucket = errors.New("not a bucket")
ErrInvalidKeyLength = errors.New("invalid key length")
ErrInvalidValueLength = errors.New("invalid value length")
ErrInvalidPrefix = errors.New("invalid prefix")
ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
ErrNotBucket = errors.New("not a bucket")
ErrInvalidKeyLength = errors.New("invalid key length")
ErrInvalidValueLength = errors.New("invalid value length")
ErrInvalidPrefix = errors.New("invalid prefix")
)
func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@ -134,10 +132,6 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
}
func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
}
func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if value != nil {
return nil, nil, ErrNotBucket
@ -153,11 +147,6 @@ func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []
return nil, nil, err
}
b.key = string(key[33:])
if len(keys) != 0 && !slices.Contains(keys, b.key) {
return nil, nil, ErrUnexpectedAttributeKey
}
return &b, next, nil
}
}


@ -5,30 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
)
var MetabaseParserV3 = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
buckets.ContainerVolumeParser,
buckets.LockedParser,
buckets.ShardInfoParser,
buckets.PrimaryParser,
buckets.LockersParser,
buckets.TombstoneParser,
buckets.SmallParser,
buckets.RootParser,
buckets.UserAttributeParserV3,
buckets.ParentParser,
buckets.SplitParser,
buckets.ContainerCountersParser,
buckets.ECInfoParser,
buckets.ExpirationEpochToObjectParser,
buckets.ObjectToExpirationEpochParser,
),
common.RawParser.ToFallbackParser(),
)
var MetabaseParserV2 = common.WithFallback(
var MetabaseParser = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
@ -41,7 +18,7 @@ var MetabaseParserV2 = common.WithFallback(
buckets.SmallParser,
buckets.RootParser,
buckets.OwnerParser,
buckets.UserAttributeParserV2,
buckets.UserAttributeParser,
buckets.PayloadHashParser,
buckets.ParentParser,
buckets.SplitParser,

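For orientation, a hypothetical helper (not part of this diff) showing how the two parser sets above would typically be selected, assuming the side of the diff that defines both MetabaseParserV2 and MetabaseParserV3 and dispatching on the metabase version read from the shard-info bucket earlier in this compare; the version threshold is an assumption:

// Sketch only: lives in the same package as the variables above.
func parserForVersion(version uint64) common.Parser {
	// Assumed cut-over point; the newer schema adds the expiration-epoch
	// buckets and the key-restricted user-attribute parser.
	if version >= 3 {
		return MetabaseParserV3
	}
	return MetabaseParserV2
}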

@ -63,11 +63,3 @@ func (r *ContainerCountersRecord) DetailedString() string {
func (r *ECInfoRecord) DetailedString() string {
return spew.Sdump(*r)
}
func (r *ExpirationEpochToObjectRecord) DetailedString() string {
return spew.Sdump(*r)
}
func (r *ObjectToExpirationEpochRecord) DetailedString() string {
return spew.Sdump(*r)
}


@ -143,26 +143,3 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
return common.No
}
}
func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "cid":
id := val.(cid.ID)
return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}
func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}


@ -249,45 +249,3 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
}
return &r, nil, nil
}
func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 72 {
return nil, nil, ErrInvalidKeyLength
}
var (
r ExpirationEpochToObjectRecord
err error
)
r.epoch = binary.BigEndian.Uint64(key[:8])
if err = r.cnt.Decode(key[8:40]); err != nil {
return nil, nil, err
}
if err = r.obj.Decode(key[40:]); err != nil {
return nil, nil, err
}
return &r, nil, nil
}
func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 32 {
return nil, nil, ErrInvalidKeyLength
}
if len(value) != 8 {
return nil, nil, ErrInvalidValueLength
}
var (
r ObjectToExpirationEpochRecord
err error
)
if err = r.obj.Decode(key); err != nil {
return nil, nil, err
}
r.epoch = binary.LittleEndian.Uint64(value)
return &r, nil, nil
}

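The two parsers above fix the on-disk layouts of the expiration indexes: a 72-byte key of big-endian epoch, container ID and object ID for the epoch-to-object bucket, and a 32-byte object ID key with an 8-byte little-endian epoch value for the reverse index. A small sketch of the corresponding key construction follows; it is not code from the repo and assumes the SDK's cid.ID/oid.ID Encode helpers, with imports (encoding/binary plus the SDK cid and oid packages) matching those already used around these parsers:

// expirationEpochKey builds the 72-byte key that
// ExpirationEpochToObjectRecordParser above expects.
func expirationEpochKey(epoch uint64, cnr cid.ID, obj oid.ID) []byte {
	key := make([]byte, 72)
	binary.BigEndian.PutUint64(key[:8], epoch) // big-endian so keys sort by expiration epoch
	cnr.Encode(key[8:40])
	obj.Encode(key[40:72])
	return key
}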

@ -2,7 +2,6 @@ package records
import (
"fmt"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
"github.com/gdamore/tcell/v2"
@ -134,22 +133,3 @@ func (r *ECInfoRecord) String() string {
len(r.ids),
)
}
func (r *ExpirationEpochToObjectRecord) String() string {
return fmt.Sprintf(
"exp. epoch %s %c CID %s OID %s",
common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
)
}
func (r *ObjectToExpirationEpochRecord) String() string {
return fmt.Sprintf(
"OID %s %c exp. epoch %s",
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
)
}


@ -79,15 +79,4 @@ type (
id oid.ID
ids []oid.ID
}
ExpirationEpochToObjectRecord struct {
epoch uint64
cnt cid.ID
obj oid.ID
}
ObjectToExpirationEpochRecord struct {
obj oid.ID
epoch uint64
}
)


@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren(
path := parentBucket.Path
parser := parentBucket.NextParser
buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
if err != nil {
return err
}
for item := range buffer {
if item.err != nil {
@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren(
}
bucket := item.val
var err error
bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
if err != nil {
return err
@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
defer cancel()
// Check the current bucket's nested buckets if exist
bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range bucketsBuffer {
if item.err != nil {
@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
b := item.val
var err error
b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
if err != nil {
return false, err
@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
// Check the current bucket's nested records if exist
recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range recordsBuffer {
if item.err != nil {
@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
r := item.val
var err error
r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
if err != nil {
return false, err


@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
func load[T any](
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
filter func(key, value []byte) bool, transform func(key, value []byte) T,
) <-chan Item[T] {
) (<-chan Item[T], error) {
buffer := make(chan Item[T], bufferSize)
go func() {
@ -77,13 +77,13 @@ func load[T any](
}
}()
return buffer
return buffer, nil
}
func LoadBuckets(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Bucket] {
buffer := load(
) (<-chan Item[*Bucket], error) {
buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value == nil
@ -98,14 +98,17 @@ func LoadBuckets(
}
},
)
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer
return buffer, nil
}
func LoadRecords(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Record] {
buffer := load(
) (<-chan Item[*Record], error) {
buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value != nil
@ -121,8 +124,11 @@ func LoadRecords(
}
},
)
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer
return buffer, nil
}
// HasBuckets checks if a bucket has nested buckets. It relies on assumption
@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
buffer := load(
buffer, err := load(
ctx, db, path, 1,
nil,
func(_, value []byte) []byte { return value },
)
if err != nil {
return false, err
}
x, ok := <-buffer
if !ok {
return false, nil
}
if x.err != nil {
return false, x.err
return false, err
}
if x.val != nil {
return false, nil
return false, err
}
return true, nil
}

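Both LoadBuckets and LoadRecords hand back a buffered channel of Item values, where each item carries either a parsed value or an error; that protocol invites a small generic consumer. A hypothetical helper in the same tui package, not code from the diff:

// drain consumes a load buffer, stopping at the first failed item, the same
// way the views in this compare iterate their buffers.
func drain[T any](items <-chan Item[T], handle func(T) error) error {
	for item := range items {
		if item.err != nil {
			return item.err
		}
		if err := handle(item.val); err != nil {
			return err
		}
	}
	return nil
}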

@ -1,8 +1,6 @@
package tui
import (
"slices"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
)
@ -28,7 +26,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {
// Used history data for search prompt, so just make that data recent.
if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
f.history = append(f.history, s)
}

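The history rewrite above swaps between the stdlib slices.Delete helper and the classic append idiom; for this single-element case the two are interchangeable, as a quick standalone check shows (hypothetical snippet, Go 1.21+):

package main

import (
	"fmt"
	"slices"
)

func main() {
	a := []string{"get", "put", "list"}
	b := slices.Clone(a)
	i := 1

	a = slices.Delete(a, i, i+1)  // stdlib helper
	b = append(b[:i], b[i+1:]...) // append idiom

	fmt.Println(slices.Equal(a, b)) // true
}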

@ -62,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
ctx, v.onUnmount = context.WithCancel(ctx)
tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
if err != nil {
return err
}
v.buffer = make(chan *Record, v.ui.loadBufferSize)
go func() {
@ -70,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
for item := range tempBuffer {
if item.err != nil {
v.ui.stopOnError(item.err)
v.ui.stopOnError(err)
break
}
record := item.val
var err error
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
if err != nil {
v.ui.stopOnError(err)


@ -19,7 +19,6 @@ func initAPEManagerService(c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
c.cfgMorph.client,
apemanager.WithLogger(c.log))
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)


@ -1,7 +1,6 @@
package main
import (
"context"
"sync"
"time"
@ -17,7 +16,7 @@ import (
"github.com/hashicorp/golang-lru/v2/expirable"
)
type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error)
type netValueReader[K any, V any] func(K) (V, error)
type valueWithError[V any] struct {
v V
@ -50,7 +49,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
// updates the value from the network on cache miss or by TTL.
//
// returned value should not be modified.
func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
func (c *ttlNetCache[K, V]) get(key K) (V, error) {
hit := false
startedAt := time.Now()
defer func() {
@ -72,7 +71,7 @@ func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
return val.v, val.e
}
v, err := c.netRdr(ctx, key)
v, err := c.netRdr(key)
c.cache.Add(key, &valueWithError[V]{
v: v,
@ -136,7 +135,7 @@ func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap]
// updates the value from the network on cache miss.
//
// returned value should not be modified.
func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
hit := false
startedAt := time.Now()
defer func() {
@ -149,7 +148,7 @@ func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, e
return val, nil
}
val, err := c.netRdr(ctx, key)
val, err := c.netRdr(key)
if err != nil {
return nil, err
}
@ -167,11 +166,11 @@ type ttlContainerStorage struct {
}
func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) {
return v.Get(ctx, id)
lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(id)
}, metrics.NewCacheMetrics("container"))
lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
return v.DeletionInfo(ctx, id)
lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
return v.DeletionInfo(id)
}, metrics.NewCacheMetrics("container_deletion_info"))
return ttlContainerStorage{
@ -189,12 +188,12 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
// Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache.
func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) {
return s.containerCache.get(ctx, cnr)
func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
return s.containerCache.get(cnr)
}
func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) {
return s.delInfoCache.get(ctx, cnr)
func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
return s.delInfoCache.get(cnr)
}
type lruNetmapSource struct {
@ -206,8 +205,8 @@ type lruNetmapSource struct {
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
return v.GetNetMapByEpoch(ctx, key)
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
return v.GetNetMapByEpoch(key)
}, metrics.NewCacheMetrics("netmap"))
return &lruNetmapSource{
@ -216,16 +215,16 @@ func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
}
}
func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff)
func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
}
func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, epoch)
func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(epoch)
}
func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
val, err := s.cache.get(ctx, epoch)
func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
val, err := s.cache.get(epoch)
if err != nil {
return nil, err
}
@ -233,7 +232,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*
return val, nil
}
func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) {
func (s *lruNetmapSource) Epoch() (uint64, error) {
return s.netState.CurrentEpoch(), nil
}
@ -241,10 +240,7 @@ type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte]
}
func newCachedIRFetcher(f interface {
InnerRingKeys(ctx context.Context) ([][]byte, error)
},
) cachedIRFetcher {
func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
const (
irFetcherCacheSize = 1 // we intend to store only one value
@ -258,8 +254,8 @@ func newCachedIRFetcher(f interface {
)
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
func(ctx context.Context, _ struct{}) ([][]byte, error) {
return f.InnerRingKeys(ctx)
func(_ struct{}) ([][]byte, error) {
return f.InnerRingKeys()
}, metrics.NewCacheMetrics("ir_keys"),
)
@ -269,8 +265,8 @@ func newCachedIRFetcher(f interface {
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates
// the cache.
func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) {
val, err := f.get(ctx, struct{}{})
func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
val, err := f.get(struct{}{})
if err != nil {
return nil, err
}
@ -293,7 +289,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M
}
}
func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
const ttl = time.Second * 30
hit := false
@ -315,7 +311,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
c.mtx.Lock()
size = c.lastSize
if !c.lastUpdated.After(prevUpdated) {
size = c.src.MaxObjectSize(ctx)
size = c.src.MaxObjectSize()
c.lastSize = size
c.lastUpdated = time.Now()
}

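The generic ttlNetCache above only needs a netValueReader to be useful. The fragment below shows the shape of such a reader against the context-aware variant of the signature (one of the two sides of this hunk); it is a sketch in the same package, since the cache types are unexported, and fetchInnerRingKeys is a stand-in rather than a real function:

// Hypothetical wiring: a single-entry cache refreshed at most every 30s.
irKeysCache := newNetworkTTLCache(1, 30*time.Second,
	func(ctx context.Context, _ struct{}) ([][]byte, error) {
		return fetchInnerRingKeys(ctx) // any network getter fits here
	},
	metrics.NewCacheMetrics("example"),
)

keys, err := irKeysCache.get(ctx, struct{}{}) // hit: cached value; miss or TTL expiry: refetch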

@ -1,7 +1,6 @@
package main
import (
"context"
"errors"
"testing"
"time"
@ -18,7 +17,7 @@ func TestTTLNetCache(t *testing.T) {
t.Run("Test Add and Get", func(t *testing.T) {
ti := time.Now()
cache.set(key, ti, nil)
val, err := cache.get(context.Background(), key)
val, err := cache.get(key)
require.NoError(t, err)
require.Equal(t, ti, val)
})
@ -27,7 +26,7 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now()
cache.set(key, ti, nil)
time.Sleep(2 * ttlDuration)
val, err := cache.get(context.Background(), key)
val, err := cache.get(key)
require.NoError(t, err)
require.NotEqual(t, val, ti)
})
@ -36,20 +35,20 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now()
cache.set(key, ti, nil)
cache.remove(key)
val, err := cache.get(context.Background(), key)
val, err := cache.get(key)
require.NoError(t, err)
require.NotEqual(t, val, ti)
})
t.Run("Test Cache Error", func(t *testing.T) {
cache.set("error", time.Now(), errors.New("mock error"))
_, err := cache.get(context.Background(), "error")
_, err := cache.get("error")
require.Error(t, err)
require.Equal(t, "mock error", err.Error())
})
}
func testNetValueReader(_ context.Context, key string) (time.Time, error) {
func testNetValueReader(key string) (time.Time, error) {
if key == "error" {
return time.Now(), errors.New("mock error")
}


@ -33,7 +33,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
@ -70,7 +69,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -117,6 +115,7 @@ type applicationConfiguration struct {
EngineCfg struct {
errorThreshold uint32
shardPoolSize uint32
shards []shardCfg
lowMem bool
}
@ -135,7 +134,6 @@ type shardCfg struct {
refillMetabase bool
refillMetabaseWorkersCount int
mode shardmode.Mode
limiter qos.Limiter
metaCfg struct {
path string
@ -249,47 +247,45 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
// Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
}
func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error {
var target shardCfg
func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
var newConfig shardCfg
target.refillMetabase = source.RefillMetabase()
target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount()
target.mode = source.Mode()
target.compress = source.Compress()
target.estimateCompressibility = source.EstimateCompressibility()
target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold()
target.uncompressableContentType = source.UncompressableContentTypes()
target.smallSizeObjectLimit = source.SmallSizeLimit()
newConfig.refillMetabase = oldConfig.RefillMetabase()
newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
newConfig.mode = oldConfig.Mode()
newConfig.compress = oldConfig.Compress()
newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
a.setShardWriteCacheConfig(&target, source)
a.setShardWriteCacheConfig(&newConfig, oldConfig)
a.setShardPiloramaConfig(c, &target, source)
a.setShardPiloramaConfig(c, &newConfig, oldConfig)
if err := a.setShardStorageConfig(&target, source); err != nil {
if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
return err
}
a.setMetabaseConfig(&target, source)
a.setMetabaseConfig(&newConfig, oldConfig)
a.setGCConfig(&target, source)
if err := a.setLimiter(&target, source); err != nil {
return err
}
a.setGCConfig(&newConfig, oldConfig)
a.EngineCfg.shards = append(a.EngineCfg.shards, target)
a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
return nil
}
func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) {
writeCacheCfg := source.WriteCache()
func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
writeCacheCfg := oldConfig.WriteCache()
if writeCacheCfg.Enabled() {
wc := &target.writecacheCfg
wc := &newConfig.writecacheCfg
wc.enabled = true
wc.path = writeCacheCfg.Path()
@ -302,10 +298,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, so
}
}
func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) {
func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
if config.BoolSafe(c.Sub("tree"), "enabled") {
piloramaCfg := source.Pilorama()
pr := &target.piloramaCfg
piloramaCfg := oldConfig.Pilorama()
pr := &newConfig.piloramaCfg
pr.enabled = true
pr.path = piloramaCfg.Path()
@ -316,8 +312,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, targ
}
}
func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error {
blobStorCfg := source.BlobStor()
func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
blobStorCfg := oldConfig.BlobStor()
storagesCfg := blobStorCfg.Storages()
ss := make([]subStorageCfg, 0, len(storagesCfg))
@ -351,13 +347,13 @@ func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, sourc
ss = append(ss, sCfg)
}
target.subStorages = ss
newConfig.subStorages = ss
return nil
}
func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) {
metabaseCfg := source.Metabase()
m := &target.metaCfg
func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
metabaseCfg := oldConfig.Metabase()
m := &newConfig.metaCfg
m.path = metabaseCfg.Path()
m.perm = metabaseCfg.BoltDB().Perm()
@ -365,25 +361,12 @@ func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *s
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
}
func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) {
gcCfg := source.GC()
target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
}
func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
limitsConfig := source.Limits()
limiter, err := qos.NewLimiter(limitsConfig)
if err != nil {
return err
}
if target.limiter != nil {
target.limiter.Close()
}
target.limiter = limiter
return nil
func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
gcCfg := oldConfig.GC()
newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
}
// internals contains application-specific internals that are created
@ -510,7 +493,6 @@ type cfg struct {
cfgNetmap cfgNetmap
cfgControlService cfgControlService
cfgObject cfgObject
cfgQoSService cfgQoSService
}
// ReadCurrentNetMap reads network map which has been cached at the
@ -545,8 +527,6 @@ type cfgGRPC struct {
maxChunkSize uint64
maxAddrAmount uint64
reconnectTimeout time.Duration
limiter atomic.Pointer[limiting.SemaphoreLimiter]
}
func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
@ -683,6 +663,10 @@ type cfgAccessPolicyEngine struct {
}
type cfgObjectRoutines struct {
putRemote *ants.Pool
putLocal *ants.Pool
replication *ants.Pool
}
@ -714,7 +698,8 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector
logPrm := c.loggerPrm()
logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
@ -867,14 +852,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
}
}
func initCfgGRPC() (cfg cfgGRPC) {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
cfg.maxChunkSize = maxChunkSize
cfg.maxAddrAmount = maxAddrAmount
return
return cfgGRPC{
maxChunkSize: maxChunkSize,
maxAddrAmount: maxAddrAmount,
}
}
func initCfgObject(appCfg *config.Config) cfgObject {
@ -891,6 +876,7 @@ func (c *cfg) engineOpts() []engine.Option {
var opts []engine.Option
opts = append(opts,
engine.WithShardPoolSize(c.EngineCfg.shardPoolSize),
engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
engine.WithLogger(c.log),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
@ -1045,7 +1031,6 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
}
if c.metricsCollector != nil {
mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics())))
shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics())
}
var sh shardOptsWithID
@ -1070,12 +1055,11 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return pool
}),
shard.WithLimiter(shCfg.limiter),
}
return sh
}
func (c *cfg) loggerPrm() *logger.Prm {
func (c *cfg) loggerPrm() (*logger.Prm, error) {
// check if it has been inited before
if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm)
@ -1094,7 +1078,7 @@ func (c *cfg) loggerPrm() *logger.Prm {
}
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger
return c.dynamicConfiguration.logger, nil
}
func (c *cfg) LocalAddress() network.AddressGroup {
@ -1163,7 +1147,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
if cacheSize > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
}
@ -1182,7 +1166,21 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
var err error
optNonBlocking := ants.WithNonblocking(true)
putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
fatalOnErr(err)
putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
fatalOnErr(err)
replicatorPoolSize := replicatorconfig.PoolSize(cfg)
if replicatorPoolSize <= 0 {
replicatorPoolSize = putRemoteCapacity
}
pool.replication, err = ants.NewPool(replicatorPoolSize)
fatalOnErr(err)
@ -1208,7 +1206,7 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
}
func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
ni, err := c.netmapLocalNodeState(ctx, epoch)
ni, err := c.netmapLocalNodeState(epoch)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
@ -1336,7 +1334,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// Logger
logPrm := c.loggerPrm()
logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
components := c.getComponents(ctx, logPrm)
@ -1412,13 +1414,17 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
}
components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
return components
}
func (c *cfg) reloadPools() error {
newSize := replicatorconfig.PoolSize(c.appCfg)
newSize := objectconfig.Put(c.appCfg).PoolSizeLocal()
c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size")
newSize = objectconfig.Put(c.appCfg).PoolSizeRemote()
c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size")
newSize = replicatorconfig.PoolSize(c.appCfg)
c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")
return nil

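The pool setup above relies on panjf2000/ants; below is a minimal, self-contained sketch of the same pattern with made-up values. With ants.WithNonblocking(true), Submit fails fast with ants.ErrPoolOverload when all workers are busy instead of queueing the task, matching how the put pools are created non-blocking above while the replication pool is not:

package main

import (
	"fmt"

	"github.com/panjf2000/ants/v2"
)

func main() {
	pool, err := ants.NewPool(10, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	if err := pool.Submit(func() { fmt.Println("one put task") }); err != nil {
		fmt.Println("pool overloaded:", err) // ants.ErrPoolOverload
	}
}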

@ -1,7 +1,6 @@
package config
import (
"slices"
"strings"
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
// It supports only one level of nesting and is intended to be used
// to provide default values.
func (x *Config) SetDefault(from *Config) {
x.defaultPath = slices.Clone(from.path)
x.defaultPath = make([]string, len(from.path))
copy(x.defaultPath, from.path)
}


@ -12,10 +12,13 @@ import (
func TestConfigDir(t *testing.T) {
dir := t.TempDir()
cfgFileName := path.Join(dir, "cfg_01.yml")
cfgFileName0 := path.Join(dir, "cfg_00.json")
cfgFileName1 := path.Join(dir, "cfg_01.yml")
require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777))
require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))
c := New("", dir, "")
require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
}


@ -6,7 +6,7 @@ const (
subsection = "container"
listStreamSubsection = "list_stream"
// ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
// ContainerBatchSizeDefault represents he maximum amount of containers to send via stream at once.
ContainerBatchSizeDefault = 1000
)


@ -11,6 +11,10 @@ import (
const (
subsection = "storage"
// ShardPoolSizeDefault is a default value of routine pool size per-shard to
// process object PUT operations in a storage engine.
ShardPoolSizeDefault = 20
)
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@ -61,6 +65,18 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
return nil
}
// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section.
//
// Returns ShardPoolSizeDefault if the value is not a positive number.
func ShardPoolSize(c *config.Config) uint32 {
v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size")
if v > 0 {
return v
}
return ShardPoolSizeDefault
}
// ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section.
//
// Returns 0 if the the value is missing.


@ -11,7 +11,6 @@ import (
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
@ -54,6 +53,7 @@ func TestEngineSection(t *testing.T) {
require.False(t, handlerCalled)
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
})
@ -63,6 +63,7 @@ func TestEngineSection(t *testing.T) {
num := 0
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() {
@ -75,7 +76,6 @@ func TestEngineSection(t *testing.T) {
ss := blob.Storages()
pl := sc.Pilorama()
gc := sc.GC()
limits := sc.Limits()
switch num {
case 0:
@ -134,75 +134,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
readLimits := limits.Read()
writeLimits := limits.Write()
require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
require.ElementsMatch(t, readLimits.Tags,
[]limitsconfig.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(20),
ReservedOps: toPtr(1000),
LimitOps: toPtr(0),
},
{
Tag: "client",
Weight: toPtr(70),
ReservedOps: toPtr(10000),
},
{
Tag: "background",
Weight: toPtr(5),
LimitOps: toPtr(10000),
ReservedOps: toPtr(0),
},
{
Tag: "writecache",
Weight: toPtr(5),
LimitOps: toPtr(25000),
},
{
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25000),
},
})
require.ElementsMatch(t, writeLimits.Tags,
[]limitsconfig.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(200),
ReservedOps: toPtr(100),
LimitOps: toPtr(0),
},
{
Tag: "client",
Weight: toPtr(700),
ReservedOps: toPtr(1000),
},
{
Tag: "background",
Weight: toPtr(50),
LimitOps: toPtr(1000),
ReservedOps: toPtr(0),
},
{
Tag: "writecache",
Weight: toPtr(50),
LimitOps: toPtr(2500),
},
{
Tag: "policer",
Weight: toPtr(50),
LimitOps: toPtr(2500),
},
})
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm())
@ -257,17 +188,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
readLimits := limits.Read()
writeLimits := limits.Write()
require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout)
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps)
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps)
require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout)
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps)
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps)
require.Equal(t, 0, len(readLimits.Tags))
require.Equal(t, 0, len(writeLimits.Tags))
}
return nil
})
@ -281,7 +201,3 @@ func TestEngineSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}
func toPtr(v float64) *float64 {
return &v
}


@ -37,7 +37,10 @@ func (x *Config) Perm() fs.FileMode {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
return max(d, 0)
if d < 0 {
d = 0
}
return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@ -45,7 +48,10 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
return max(s, 0)
if s < 0 {
s = 0
}
return s
}
// NoSync returns the value of "no_sync" config parameter.
@ -60,5 +66,8 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) PageSize() int {
s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
return max(s, 0)
if s < 0 {
s = 0
}
return s
}

View file

@ -4,7 +4,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
@ -126,14 +125,6 @@ func (x *Config) GC() *gcconfig.Config {
)
}
// Limits returns "limits" subsection as a limitsconfig.Config.
func (x *Config) Limits() *limitsconfig.Config {
return limitsconfig.From(
(*config.Config)(x).
Sub("limits"),
)
}
// RefillMetabase returns the value of "resync_metabase" config parameter.
//
// Returns false if the value is not a valid bool.


@ -1,130 +0,0 @@
package limits
import (
"math"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"github.com/spf13/cast"
)
const (
NoLimit int64 = math.MaxInt64
DefaultIdleTimeout = 5 * time.Minute
)
// From wraps config section into Config.
func From(c *config.Config) *Config {
return (*Config)(c)
}
// Config is a wrapper over the config section
// which provides access to Shard's limits configurations.
type Config config.Config
// Read returns the value of "read" limits config section.
func (x *Config) Read() OpConfig {
return x.parse("read")
}
// Write returns the value of "write" limits config section.
func (x *Config) Write() OpConfig {
return x.parse("write")
}
func (x *Config) parse(sub string) OpConfig {
c := (*config.Config)(x).Sub(sub)
var result OpConfig
if s := config.Int(c, "max_waiting_ops"); s > 0 {
result.MaxWaitingOps = s
} else {
result.MaxWaitingOps = NoLimit
}
if s := config.Int(c, "max_running_ops"); s > 0 {
result.MaxRunningOps = s
} else {
result.MaxRunningOps = NoLimit
}
if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
result.IdleTimeout = s
} else {
result.IdleTimeout = DefaultIdleTimeout
}
result.Tags = tags(c)
return result
}
type OpConfig struct {
// MaxWaitingOps returns the value of "max_waiting_ops" config parameter.
//
// Equals NoLimit if the value is not a positive number.
MaxWaitingOps int64
// MaxRunningOps returns the value of "max_running_ops" config parameter.
//
// Equals NoLimit if the value is not a positive number.
MaxRunningOps int64
// IdleTimeout returns the value of "idle_timeout" config parameter.
//
// Equals DefaultIdleTimeout if the value is not a valid duration.
IdleTimeout time.Duration
// Tags returns the value of "tags" config parameter.
//
// Equals nil if the value is not a valid tags config slice.
Tags []IOTagConfig
}
type IOTagConfig struct {
Tag string
Weight *float64
LimitOps *float64
ReservedOps *float64
}
func tags(c *config.Config) []IOTagConfig {
c = c.Sub("tags")
var result []IOTagConfig
for i := 0; ; i++ {
tag := config.String(c, strconv.Itoa(i)+".tag")
if tag == "" {
return result
}
var tagConfig IOTagConfig
tagConfig.Tag = tag
v := c.Value(strconv.Itoa(i) + ".weight")
if v != nil {
w, err := cast.ToFloat64E(v)
panicOnErr(err)
tagConfig.Weight = &w
}
v = c.Value(strconv.Itoa(i) + ".limit_ops")
if v != nil {
l, err := cast.ToFloat64E(v)
panicOnErr(err)
tagConfig.LimitOps = &l
}
v = c.Value(strconv.Itoa(i) + ".reserved_ops")
if v != nil {
r, err := cast.ToFloat64E(v)
panicOnErr(err)
tagConfig.ReservedOps = &r
}
result = append(result, tagConfig)
}
}
func panicOnErr(err error) {
if err != nil {
panic(err)
}
}


@ -52,7 +52,10 @@ func (x *Config) NoSync() bool {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchDelay() time.Duration {
d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
return max(d, 0)
if d <= 0 {
d = 0
}
return d
}
// MaxBatchSize returns the value of "max_batch_size" config parameter.
@ -60,5 +63,8 @@ func (x *Config) MaxBatchDelay() time.Duration {
// Returns 0 if the value is not a positive number.
func (x *Config) MaxBatchSize() int {
s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
return max(s, 0)
if s <= 0 {
s = 0
}
return s
}


@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
//
// Returns PermDefault if the value is not a positive number.
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
p := config.UintSafe(l.cfg, "perm")
p := config.UintSafe((*config.Config)(l.cfg), "perm")
if p == 0 {
p = PermDefault
}
@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
//
// Returns false if the value is not a boolean.
func (l PersistentPolicyRulesConfig) NoSync() bool {
return config.BoolSafe(l.cfg, "no_sync")
return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
}
// CompatibilityMode returns true if need to run node in compatibility with previous versions mode.


@ -21,6 +21,10 @@ const (
putSubsection = "put"
getSubsection = "get"
// PutPoolSizeDefault is a default value of routine pool size to
// process object.Put requests in object service.
PutPoolSizeDefault = 10
)
// Put returns structure that provides access to "put" subsection of
@ -31,6 +35,30 @@ func Put(c *config.Config) PutConfig {
}
}
// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
//
// Returns PutPoolSizeDefault if the value is not a positive number.
func (g PutConfig) PoolSizeRemote() int {
v := config.Int(g.cfg, "remote_pool_size")
if v > 0 {
return int(v)
}
return PutPoolSizeDefault
}
// PoolSizeLocal returns the value of "local_pool_size" config parameter.
//
// Returns PutPoolSizeDefault if the value is not a positive number.
func (g PutConfig) PoolSizeLocal() int {
v := config.Int(g.cfg, "local_pool_size")
if v > 0 {
return int(v)
}
return PutPoolSizeDefault
}
// SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false“ if is not defined.
func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")


@ -13,6 +13,8 @@ func TestObjectSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
})
@ -20,6 +22,8 @@ func TestObjectSection(t *testing.T) {
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
}


@ -1,46 +0,0 @@
package qos
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
const (
subsection = "qos"
criticalSubSection = "critical"
internalSubSection = "internal"
)
// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config
// parameter from "qos" section.
//
// Returns an empty list if not set.
func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys {
return authorizedKeys(c, criticalSubSection)
}
// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config
// parameter from "qos" section.
//
// Returns an empty list if not set.
func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
return authorizedKeys(c, internalSubSection)
}
func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
pubs := make(keys.PublicKeys, 0, len(strKeys))
for i := range strKeys {
pub, err := keys.NewPublicKeyFromString(strKeys[i])
if err != nil {
panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
}
pubs = append(pubs, pub)
}
return pubs
}


@ -1,40 +0,0 @@
package qos
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestQoSSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Empty(t, CriticalAuthorizedKeys(empty))
require.Empty(t, InternalAuthorizedKeys(empty))
})
const path = "../../../../config/example/node"
criticalPubs := make(keys.PublicKeys, 2)
criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
internalPubs := make(keys.PublicKeys, 2)
internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
fileConfigTest := func(c *config.Config) {
require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}

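A hypothetical consumer of the helpers above, not taken from the diff: deciding whether a request signer may use a critical IO tag. The qosconfig package alias, the helper name and its call site are assumptions; only CriticalAuthorizedKeys and the neo-go keys types come from the code shown here:

// allowedCritical reports whether signer is listed under
// qos.critical.authorized_keys in the node config.
func allowedCritical(c *config.Config, signer *keys.PublicKey) bool {
	for _, pub := range qosconfig.CriticalAuthorizedKeys(c) {
		if pub.Equal(signer) {
			return true
		}
	}
	return false
}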

@ -11,8 +11,6 @@ const (
// PutTimeoutDefault is a default timeout of object put request in replicator.
PutTimeoutDefault = 5 * time.Second
// PoolSizeDefault is a default pool size for put request in replicator.
PoolSizeDefault = 10
)
// PutTimeout returns the value of "put_timeout" config parameter
@ -30,13 +28,6 @@ func PutTimeout(c *config.Config) time.Duration {
// PoolSize returns the value of "pool_size" config parameter
// from "replicator" section.
//
// Returns PoolSizeDefault if the value is non-positive integer.
func PoolSize(c *config.Config) int {
v := int(config.IntSafe(c.Sub(subsection), "pool_size"))
if v > 0 {
return v
}
return PoolSizeDefault
return int(config.IntSafe(c.Sub(subsection), "pool_size"))
}


@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty))
require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty))
require.Equal(t, 0, replicatorconfig.PoolSize(empty))
})
const path = "../../../../config/example/node"


@ -1,43 +0,0 @@
package rpcconfig
import (
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
)
const (
subsection = "rpc"
limitsSubsection = "limits"
)
type LimitConfig struct {
Methods []string
MaxOps int64
}
// Limits returns the "limits" config from "rpc" section.
func Limits(c *config.Config) []LimitConfig {
c = c.Sub(subsection).Sub(limitsSubsection)
var limits []LimitConfig
for i := uint64(0); ; i++ {
si := strconv.FormatUint(i, 10)
sc := c.Sub(si)
methods := config.StringSliceSafe(sc, "methods")
if len(methods) == 0 {
break
}
maxOps := config.IntSafe(sc, "max_ops")
if maxOps == 0 {
panic("no max operations for method group")
}
limits = append(limits, LimitConfig{methods, maxOps})
}
return limits
}


@ -1,53 +0,0 @@
package rpcconfig
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestRPCSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
require.Empty(t, Limits(configtest.EmptyConfig()))
})
t.Run("correct config", func(t *testing.T) {
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
limits := Limits(c)
require.Len(t, limits, 2)
limit0 := limits[0]
limit1 := limits[1]
require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
require.Equal(t, limit0.MaxOps, int64(1000))
require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
require.Equal(t, limit1.MaxOps, int64(10000))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
t.Run("no max operations", func(t *testing.T) {
const path = "testdata/node"
fileConfigTest := func(c *config.Config) {
require.Panics(t, func() { _ = Limits(c) })
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
}


@ -1,3 +0,0 @@
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000


@ -1,18 +0,0 @@
{
"rpc": {
"limits": [
{
"methods": [
"/neo.fs.v2.object.ObjectService/PutSingle",
"/neo.fs.v2.object.ObjectService/Put"
]
},
{
"methods": [
"/neo.fs.v2.object.ObjectService/Get"
],
"max_ops": 10000
}
]
}
}


@ -1,8 +0,0 @@
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000


@ -43,7 +43,7 @@ func initContainerService(_ context.Context, c *cfg) {
fatalOnErr(err)
cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
if cacheSize > 0 {
frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
}
@ -100,7 +100,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
// TODO: use owner directly from the event after neofs-contract#256 will become resolved
// but don't forget about the profit of reading the new container and caching it:
// creation success are most commonly tracked by polling GET op.
cnr, err := cnrSrc.Get(ctx, ev.ID)
cnr, err := cnrSrc.Get(ev.ID)
if err == nil {
containerCache.containerCache.set(ev.ID, cnr, nil)
} else {
@ -221,25 +221,20 @@ type morphContainerReader struct {
src containerCore.Source
lister interface {
ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
ContainersOf(*user.ID) ([]cid.ID, error)
}
}
func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) {
return x.src.Get(ctx, id)
func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
return x.src.Get(id)
}
func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) {
return x.src.DeletionInfo(ctx, id)
func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
return x.src.DeletionInfo(id)
}
func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) {
return x.lister.ContainersOf(ctx, id)
}
func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error {
return x.lister.IterateContainersOf(ctx, id, processCID)
func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
return x.lister.ContainersOf(id)
}
type morphContainerWriter struct {

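The callback-based lister in the hunk above lets a caller stream container IDs instead of materializing the whole slice returned by ContainersOf. A hypothetical use, written against the context-aware side of the hunk, where reader stands for the morphContainerReader and owner is a user.ID:

// Count a user's containers without collecting them into memory.
var count int
err := reader.IterateContainersOf(ctx, &owner, func(cid.ID) error {
	count++
	return nil
})
if err != nil {
	return err
}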

@ -7,12 +7,9 @@ import (
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"go.uber.org/zap"
"google.golang.org/grpc"
)
@ -53,14 +50,7 @@ func initControlService(ctx context.Context, c *cfg) {
return
}
c.cfgControlService.server = grpc.NewServer(
grpc.ChainUnaryInterceptor(
qos.NewSetCriticalIOTagUnaryServerInterceptor(),
metrics.NewUnaryServerInterceptor(),
tracing.NewUnaryServerInterceptor(),
),
// control service has no stream methods, so no stream interceptors added
)
c.cfgControlService.server = grpc.NewServer()
c.onShutdown(func() {
stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)


@ -1,7 +1,6 @@
package main
import (
"context"
"strings"
"time"
@ -43,7 +42,7 @@ func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int
}
}
func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) {
hit := false
startedAt := time.Now()
defer func() {
@ -56,7 +55,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160)
return result.subject, result.err
}
subj, err := m.subjProvider.GetSubject(ctx, addr)
subj, err := m.subjProvider.GetSubject(addr)
if err != nil {
if m.isCacheableError(err) {
m.subjCache.Add(addr, subjectWithError{
@ -70,7 +69,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160)
return subj, nil
}
func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
hit := false
startedAt := time.Now()
defer func() {
@ -83,7 +82,7 @@ func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.
return result.subject, result.err
}
subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr)
subjExt, err := m.subjProvider.GetSubjectExtended(addr)
if err != nil {
if m.isCacheableError(err) {
m.subjExtCache.Add(addr, subjectExtWithError{


@ -4,19 +4,14 @@ import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"time"
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@ -135,16 +130,12 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
serverOpts := []grpc.ServerOption{
grpc.MaxRecvMsgSize(maxRecvMsgSize),
grpc.ChainUnaryInterceptor(
qos.NewUnaryServerInterceptor(),
metrics.NewUnaryServerInterceptor(),
tracing.NewUnaryServerInterceptor(),
qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
),
grpc.ChainStreamInterceptor(
qos.NewStreamServerInterceptor(),
metrics.NewStreamServerInterceptor(),
tracing.NewStreamServerInterceptor(),
qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
),
}
@ -233,54 +224,3 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger
l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}
func initRPCLimiter(c *cfg) error {
var limits []limiting.KeyLimit
for _, l := range rpcconfig.Limits(c.appCfg) {
limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
}
if err := validateRPCLimits(c, limits); err != nil {
return fmt.Errorf("validate RPC limits: %w", err)
}
limiter, err := limiting.NewSemaphoreLimiter(limits)
if err != nil {
return fmt.Errorf("create RPC limiter: %w", err)
}
c.cfgGRPC.limiter.Store(limiter)
return nil
}
func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error {
availableMethods := getAvailableMethods(c.cfgGRPC.servers)
for _, limit := range limits {
for _, method := range limit.Keys {
if _, ok := availableMethods[method]; !ok {
return fmt.Errorf("set limit on an unknown method %q", method)
}
}
}
return nil
}
func getAvailableMethods(servers []grpcServer) map[string]struct{} {
res := make(map[string]struct{})
for _, server := range servers {
for _, method := range getMethodsForServer(server.Server) {
res[method] = struct{}{}
}
}
return res
}
func getMethodsForServer(server *grpc.Server) []string {
var res []string
for service, info := range server.GetServiceInfo() {
for _, method := range info.Methods {
res = append(res, fmt.Sprintf("/%s/%s", service, method.Name))
}
}
return res
}
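`getMethodsForServer` above relies on the standard `grpc.Server.GetServiceInfo` API to enumerate every registered method as a `/<service>/<method>` string — the same keys the `rpc.limits` configuration refers to. A minimal sketch of that enumeration, using the stock health service only so that something is registered:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	srv := grpc.NewServer()
	// Register any service so GetServiceInfo has something to report.
	healthpb.RegisterHealthServer(srv, health.NewServer())

	// Build "/<service>/<method>" names, e.g. "/grpc.health.v1.Health/Check";
	// the limits config identifies methods by exactly this form.
	for service, info := range srv.GetServiceInfo() {
		for _, m := range info.Methods {
			fmt.Printf("/%s/%s\n", service, m.Name)
		}
	}
}
```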

View file

@ -101,7 +101,6 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) })
initAccessPolicyEngine(ctx, c)
initAndLog(ctx, c, "access policy engine", func(c *cfg) {
@ -117,8 +116,6 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(ctx, c, "apemanager", initAPEManagerService)
initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })
initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) })
initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}

View file

@ -151,7 +151,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
}
func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil {
return err
}

View file

@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
}
}
s.setControlNetmapStatus(ctrlNetSt)
s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
}
// sets the current node state to the given value. Subsequent cfg.bootstrap
@ -239,7 +239,7 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
// initNetmapState inits current Network map state.
// Must be called after Morph components initialization.
func initNetmapState(ctx context.Context, c *cfg) {
epoch, err := c.cfgNetmap.wrapper.Epoch(ctx)
epoch, err := c.cfgNetmap.wrapper.Epoch()
fatalOnErrDetails("could not initialize current epoch number", err)
var ni *netmapSDK.NodeInfo
@ -278,7 +278,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
}
func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx)
nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
if err != nil {
return nil, err
}
@ -291,7 +291,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
}
}
node, err := c.netmapLocalNodeState(ctx, epoch)
node, err := c.netmapLocalNodeState(epoch)
if err != nil {
return nil, err
}
@ -312,9 +312,9 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
return candidate, nil
}
func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
// calculate current network state
nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch)
nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch)
if err != nil {
return nil, err
}
@ -376,8 +376,8 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro
return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
}
func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) {
epoch, err := c.netMapSource.Epoch(ctx)
func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
epoch, err := c.netMapSource.Epoch()
if err != nil {
return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err)
}
@ -390,7 +390,7 @@ func (c *cfg) ForceMaintenance(ctx context.Context) error {
}
func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx)
netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
if err != nil {
err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
} else if !netSettings.MaintenanceModeAllowed {
@ -423,7 +423,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.
if err != nil {
return err
}
return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res)
}
type netInfo struct {
@ -438,7 +438,7 @@ type netInfo struct {
msPerBlockRdr func() (int64, error)
}
func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) {
func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
magic, err := n.magic.MagicNumber()
if err != nil {
return nil, err
@ -448,7 +448,7 @@ func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.Net
ni.SetCurrentEpoch(n.netState.CurrentEpoch())
ni.SetMagicNumber(magic)
netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx)
netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration()
if err != nil {
return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err)
}

View file

@ -54,10 +54,10 @@ type objectSvc struct {
patch *patchsvc.Service
}
func (c *cfg) MaxObjectSize(ctx context.Context) uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx)
func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.Error(err),
)
}
@ -122,8 +122,8 @@ type innerRingFetcherWithNotary struct {
sidechain *morphClient.Client
}
func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) {
keys, err := fn.sidechain.NeoFSAlphabetList(ctx)
func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
keys, err := fn.sidechain.NeoFSAlphabetList()
if err != nil {
return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err)
}
@ -168,7 +168,7 @@ func initObjectService(c *cfg) {
sPatch := createPatchSvc(sGet, sPut)
// build service pipeline
// grpc | audit | qos | <metrics> | signature | response | acl | ape | split
// grpc | audit | <metrics> | signature | response | acl | ape | split
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
@ -191,8 +191,7 @@ func initObjectService(c *cfg) {
c.shared.metricsSvc = objectService.NewMetricCollector(
signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService)
auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit)
server := objectTransportGRPC.New(auditSvc)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
@ -216,7 +215,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
prm.MarkAsGarbage(addr)
prm.WithForceRemoval()
return ls.Inhume(ctx, prm)
_, err := ls.Inhume(ctx, prm)
return err
}
remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
@ -266,7 +266,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
var inhumePrm engine.InhumePrm
inhumePrm.MarkAsGarbage(addr)
if err := ls.Inhume(ctx, inhumePrm); err != nil {
_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.Error(err),
)
@ -326,6 +327,7 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
c,
c.cfgNetmap.state,
irFetcher,
objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
objectwriter.WithLogger(c.log),
objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
)
@ -474,7 +476,8 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
prm.WithTarget(tombstone, addrs...)
return e.engine.Inhume(ctx, prm)
_, err := e.engine.Inhume(ctx, prm)
return err
}
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {

View file

@ -1,97 +0,0 @@
package main
import (
"bytes"
"context"
qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
)
type cfgQoSService struct {
netmapSource netmap.Source
logger *logger.Logger
allowedCriticalPubs [][]byte
allowedInternalPubs [][]byte
}
func initQoSService(c *cfg) {
criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg)
internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg)
rawCriticalPubs := make([][]byte, 0, len(criticalPubs))
rawInternalPubs := make([][]byte, 0, len(internalPubs))
for i := range criticalPubs {
rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes())
}
for i := range internalPubs {
rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes())
}
c.cfgQoSService = cfgQoSService{
netmapSource: c.netMapSource,
logger: c.log,
allowedCriticalPubs: rawCriticalPubs,
allowedInternalPubs: rawInternalPubs,
}
}
func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
rawTag, defined := qosTagging.IOTagFromContext(ctx)
if !defined {
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
ioTag, err := qos.FromRawString(rawTag)
if err != nil {
s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
switch ioTag {
case qos.IOTagClient:
return ctx
case qos.IOTagCritical:
for _, pk := range s.allowedCriticalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal:
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
default:
s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
}
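The removed `AdjustIncomingTag` normalizes the IO tag carried in the request context: unknown or unauthorized tags are downgraded to `client`, while `critical` and `internal` are kept only for allowed public keys or netmap nodes. A rough sketch of how such a hook typically plugs into a unary interceptor; the `TagAdjuster` interface, `passthroughAdjuster` and `keyFromRequest` below are hypothetical illustrations, not the repository's actual wiring.

```go
package main

import (
	"context"

	"google.golang.org/grpc"
)

// TagAdjuster mirrors the AdjustIncomingTag method shown above: it rewrites
// the IO tag in the context based on the key that signed the request.
type TagAdjuster interface {
	AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
}

// passthroughAdjuster is a trivial adjuster used only to keep the sketch runnable.
type passthroughAdjuster struct{}

func (passthroughAdjuster) AdjustIncomingTag(ctx context.Context, _ []byte) context.Context {
	return ctx
}

// keyFromRequest is a placeholder: in the real services the public key is
// taken from the request's signature section, which is omitted here.
func keyFromRequest(_ any) []byte { return nil }

// NewAdjustTagInterceptor returns a unary interceptor that normalizes the
// incoming IO tag before the handler runs.
func NewAdjustTagInterceptor(adj TagAdjuster) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		ctx = adj.AdjustIncomingTag(ctx, keyFromRequest(req))
		return handler(ctx, req)
	}
}

func main() {
	_ = grpc.NewServer(grpc.ChainUnaryInterceptor(NewAdjustTagInterceptor(passthroughAdjuster{})))
}
```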

View file

@ -29,16 +29,16 @@ type cnrSource struct {
cli *containerClient.Client
}
func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) {
return c.src.Get(ctx, id)
func (c cnrSource) Get(id cid.ID) (*container.Container, error) {
return c.src.Get(id)
}
func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) {
return c.src.DeletionInfo(ctx, cid)
func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) {
return c.src.DeletionInfo(cid)
}
func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) {
return c.cli.ContainersOf(ctx, nil)
func (c cnrSource) List() ([]cid.ID, error) {
return c.cli.ContainersOf(nil)
}
func initTreeService(c *cfg) {
@ -72,7 +72,7 @@ func initTreeService(c *cfg) {
)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService))
tree.RegisterTreeServiceServer(s, c.treeService)
})
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {

View file

@ -87,16 +87,14 @@ FROSTFS_REPLICATOR_POOL_SIZE=10
FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
# Object service section
FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE"
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_0_MAX_OPS=1000
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
# Storage engine section
FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
## 0 shard
### Flag to refill Metabase from BlobStor
@ -156,47 +154,6 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
#### Limit of concurrent workers collecting expired objects by the garbage collector
FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
#### Limits config
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500
## 1 shard
### Flag to refill Metabase from BlobStor
@ -268,6 +225,3 @@ FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
FROSTFS_MULTINET_BALANCER=roundrobin
FROSTFS_MULTINET_RESTRICT=false
FROSTFS_MULTINET_FALLBACK_DELAY=350ms
FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"

View file

@ -134,30 +134,16 @@
"tombstone_lifetime": 10
},
"put": {
"remote_pool_size": 100,
"local_pool_size": 200,
"skip_session_token_issuer_verification": true
},
"get": {
"priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"]
}
},
"rpc": {
"limits": [
{
"methods": [
"/neo.fs.v2.object.ObjectService/PutSingle",
"/neo.fs.v2.object.ObjectService/Put"
],
"max_ops": 1000
},
{
"methods": [
"/neo.fs.v2.object.ObjectService/Get"
],
"max_ops": 10000
}
]
},
"storage": {
"shard_pool_size": 15,
"shard_ro_error_threshold": 100,
"shard": {
"0": {
@ -220,76 +206,6 @@
"remover_sleep_interval": "2m",
"expired_collector_batch_size": 1500,
"expired_collector_worker_count": 15
},
"limits": {
"read": {
"max_running_ops": 10000,
"max_waiting_ops": 1000,
"idle_timeout": "30s",
"tags": [
{
"tag": "internal",
"weight": 20,
"limit_ops": 0,
"reserved_ops": 1000
},
{
"tag": "client",
"weight": 70,
"reserved_ops": 10000
},
{
"tag": "background",
"weight": 5,
"limit_ops": 10000,
"reserved_ops": 0
},
{
"tag": "writecache",
"weight": 5,
"limit_ops": 25000
},
{
"tag": "policer",
"weight": 5,
"limit_ops": 25000
}
]
},
"write": {
"max_running_ops": 1000,
"max_waiting_ops": 100,
"idle_timeout": "45s",
"tags": [
{
"tag": "internal",
"weight": 200,
"limit_ops": 0,
"reserved_ops": 100
},
{
"tag": "client",
"weight": 700,
"reserved_ops": 1000
},
{
"tag": "background",
"weight": 50,
"limit_ops": 1000,
"reserved_ops": 0
},
{
"tag": "writecache",
"weight": 50,
"limit_ops": 2500
},
{
"tag": "policer",
"weight": 50,
"limit_ops": 2500
}
]
}
}
},
"1": {
@ -389,19 +305,5 @@
"balancer": "roundrobin",
"restrict": false,
"fallback_delay": "350ms"
},
"qos": {
"critical": {
"authorized_keys": [
"035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11",
"028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6"
]
},
"internal": {
"authorized_keys": [
"02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2",
"031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a"
]
}
}
}

View file

@ -79,8 +79,7 @@ contracts: # side chain NEOFS contract script hashes; optional, override values
morph:
dial_timeout: 30s # timeout for side chain NEO RPC client connection
cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls).
# Negative value disables caching. A zero value sets the default value.
cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
# Default value: block time. It is recommended to have this value less than or equal to the block time.
# Cached entities: containers, container lists, eACL tables.
container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache.
@ -117,24 +116,17 @@ object:
delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
put:
remote_pool_size: 100 # number of async workers for remote PUT operations
local_pool_size: 200 # number of async workers for local PUT operations
skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
get:
priority: # list of metrics of nodes for prioritization
- $attribute:ClusterName
- $attribute:UN-LOCODE
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
max_ops: 1000
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000
storage:
# note: shard configuration can be omitted for relay node (see `node.relay`)
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
shard:
@ -226,52 +218,6 @@ storage:
expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
limits:
read:
max_running_ops: 10000
max_waiting_ops: 1000
idle_timeout: 30s
tags:
- tag: internal
weight: 20
limit_ops: 0
reserved_ops: 1000
- tag: client
weight: 70
reserved_ops: 10000
- tag: background
weight: 5
limit_ops: 10000
reserved_ops: 0
- tag: writecache
weight: 5
limit_ops: 25000
- tag: policer
weight: 5
limit_ops: 25000
write:
max_running_ops: 1000
max_waiting_ops: 100
idle_timeout: 45s
tags:
- tag: internal
weight: 200
limit_ops: 0
reserved_ops: 100
- tag: client
weight: 700
reserved_ops: 1000
- tag: background
weight: 50
limit_ops: 1000
reserved_ops: 0
- tag: writecache
weight: 50
limit_ops: 2500
- tag: policer
weight: 50
limit_ops: 2500
1:
writecache:
path: tmp/1/cache # write-cache root directory
@ -323,13 +269,3 @@ multinet:
balancer: roundrobin
restrict: false
fallback_delay: 350ms
qos:
critical:
authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
internal:
authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag
- 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2
- 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a

View file

@ -42,6 +42,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080",
@ -97,6 +98,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082",
@ -152,6 +154,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084",
@ -207,6 +210,7 @@
"FROSTFS_MORPH_DIAL_TIMEOUT":"30s",
"FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws",
"FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0",
"FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s",
"FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json",
"FROSTFS_NODE_WALLET_PASSWORD":"",
"FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086",

View file

@ -95,15 +95,19 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
## Post-release
### Prepare and push images to a Docker registry (automated)
### Prepare and push images to Docker Hub (if not automated)
Create Docker images for all applications and push them into the container registry
(executed automatically in Forgejo Actions upon pushing a release tag):
Create Docker images for all applications and push them into Docker Hub
(requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
```shell
$ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
$ make images
$ make push-images
$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
```
### Make a proper release (if not automated)

View file

@ -26,8 +26,7 @@ There are some custom types used for brevity:
| `storage` | [Storage engine configuration](#storage-section) |
| `runtime` | [Runtime configuration](#runtime-section) |
| `audit` | [Audit configuration](#audit-section) |
| `multinet` | [Multinet configuration](#multinet-section) |
| `qos` | [QoS configuration](#qos-section) |
| `multinet` | [Multinet configuration](#multinet-section) |
# `control` section
```yaml
@ -170,6 +169,7 @@ Local storage engine configuration.
| Parameter | Type | Default value | Description |
|----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------|
| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. |
| `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. |
| `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. |
| `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. |
@ -194,7 +194,6 @@ The following table describes configuration for each shard.
| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. |
| `gc` | [GC config](#gc-subsection) | | GC configuration. |
| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. |
### `blobstor` subsection
@ -301,64 +300,6 @@ writecache:
| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
### `limits` subsection
```yaml
limits:
max_read_running_ops: 10000
max_read_waiting_ops: 1000
max_write_running_ops: 1000
max_write_waiting_ops: 100
read:
- tag: internal
weight: 20
limit_ops: 0
reserved_ops: 1000
- tag: client
weight: 70
reserved_ops: 10000
- tag: background
weight: 5
limit_ops: 10000
reserved_ops: 0
- tag: writecache
weight: 5
limit_ops: 25000
- tag: policer
weight: 5
limit_ops: 25000
write:
- tag: internal
weight: 200
limit_ops: 0
reserved_ops: 100
- tag: client
weight: 700
reserved_ops: 1000
- tag: background
weight: 50
limit_ops: 1000
reserved_ops: 0
- tag: writecache
weight: 50
limit_ops: 2500
- tag: policer
weight: 50
limit_ops: 2500
```
| Parameter | Type | Default value | Description |
| ----------------------- | -------- | -------------- | --------------------------------------------------------------------------------------------------------------- |
| `max_read_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. |
| `max_read_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
| `max_write_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
| `max_write_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. |
| `read` | `[]tag` | empty | Array of shard read settings for tags. |
| `write` | `[]tag` | empty | Array of shard write settings for tags. |
| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified either for all tags or for none. |
| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
# `node` section
@ -454,16 +395,18 @@ replicator:
pool_size: 10
```
| Parameter | Type | Default value | Description |
|---------------|------------|---------------|---------------------------------------------|
| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. |
| Parameter | Type | Default value | Description |
|---------------|------------|----------------------------------------|---------------------------------------------|
| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. |
# `object` section
Contains object-service related parameters.
```yaml
object:
put:
remote_pool_size: 100
get:
priority:
- $attribute:ClusterName
@ -472,29 +415,10 @@ object:
| Parameter | Type | Default value | Description |
|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------|
| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
| `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. |
# `rpc` section
Contains limits on the number of active RPCs for the specified method(s).
```yaml
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
max_ops: 1000
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000
```
| Parameter | Type | Default value | Description |
|------------------|------------|---------------|--------------------------------------------------------------|
| `limits.max_ops` | `int` | | Maximum number of active RPCs allowed for the given method(s) |
| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit |
# `runtime` section
Contains runtime parameters.
@ -547,20 +471,3 @@ multinet:
| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
| `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. |
# `qos` section
```yaml
qos:
critical:
authorized_keys:
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
internal:
authorized_keys:
- 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
- 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
```
| Parameter | Type | Default value | Description |
| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- |
| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. |
| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. |

Some files were not shown because too many files have changed in this diff.