Compare commits

..

12 commits

Author SHA1 Message Date
7e3f2fca47 [#1563] tree: Wrap only ChainRouterError errors with ObjectAccessDenied
All checks were successful
Vulncheck / Vulncheck (pull_request) Successful in 3m25s
Pre-commit hooks / Pre-commit (pull_request) Successful in 4m26s
Tests and linters / gopls check (pull_request) Successful in 4m32s
Tests and linters / Tests with -race (pull_request) Successful in 7m7s
Tests and linters / Run gofumpt (pull_request) Successful in 8m50s
DCO action / DCO (pull_request) Successful in 9m21s
Build / Build Components (pull_request) Successful in 10m18s
Tests and linters / Tests (pull_request) Successful in 10m40s
Tests and linters / Staticcheck (pull_request) Successful in 10m52s
Tests and linters / Lint (pull_request) Successful in 11m20s
* Such wrapping helps to differentiate between logical check errors and internal
  server errors (see the sketch after this commit group).

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-16 16:19:04 +03:00
25ed595354 [#1563] object: Wrap only ChainRouterError errors with ObjectAccessDenied
* Such wrapping helps to differentiate between logical check errors and internal
  server errors.

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-16 16:18:58 +03:00
fb2f40aea1 [#1563] ape: Introduce ChainRouterError error type
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-16 16:18:51 +03:00
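
The three [#1563] commits above introduce a dedicated ChainRouterError type and wrap only that error into an access-denied status, so a logical policy denial can be distinguished from an internal server failure. A minimal, self-contained Go sketch of the pattern (the type names below are stand-ins, not the repository's actual API):

// Illustrative sketch only; ChainRouterError and ObjectAccessDenied are
// stand-ins assumed for the example, not the real frostfs-node types.
package main

import (
	"errors"
	"fmt"
)

// ChainRouterError marks a logical APE check failure ("denied by a chain rule").
type ChainRouterError struct{ Rule string }

func (e *ChainRouterError) Error() string { return "denied by chain rule " + e.Rule }

// ObjectAccessDenied is a stand-in for the API status returned to the client.
type ObjectAccessDenied struct{ Reason string }

func (e *ObjectAccessDenied) Error() string { return "access denied: " + e.Reason }

// toStatus wraps only ChainRouterError into ObjectAccessDenied, so a policy
// denial can be told apart from a server-side failure (e.g. a morph RPC error).
func toStatus(err error) error {
	var cre *ChainRouterError
	if errors.As(err, &cre) {
		return &ObjectAccessDenied{Reason: cre.Error()}
	}
	return err // internal error, surfaced as-is
}

func main() {
	fmt.Println(toStatus(&ChainRouterError{Rule: "deny-put"})) // wrapped into access denied
	fmt.Println(toStatus(errors.New("contract unavailable")))  // stays an internal error
}
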
15b4288d80 [#1548] metabase: Check if EC parent is removed or expired
All checks were successful
Vulncheck / Vulncheck (pull_request) Successful in 2m32s
Pre-commit hooks / Pre-commit (pull_request) Successful in 3m24s
Tests and linters / Run gofumpt (pull_request) Successful in 3m27s
DCO action / DCO (pull_request) Successful in 3m45s
Tests and linters / gopls check (pull_request) Successful in 4m3s
Build / Build Components (pull_request) Successful in 4m23s
Tests and linters / Staticcheck (pull_request) Successful in 4m52s
Tests and linters / Tests (pull_request) Successful in 5m29s
Tests and linters / Lint (pull_request) Successful in 5m44s
Tests and linters / Tests with -race (pull_request) Successful in 6m40s
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-12-11 10:42:12 +03:00
6ea8f2b23c [#1548] engine: Rename parent -> ecParent
Parent could mean either the split parent or the EC parent; in this case it is the EC parent only.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-12-11 10:41:32 +03:00
47f12f8440 [#1548] policer: Do not replicate EC chunk if object already removed
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-12-11 10:41:00 +03:00
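
The three [#1548] commits above share one idea: an EC chunk should not be replicated once its EC parent is already removed or expired. A rough, hedged sketch of that check follows; all types and helpers below are made up for illustration.

// Illustrative sketch only; the metabase lookup and status values are assumptions.
package main

import "fmt"

type objectStatus int

const (
	statusAvailable objectStatus = iota
	statusRemoved
	statusExpired
)

// metabase is a stand-in for the real metabase lookup.
type metabase map[string]objectStatus

func (m metabase) ecParentStatus(ecParentID string) objectStatus {
	if st, ok := m[ecParentID]; ok {
		return st
	}
	return statusAvailable
}

// shouldReplicateChunk returns false when the chunk's EC parent no longer
// needs to exist on the network.
func shouldReplicateChunk(mb metabase, ecParentID string) bool {
	switch mb.ecParentStatus(ecParentID) {
	case statusRemoved, statusExpired:
		return false
	default:
		return true
	}
}

func main() {
	mb := metabase{"parent-A": statusRemoved, "parent-B": statusAvailable}
	fmt.Println(shouldReplicateChunk(mb, "parent-A")) // false: parent removed
	fmt.Println(shouldReplicateChunk(mb, "parent-B")) // true
}
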
edc1824c23 [#1544] go.mod: Update sdk-go
All checks were successful
DCO action / DCO (pull_request) Successful in 1m48s
Tests and linters / Run gofumpt (pull_request) Successful in 1m48s
Tests and linters / Staticcheck (pull_request) Successful in 3m8s
Vulncheck / Vulncheck (pull_request) Successful in 3m9s
Build / Build Components (pull_request) Successful in 3m45s
Pre-commit hooks / Pre-commit (pull_request) Successful in 4m5s
Tests and linters / Lint (pull_request) Successful in 4m28s
Tests and linters / Tests (pull_request) Successful in 4m32s
Tests and linters / gopls check (pull_request) Successful in 4m44s
Tests and linters / Tests with -race (pull_request) Successful in 5m17s
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-06 12:50:39 +03:00
cf48dfd55e [#1524] tree: Make APE check errors get wrapped into API status
All checks were successful
Tests and linters / Run gofumpt (pull_request) Successful in 2m11s
DCO action / DCO (pull_request) Successful in 2m26s
Vulncheck / Vulncheck (pull_request) Successful in 2m37s
Pre-commit hooks / Pre-commit (pull_request) Successful in 2m55s
Build / Build Components (pull_request) Successful in 3m21s
Tests and linters / gopls check (pull_request) Successful in 3m25s
Tests and linters / Staticcheck (pull_request) Successful in 3m55s
Tests and linters / Lint (pull_request) Successful in 4m36s
Tests and linters / Tests (pull_request) Successful in 5m1s
Tests and linters / Tests with -race (pull_request) Successful in 6m4s
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-04 09:57:46 +03:00
274ac61236 [#1524] ape: Make APE checker return errors without a status
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-04 09:57:41 +03:00
892542d6e3 [#1532] node: Allow omitting metabase.path if the shard is disabled
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-03 13:11:16 +00:00
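
A minimal sketch of the behaviour described by [#1532], assuming hypothetical configuration fields (the real config structure may differ): validation only demands metabase.path for shards that are not disabled.

// Illustrative sketch only; the field names are assumptions, not the node's
// real configuration structure.
package main

import "fmt"

type shardCfg struct {
	Disabled     bool
	MetabasePath string
}

func validateShards(shards []shardCfg) error {
	for i, sh := range shards {
		if sh.Disabled {
			continue // disabled shard: metabase.path may be omitted
		}
		if sh.MetabasePath == "" {
			return fmt.Errorf("shard %d: metabase.path is required", i)
		}
	}
	return nil
}

func main() {
	cfg := []shardCfg{
		{Disabled: true},                      // ok: no metabase path needed
		{MetabasePath: "/srv/meta/shard1.db"}, // ok: path present
	}
	if err := validateShards(cfg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("config valid")
}
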
af3d6368b0 [#1530] cli: Keep the order of required nodes in the result of the object nodes command
All checks were successful
Tests and linters / Run gofumpt (pull_request) Successful in 1m54s
DCO action / DCO (pull_request) Successful in 2m23s
Vulncheck / Vulncheck (pull_request) Successful in 2m34s
Pre-commit hooks / Pre-commit (pull_request) Successful in 3m2s
Build / Build Components (pull_request) Successful in 3m11s
Tests and linters / gopls check (pull_request) Successful in 3m26s
Tests and linters / Staticcheck (pull_request) Successful in 3m49s
Tests and linters / Lint (pull_request) Successful in 4m34s
Tests and linters / Tests (pull_request) Successful in 4m57s
Tests and linters / Tests with -race (pull_request) Successful in 6m28s
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-12-03 11:45:52 +03:00
36fe470956 [#1530] node: Keep the order of equal elements when sorting by priority metrics
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-12-03 11:45:38 +03:00
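
Both [#1530] commits rely on order-preserving sorting: nodes that compare equal by a priority metric must keep their original placement order. A minimal Go sketch of that property using the standard library's stable sort (the node fields are illustrative):

// Illustrative sketch only; node layout and priority values are made up.
package main

import (
	"fmt"
	"sort"
)

type node struct {
	Addr     string
	Priority int
}

func main() {
	nodes := []node{
		{"node-1", 0}, {"node-2", 1}, {"node-3", 0}, {"node-4", 1},
	}

	// SliceStable preserves the input order of node-1/node-3 and node-2/node-4,
	// which compare equal by Priority; a plain sort.Slice may swap them.
	sort.SliceStable(nodes, func(i, j int) bool {
		return nodes[i].Priority < nodes[j].Priority
	})

	fmt.Println(nodes) // [{node-1 0} {node-3 0} {node-2 1} {node-4 1}]
}
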
410 changed files with 4074 additions and 6313 deletions

View file

@ -1,28 +0,0 @@
name: OCI image
on:
push:
workflow_dispatch:
jobs:
image:
name: Build container images
runs-on: docker
container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
steps:
- name: Clone git repo
uses: actions/checkout@v3
- name: Build OCI image
run: make images
- name: Push image to OCI registry
run: |
echo "$REGISTRY_PASSWORD" \
| docker login --username truecloudlab --password-stdin git.frostfs.info
make push-images
if: >-
startsWith(github.ref, 'refs/tags/v') &&
(github.event_name == 'workflow_dispatch' || github.event_name == 'push')
env:
REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}

View file

@ -19,7 +19,6 @@ jobs:
uses: actions/setup-go@v3 uses: actions/setup-go@v3
with: with:
go-version: '1.23' go-version: '1.23'
check-latest: true
- name: Install govulncheck - name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest run: go install golang.org/x/vuln/cmd/govulncheck@latest

View file

@ -89,7 +89,5 @@ linters:
- protogetter - protogetter
- intrange - intrange
- tenv - tenv
- unconvert
- unparam
disable-all: true disable-all: true
fast: false fast: false

View file

@ -1,3 +0,0 @@
.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
.forgejo/.* @potyarkin
Makefile @potyarkin

View file

@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22 GO_VERSION ?= 1.22
LINT_VERSION ?= 1.62.2 LINT_VERSION ?= 1.62.0
TRUECLOUDLAB_LINT_VERSION ?= 0.0.8 TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
PROTOC_VERSION ?= 25.0 PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
@ -139,15 +139,6 @@ images: image-storage image-ir image-cli image-adm
# Build dirty local Docker images # Build dirty local Docker images
dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
# Push FrostFS components' docker image to the registry
push-image-%:
@echo "⇒ Publish FrostFS $* docker image "
@docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
# Push all Docker images to the registry
.PHONY: push-images
push-images: push-image-storage push-image-ir push-image-cli push-image-adm
# Run `make %` in Golang container # Run `make %` in Golang container
docker/%: docker/%:
docker run --rm -t \ docker run --rm -t \
@ -279,12 +270,10 @@ env-up: all
echo "Frostfs contracts not found"; exit 1; \ echo "Frostfs contracts not found"; exit 1; \
fi fi
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH} ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \ ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
--storage-wallet ./dev/storage/wallet01.json \ ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
--storage-wallet ./dev/storage/wallet02.json \ ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
--storage-wallet ./dev/storage/wallet03.json \ ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
--storage-wallet ./dev/storage/wallet04.json
@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \ @if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
make locode-download; \ make locode-download; \
fi fi

View file

@ -16,16 +16,9 @@ const (
EndpointFlagDesc = "N3 RPC node endpoint" EndpointFlagDesc = "N3 RPC node endpoint"
EndpointFlagShort = "r" EndpointFlagShort = "r"
WalletPath = "wallet"
WalletPathShorthand = "w"
WalletPathUsage = "Path to the wallet"
AlphabetWalletsFlag = "alphabet-wallets" AlphabetWalletsFlag = "alphabet-wallets"
AlphabetWalletsFlagDesc = "Path to alphabet wallets dir" AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
AdminWalletPath = "wallet-admin"
AdminWalletUsage = "Path to the admin wallet"
LocalDumpFlag = "local-dump" LocalDumpFlag = "local-dump"
ProtoConfigPath = "protocol" ProtoConfigPath = "protocol"
ContractsInitFlag = "contracts" ContractsInitFlag = "contracts"

View file

@ -28,7 +28,6 @@ const (
var ( var (
errNoPathsFound = errors.New("no metabase paths found") errNoPathsFound = errors.New("no metabase paths found")
errNoMorphEndpointsFound = errors.New("no morph endpoints found") errNoMorphEndpointsFound = errors.New("no morph endpoints found")
errUpgradeFailed = errors.New("upgrade failed")
) )
var UpgradeCmd = &cobra.Command{ var UpgradeCmd = &cobra.Command{
@ -92,19 +91,14 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err := eg.Wait(); err != nil { if err := eg.Wait(); err != nil {
return err return err
} }
allSuccess := true
for mb, ok := range result { for mb, ok := range result {
if ok { if ok {
cmd.Println(mb, ": success") cmd.Println(mb, ": success")
} else { } else {
cmd.Println(mb, ": failed") cmd.Println(mb, ": failed")
allSuccess = false
} }
} }
if allSuccess {
return nil return nil
}
return errUpgradeFailed
} }
func getMetabasePaths(appCfg *config.Config) ([]string, error) { func getMetabasePaths(appCfg *config.Config) ([]string, error) {
@ -141,7 +135,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er
if err != nil { if err != nil {
return nil, fmt.Errorf("resolve container contract hash: %w", err) return nil, fmt.Errorf("resolve container contract hash: %w", err)
} }
cc, err := morphcontainer.NewFromMorph(cli, sh, 0) cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
if err != nil { if err != nil {
return nil, fmt.Errorf("create morph container client: %w", err) return nil, fmt.Errorf("create morph container client: %w", err)
} }

View file

@ -3,8 +3,6 @@ package ape
import ( import (
"errors" "errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
@ -55,15 +53,16 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
} }
func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) { func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
c, err := helper.NewRemoteClient(viper.GetViper()) c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
inv := invoker.New(c, nil) inv := invoker.New(c, nil)
var ch util.Uint160
r := management.NewReader(inv) r := management.NewReader(inv)
nnsCs, err := helper.GetContractByID(r, 1) nnsCs, err := helper.GetContractByID(r, 1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract)) ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err) commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
invokerAdapter := &invokerAdapter{ invokerAdapter := &invokerAdapter{
@ -75,11 +74,10 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
} }
func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) { func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
c, err := helper.NewRemoteClient(viper.GetViper()) c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName)
ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
var ch util.Uint160 var ch util.Uint160

View file

@ -51,7 +51,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
nmHash util.Uint160 nmHash util.Uint160
) )
c, err := helper.NewRemoteClient(viper.GetViper()) c, err := helper.GetN3Client(viper.GetViper())
if err != nil { if err != nil {
return err return err
} }

View file

@ -26,7 +26,7 @@ import (
const forceConfigSet = "force" const forceConfigSet = "force"
func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
c, err := helper.NewRemoteClient(viper.GetViper()) c, err := helper.GetN3Client(viper.GetViper())
if err != nil { if err != nil {
return fmt.Errorf("can't create N3 client: %w", err) return fmt.Errorf("can't create N3 client: %w", err)
} }

View file

@ -76,7 +76,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("invalid filename: %w", err) return fmt.Errorf("invalid filename: %w", err)
} }
c, err := helper.NewRemoteClient(viper.GetViper()) c, err := helper.GetN3Client(viper.GetViper())
if err != nil { if err != nil {
return fmt.Errorf("can't create N3 client: %w", err) return fmt.Errorf("can't create N3 client: %w", err)
} }
@ -157,7 +157,7 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
} }
func listContainers(cmd *cobra.Command, _ []string) error { func listContainers(cmd *cobra.Command, _ []string) error {
c, err := helper.NewRemoteClient(viper.GetViper()) c, err := helper.GetN3Client(viper.GetViper())
if err != nil { if err != nil {
return fmt.Errorf("can't create N3 client: %w", err) return fmt.Errorf("can't create N3 client: %w", err)
} }

View file

@ -36,7 +36,7 @@ type contractDumpInfo struct {
} }
func dumpContractHashes(cmd *cobra.Command, _ []string) error { func dumpContractHashes(cmd *cobra.Command, _ []string) error {
c, err := helper.NewRemoteClient(viper.GetViper()) c, err := helper.GetN3Client(viper.GetViper())
if err != nil { if err != nil {
return fmt.Errorf("can't create N3 client: %w", err) return fmt.Errorf("can't create N3 client: %w", err)
} }

View file

@ -39,11 +39,6 @@ const (
groupIDFlag = "group-id" groupIDFlag = "group-id"
rootNamespacePlaceholder = "<root>" rootNamespacePlaceholder = "<root>"
keyFlag = "key"
keyDescFlag = "Key for storing a value in the subject's KV storage"
valueFlag = "value"
valueDescFlag = "Value to be stored in the subject's KV storage"
) )
var ( var (
@ -157,23 +152,6 @@ var (
}, },
Run: frostfsidListGroupSubjects, Run: frostfsidListGroupSubjects,
} }
frostfsidSetKVCmd = &cobra.Command{
Use: "set-kv",
Short: "Store a key-value pair in the subject's KV storage",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidSetKV,
}
frostfsidDeleteKVCmd = &cobra.Command{
Use: "delete-kv",
Short: "Delete a value from the subject's KV storage",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
},
Run: frostfsidDeleteKV,
}
) )
func initFrostfsIDCreateNamespaceCmd() { func initFrostfsIDCreateNamespaceCmd() {
@ -259,21 +237,6 @@ func initFrostfsIDListGroupSubjectsCmd() {
frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
} }
func initFrostfsIDSetKVCmd() {
Cmd.AddCommand(frostfsidSetKVCmd)
frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag)
frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag)
}
func initFrostfsIDDeleteKVCmd() {
Cmd.AddCommand(frostfsidDeleteKVCmd)
frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag)
}
func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) { func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
ns := getFrostfsIDNamespace(cmd) ns := getFrostfsIDNamespace(cmd)
@ -291,7 +254,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
reader := frostfsidrpclient.NewReader(inv, hash) reader := frostfsidrpclient.NewReader(inv, hash)
sessionID, it, err := reader.ListNamespaces() sessionID, it, err := reader.ListNamespaces()
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
items, err := readIterator(inv, &it, sessionID) items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
namespaces, err := frostfsidclient.ParseNamespaces(items) namespaces, err := frostfsidclient.ParseNamespaces(items)
@ -343,7 +306,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListNamespaceSubjects(ns) sessionID, it, err := reader.ListNamespaceSubjects(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID)) subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err) commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) }) sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })
@ -357,7 +320,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListSubjects() sessionID, it, err := reader.ListSubjects()
commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
items, err := readIterator(inv, &it, sessionID) items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subj, err := frostfsidclient.ParseSubject(items) subj, err := frostfsidclient.ParseSubject(items)
@ -403,7 +366,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroups(ns) sessionID, it, err := reader.ListGroups(ns)
commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
items, err := readIterator(inv, &it, sessionID) items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
groups, err := frostfsidclient.ParseGroups(items) groups, err := frostfsidclient.ParseGroups(items)
commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err) commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)
@ -441,45 +404,6 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err) commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err)
} }
func frostfsidSetKV(cmd *cobra.Command, _ []string) {
subjectAddress := getFrostfsIDSubjectAddress(cmd)
key, _ := cmd.Flags().GetString(keyFlag)
value, _ := cmd.Flags().GetString(valueFlag)
if key == "" {
commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
}
ffsid, err := newFrostfsIDClient(cmd)
commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value)
ffsid.addCall(method, args)
err = ffsid.sendWait()
commonCmd.ExitOnErr(cmd, "set KV: %w", err)
}
func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
subjectAddress := getFrostfsIDSubjectAddress(cmd)
key, _ := cmd.Flags().GetString(keyFlag)
if key == "" {
commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
}
ffsid, err := newFrostfsIDClient(cmd)
commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key)
ffsid.addCall(method, args)
err = ffsid.sendWait()
commonCmd.ExitOnErr(cmd, "delete KV: %w", err)
}
func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
ns := getFrostfsIDNamespace(cmd) ns := getFrostfsIDNamespace(cmd)
groupID := getFrostfsIDGroupID(cmd) groupID := getFrostfsIDGroupID(cmd)
@ -492,7 +416,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID)) sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
items, err := readIterator(inv, &it, sessionID) items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err) subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)
@ -565,28 +489,32 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
} }
f.bw.Reset() f.bw.Reset()
if len(f.wCtx.SentTxs) == 0 {
return nil, errors.New("no transactions to wait")
}
f.wCtx.Command.Println("Waiting for transactions to persist...") f.wCtx.Command.Println("Waiting for transactions to persist...")
return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil) return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
} }
func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) { func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
var shouldStop bool var shouldStop bool
res := make([]stackitem.Item, 0) res := make([]stackitem.Item, 0)
for !shouldStop { for !shouldStop {
items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize) items, err := inv.TraverseIterator(sessionID, iter, batchSize)
if err != nil { if err != nil {
return nil, err return nil, err
} }
res = append(res, items...) res = append(res, items...)
shouldStop = len(items) < iteratorBatchSize shouldStop = len(items) < batchSize
} }
return res, nil return res, nil
} }
func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) { func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
c, err := helper.NewRemoteClient(viper.GetViper()) c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil) inv := invoker.New(c, nil)

View file

@ -1,12 +1,59 @@
package frostfsid package frostfsid
import ( import (
"encoding/hex"
"testing" "testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/spf13/viper"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestFrostfsIDConfig(t *testing.T) {
pks := make([]*keys.PrivateKey, 4)
for i := range pks {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
pks[i] = pk
}
fmts := []string{
pks[0].GetScriptHash().StringLE(),
address.Uint160ToString(pks[1].GetScriptHash()),
hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
hex.EncodeToString(pks[3].PublicKey().Bytes()),
}
for i := range fmts {
v := viper.New()
v.Set("frostfsid.admin", fmts[i])
actual, found, err := helper.GetFrostfsIDAdmin(v)
require.NoError(t, err)
require.True(t, found)
require.Equal(t, pks[i].GetScriptHash(), actual)
}
t.Run("bad key", func(t *testing.T) {
v := viper.New()
v.Set("frostfsid.admin", "abc")
_, found, err := helper.GetFrostfsIDAdmin(v)
require.Error(t, err)
require.True(t, found)
})
t.Run("missing key", func(t *testing.T) {
v := viper.New()
_, found, err := helper.GetFrostfsIDAdmin(v)
require.NoError(t, err)
require.False(t, found)
})
}
func TestNamespaceRegexp(t *testing.T) { func TestNamespaceRegexp(t *testing.T) {
for _, tc := range []struct { for _, tc := range []struct {
name string name string

View file

@ -12,8 +12,6 @@ func init() {
initFrostfsIDAddSubjectToGroupCmd() initFrostfsIDAddSubjectToGroupCmd()
initFrostfsIDRemoveSubjectFromGroupCmd() initFrostfsIDRemoveSubjectFromGroupCmd()
initFrostfsIDListGroupSubjectsCmd() initFrostfsIDListGroupSubjectsCmd()
initFrostfsIDSetKVCmd()
initFrostfsIDDeleteKVCmd()
initFrostfsIDAddSubjectKeyCmd() initFrostfsIDAddSubjectKeyCmd()
initFrostfsIDRemoveSubjectKeyCmd() initFrostfsIDRemoveSubjectKeyCmd()
} }

View file

@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/smartcontract"
@ -140,11 +141,41 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
} }
func generateStorageCreds(cmd *cobra.Command, _ []string) error { func generateStorageCreds(cmd *cobra.Command, _ []string) error {
walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) return refillGas(cmd, storageGasConfigFlag, true)
w, err := wallet.NewWallet(walletPath) }
func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
// storage wallet path is not part of the config
storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
// wallet address is not part of the config
walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
var gasReceiver util.Uint160
if len(walletAddress) != 0 {
gasReceiver, err = address.StringToUint160(walletAddress)
if err != nil { if err != nil {
return fmt.Errorf("create wallet: %w", err) return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
} }
} else {
if storageWalletPath == "" {
return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
}
var w *wallet.Wallet
if createWallet {
w, err = wallet.NewWallet(storageWalletPath)
} else {
w, err = wallet.NewWalletFromFile(storageWalletPath)
}
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
if createWallet {
var password string
label, _ := cmd.Flags().GetString(storageWalletLabelFlag) label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
password, err := config.GetStoragePassword(viper.GetViper(), label) password, err := config.GetStoragePassword(viper.GetViper(), label)
@ -159,10 +190,11 @@ func generateStorageCreds(cmd *cobra.Command, _ []string) error {
if err := w.CreateAccount(label, password); err != nil { if err := w.CreateAccount(label, password); err != nil {
return fmt.Errorf("can't create account: %w", err) return fmt.Errorf("can't create account: %w", err)
} }
return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash()) }
}
gasReceiver = w.Accounts[0].Contract.ScriptHash()
}
func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
gasStr := viper.GetString(gasFlag) gasStr := viper.GetString(gasFlag)
gasAmount, err := helper.ParseGASAmount(gasStr) gasAmount, err := helper.ParseGASAmount(gasStr)
@ -176,11 +208,9 @@ func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160)
} }
bw := io.NewBufBinWriter() bw := io.NewBufBinWriter()
for _, gasReceiver := range gasReceivers {
emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil) wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
emit.Opcodes(bw.BinWriter, opcode.ASSERT) emit.Opcodes(bw.BinWriter, opcode.ASSERT)
}
if bw.Err != nil { if bw.Err != nil {
return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err) return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
} }

View file

@ -1,12 +1,7 @@
package generate package generate
import ( import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
) )
@ -38,27 +33,7 @@ var (
_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag)) _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
}, },
RunE: func(cmd *cobra.Command, _ []string) error { RunE: func(cmd *cobra.Command, _ []string) error {
storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag) return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
var gasReceivers []util.Uint160
for _, walletAddress := range walletAddresses {
addr, err := address.StringToUint160(walletAddress)
if err != nil {
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
}
gasReceivers = append(gasReceivers, addr)
}
for _, storageWalletPath := range storageWalletPaths {
w, err := wallet.NewWalletFromFile(storageWalletPath)
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
}
return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
}, },
} }
GenerateAlphabetCmd = &cobra.Command{ GenerateAlphabetCmd = &cobra.Command{
@ -75,10 +50,10 @@ var (
func initRefillGasCmd() { func initRefillGasCmd() {
RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet") RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet") RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer") RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag) RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
} }
func initGenerateStorageCmd() { func initGenerateStorageCmd() {

View file

@ -3,6 +3,9 @@ package helper
import ( import (
"fmt" "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/core/transaction"
@ -13,6 +16,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
) )
@ -24,86 +28,32 @@ type LocalActor struct {
rpcInvoker invoker.RPCInvoke rpcInvoker invoker.RPCInvoke
} }
type AlphabetWallets struct {
Label string
Path string
}
func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) {
w, err := GetAlphabetWallets(v, a.Path)
if err != nil {
return nil, err
}
var accounts []*wallet.Account
for _, wall := range w {
acc, err := GetWalletAccount(wall, a.Label)
if err != nil {
return nil, err
}
accounts = append(accounts, acc)
}
return accounts, nil
}
type RegularWallets struct{ Path string }
func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) {
w, err := getRegularWallet(r.Path)
if err != nil {
return nil, err
}
return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil
}
// NewLocalActor create LocalActor with accounts form provided wallets. // NewLocalActor create LocalActor with accounts form provided wallets.
// In case of empty wallets provided created actor with dummy account only for read operation. // In case of empty wallets provided created actor with dummy account only for read operation.
// //
// If wallets are provided, the contract client will use accounts with accName name from these wallets. // If wallets are provided, the contract client will use accounts with accName name from these wallets.
// To determine which account name should be used in a contract client, refer to how the contract // To determine which account name should be used in a contract client, refer to how the contract
// verifies the transaction signature. // verifies the transaction signature.
func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) { func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) {
walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
var act *actor.Actor var act *actor.Actor
var accounts []*wallet.Account var accounts []*wallet.Account
var signers []actor.SignerAccount
if alphabet != nil { wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
account, err := alphabet.GetAccount(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
if err != nil {
return nil, err for _, w := range wallets {
acc, err := GetWalletAccount(w, accName)
commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err)
accounts = append(accounts, acc)
} }
act, err = actor.New(c, []actor.SignerAccount{{
accounts = append(accounts, account...)
signers = append(signers, actor.SignerAccount{
Signer: transaction.Signer{ Signer: transaction.Signer{
Account: account[0].Contract.ScriptHash(), Account: accounts[0].Contract.ScriptHash(),
Scopes: transaction.Global, Scopes: transaction.Global,
}, },
Account: account[0], Account: accounts[0],
}) }})
}
for _, w := range regularWallets {
if w == nil {
continue
}
account, err := w.GetAccount()
if err != nil {
return nil, err
}
accounts = append(accounts, account...)
signers = append(signers, actor.SignerAccount{
Signer: transaction.Signer{
Account: account[0].Contract.ScriptHash(),
Scopes: transaction.Global,
},
Account: account[0],
})
}
act, err := actor.New(c, signers)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any
h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker) h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker)
} }
if method != constants.UpdateMethodName || err == nil && !found { if method != constants.UpdateMethodName || err == nil && !found {
h, found, err = getFrostfsIDAdmin(viper.GetViper()) h, found, err = GetFrostfsIDAdmin(viper.GetViper())
} }
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -11,7 +11,7 @@ import (
const frostfsIDAdminConfigKey = "frostfsid.admin" const frostfsIDAdminConfigKey = "frostfsid.admin"
func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) { func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
admin := v.GetString(frostfsIDAdminConfigKey) admin := v.GetString(frostfsIDAdminConfigKey)
if admin == "" { if admin == "" {
return util.Uint160{}, false, nil return util.Uint160{}, false, nil

View file

@ -1,53 +0,0 @@
package helper
import (
"encoding/hex"
"testing"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
)
func TestFrostfsIDConfig(t *testing.T) {
pks := make([]*keys.PrivateKey, 4)
for i := range pks {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
pks[i] = pk
}
fmts := []string{
pks[0].GetScriptHash().StringLE(),
address.Uint160ToString(pks[1].GetScriptHash()),
hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
hex.EncodeToString(pks[3].PublicKey().Bytes()),
}
for i := range fmts {
v := viper.New()
v.Set("frostfsid.admin", fmts[i])
actual, found, err := getFrostfsIDAdmin(v)
require.NoError(t, err)
require.True(t, found)
require.Equal(t, pks[i].GetScriptHash(), actual)
}
t.Run("bad key", func(t *testing.T) {
v := viper.New()
v.Set("frostfsid.admin", "abc")
_, found, err := getFrostfsIDAdmin(v)
require.Error(t, err)
require.True(t, found)
})
t.Run("missing key", func(t *testing.T) {
v := viper.New()
_, found, err := getFrostfsIDAdmin(v)
require.NoError(t, err)
require.False(t, found)
})
}

View file

@ -134,12 +134,12 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex
return nil, err return nil, err
} }
accounts, err := getSingleAccounts(wallets) accounts, err := createWalletAccounts(wallets)
if err != nil { if err != nil {
return nil, err return nil, err
} }
cliCtx, err := defaultClientContext(c, committeeAcc) cliCtx, err := DefaultClientContext(c, committeeAcc)
if err != nil { if err != nil {
return nil, fmt.Errorf("client context: %w", err) return nil, fmt.Errorf("client context: %w", err)
} }
@ -191,7 +191,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet)
} }
c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String()) c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String())
} else { } else {
c, err = NewRemoteClient(v) c, err = GetN3Client(v)
} }
if err != nil { if err != nil {
return nil, fmt.Errorf("can't create N3 client: %w", err) return nil, fmt.Errorf("can't create N3 client: %w", err)
@ -211,7 +211,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
return ctrPath, nil return ctrPath, nil
} }
func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) { func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
accounts := make([]*wallet.Account, len(wallets)) accounts := make([]*wallet.Account, len(wallets))
for i, w := range wallets { for i, w := range wallets {
acc, err := GetWalletAccount(w, constants.SingleAccountName) acc, err := GetWalletAccount(w, constants.SingleAccountName)

View file

@ -58,30 +58,35 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
return nil, err return nil, err
} }
go bc.Run() m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount))
accounts := make([]*wallet.Account, len(wallets))
accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets) for i := range accounts {
accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
}
indexMap := make(map[string]int)
for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee {
indexMap[pub] = i
}
sort.Slice(accounts, func(i, j int) bool {
pi := accounts[i].PrivateKey().PublicKey().Bytes()
pj := accounts[j].PrivateKey().PublicKey().Bytes()
return indexMap[string(pi)] < indexMap[string(pj)]
})
sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool {
return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
})
go bc.Run()
if cmd.Name() != "init" { if cmd.Name() != "init" {
if err := restoreDump(bc, dumpPath); err != nil {
return nil, fmt.Errorf("restore dump: %w", err)
}
}
return &LocalClient{
bc: bc,
dumpPath: dumpPath,
accounts: accounts,
}, nil
}
func restoreDump(bc *core.Blockchain, dumpPath string) error {
f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600) f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
if err != nil { if err != nil {
return fmt.Errorf("can't open local dump: %w", err) return nil, fmt.Errorf("can't open local dump: %w", err)
} }
defer f.Close() defer f.Close()
@ -94,37 +99,15 @@ func restoreDump(bc *core.Blockchain, dumpPath string) error {
count := r.ReadU32LE() - skip count := r.ReadU32LE() - skip
if err := chaindump.Restore(bc, r, skip, count, nil); err != nil { if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
return err return nil, fmt.Errorf("can't restore local dump: %w", err)
} }
return nil
}
func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) {
accounts := make([]*wallet.Account, len(wallets))
for i := range accounts {
acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName)
if err != nil {
return nil, err
}
accounts[i] = acc
} }
indexMap := make(map[string]int) return &LocalClient{
for i, pub := range cfg.StandbyCommittee { bc: bc,
indexMap[pub] = i dumpPath: dumpPath,
} accounts: accounts[:m],
}, nil
sort.Slice(accounts, func(i, j int) bool {
pi := accounts[i].PrivateKey().PublicKey().Bytes()
pj := accounts[j].PrivateKey().PublicKey().Bytes()
return indexMap[string(pi)] < indexMap[string(pj)]
})
sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool {
return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
})
m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount))
return accounts[:m], nil
} }
func (l *LocalClient) GetBlockCount() (uint32, error) { func (l *LocalClient) GetBlockCount() (uint32, error) {
@ -145,6 +128,11 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul
return &a, nil return &a, nil
} }
func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) {
// not used by `morph init` command
panic("unexpected call")
}
// InvokeFunction is implemented via `InvokeScript`. // InvokeFunction is implemented via `InvokeScript`.
func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) { func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
var err error var err error
@ -308,7 +296,13 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer)
} }
func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) { func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) {
tx = tx.Copy() // We need to test that transaction was formed correctly to catch as many errors as we can.
bs := tx.Bytes()
_, err := transaction.NewTransactionFromBytes(bs)
if err != nil {
return tx.Hash(), fmt.Errorf("invalid transaction: %w", err)
}
l.transactions = append(l.transactions, tx) l.transactions = append(l.transactions, tx)
return tx.Hash(), nil return tx.Hash(), nil
} }

View file

@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result" "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
@ -24,10 +25,15 @@ import (
// Client represents N3 client interface capable of test-invoking scripts // Client represents N3 client interface capable of test-invoking scripts
// and sending signed transactions to chain. // and sending signed transactions to chain.
type Client interface { type Client interface {
actor.RPCActor invoker.RPCInvoke
GetBlockCount() (uint32, error)
GetNativeContracts() ([]state.Contract, error) GetNativeContracts() ([]state.Contract, error)
GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error) GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
GetVersion() (*result.Version, error)
SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
GetCommittee() (keys.PublicKeys, error)
CalculateNetworkFee(tx *transaction.Transaction) (int64, error)
} }
type HashVUBPair struct { type HashVUBPair struct {
@ -42,7 +48,7 @@ type ClientContext struct {
SentTxs []HashVUBPair SentTxs []HashVUBPair
} }
func NewRemoteClient(v *viper.Viper) (Client, error) { func GetN3Client(v *viper.Viper) (Client, error) {
// number of opened connections // number of opened connections
// by neo-go client per one host // by neo-go client per one host
const ( const (
@ -82,14 +88,8 @@ func NewRemoteClient(v *viper.Viper) (Client, error) {
return c, nil return c, nil
} }
func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) { func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
commAct, err := actor.New(c, []actor.SignerAccount{{ commAct, err := NewActor(c, committeeAcc)
Signer: transaction.Signer{
Account: committeeAcc.Contract.ScriptHash(),
Scopes: transaction.Global,
},
Account: committeeAcc,
}})
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -14,36 +14,16 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/viper" "github.com/spf13/viper"
) )
func getRegularWallet(walletPath string) (*wallet.Wallet, error) {
w, err := wallet.NewWalletFromFile(walletPath)
if err != nil {
return nil, err
}
password, err := input.ReadPassword("Enter password for wallet:")
if err != nil {
return nil, fmt.Errorf("can't fetch password: %w", err)
}
for i := range w.Accounts {
if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
err = fmt.Errorf("can't unlock wallet: %w", err)
break
}
}
return w, err
}
func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) { func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
wallets, err := openAlphabetWallets(v, walletDir) wallets, err := openAlphabetWallets(v, walletDir)
if err != nil { if err != nil {
@ -73,7 +53,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
err = nil err = nil
} else { } else {
err = fmt.Errorf("can't open alphabet wallet: %w", err) err = fmt.Errorf("can't open wallet: %w", err)
} }
break break
} }
@ -107,6 +87,16 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
return wallets, nil return wallets, nil
} }
func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) {
return actor.New(c, []actor.SignerAccount{{
Signer: transaction.Signer{
Account: committeeAcc.Contract.ScriptHash(),
Scopes: transaction.Global,
},
Account: committeeAcc,
}})
}
func ReadContract(ctrPath, ctrName string) (*ContractState, error) { func ReadContract(ctrPath, ctrName string) (*ContractState, error) {
rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef")) rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef"))
if err != nil { if err != nil {

View file

@ -22,14 +22,15 @@ import (
) )
const ( const (
gasInitialTotalSupply = 30000000 * native.GASFactor
// initialAlphabetGASAmount represents the amount of GAS given to each alphabet node. // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
initialAlphabetGASAmount = 10_000 * native.GASFactor initialAlphabetGASAmount = 10_000 * native.GASFactor
// initialProxyGASAmount represents the amount of GAS given to a proxy contract. // initialProxyGASAmount represents the amount of GAS given to a proxy contract.
initialProxyGASAmount = 50_000 * native.GASFactor initialProxyGASAmount = 50_000 * native.GASFactor
) )
func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 { func initialCommitteeGASAmount(c *helper.InitializeContext) int64 {
return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
} }
func transferFunds(c *helper.InitializeContext) error { func transferFunds(c *helper.InitializeContext) error {
@ -41,11 +42,6 @@ func transferFunds(c *helper.InitializeContext) error {
return err return err
} }
version, err := c.Client.GetVersion()
if err != nil {
return err
}
var transfers []transferTarget var transfers []transferTarget
for _, acc := range c.Accounts { for _, acc := range c.Accounts {
to := acc.Contract.ScriptHash() to := acc.Contract.ScriptHash()
@ -63,7 +59,7 @@ func transferFunds(c *helper.InitializeContext) error {
transferTarget{ transferTarget{
Token: gas.Hash, Token: gas.Hash,
Address: c.CommitteeAcc.Contract.ScriptHash(), Address: c.CommitteeAcc.Contract.ScriptHash(),
Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)), Amount: initialCommitteeGASAmount(c),
}, },
transferTarget{ transferTarget{
Token: neo.Hash, Token: neo.Hash,
@ -87,23 +83,16 @@ func transferFunds(c *helper.InitializeContext) error {
// transferFundsFinished checks balances of accounts we transfer GAS to. // transferFundsFinished checks balances of accounts we transfer GAS to.
// The stage is considered finished if the balance is greater than the half of what we need to transfer. // The stage is considered finished if the balance is greater than the half of what we need to transfer.
func transferFundsFinished(c *helper.InitializeContext) (bool, error) { func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) acc := c.Accounts[0]
res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
if err != nil {
return false, err
}
version, err := c.Client.GetVersion() r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 { res, err := r.BalanceOf(acc.Contract.ScriptHash())
if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 {
return false, err return false, err
} }
res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash()) res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
if err != nil { return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err
return false, err
}
return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
} }
func transferGASToProxy(c *helper.InitializeContext) error { func transferGASToProxy(c *helper.InitializeContext) error {

View file

@ -13,7 +13,7 @@ import (
) )
func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) { func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
c, err := helper.NewRemoteClient(viper.GetViper()) c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
inv := invoker.New(c, nil) inv := invoker.New(c, nil)

View file

@ -6,9 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper"
) )
func initRegisterCmd() { func initRegisterCmd() {
@ -21,7 +19,6 @@ func initRegisterCmd() {
registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter") registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter") registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter") registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
} }
@ -51,7 +48,6 @@ func initDeleteCmd() {
deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
} }
@ -66,28 +62,3 @@ func deleteDomain(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "delete domain error: %w", err) commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
cmd.Println("Domain deleted successfully") cmd.Println("Domain deleted successfully")
} }
func initSetAdminCmd() {
Cmd.AddCommand(setAdminCmd)
setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage)
_ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath)
_ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag)
}
func setAdmin(cmd *cobra.Command, _ []string) {
c, actor := nnsWriter(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath))
commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err)
h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash())
_, err = actor.Wait(h, vub, err)
commonCmd.ExitOnErr(cmd, "Set admin error: %w", err)
cmd.Println("Set admin successfully")
}

View file

@ -1,11 +1,7 @@
package nns package nns
import ( import (
"errors"
client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
@ -17,35 +13,10 @@ import (
 func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
 	v := viper.GetViper()
-	c, err := helper.NewRemoteClient(v)
+	c, err := helper.GetN3Client(v)
 	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
-	alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag))
-	walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath))
-	adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath))
-	var (
-		alphabet       *helper.AlphabetWallets
-		regularWallets []*helper.RegularWallets
-	)
-	if alphabetWalletPath != "" {
-		alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName}
-	}
-	if walletPath != "" {
-		regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath})
-	}
-	if adminWalletPath != "" {
-		regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath})
-	}
-	if alphabet == nil && regularWallets == nil {
-		commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided"))
-	}
-	ac, err := helper.NewLocalActor(c, alphabet, regularWallets...)
+	ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName)
 	commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)
 	r := management.NewReader(ac.Invoker)
@ -55,7 +26,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
 }
 func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
 	inv := invoker.New(c, nil)

View file

@ -19,7 +19,6 @@ func initAddRecordCmd() {
addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag)
@ -41,7 +40,6 @@ func initDelRecordsCmd() {
delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
@ -54,7 +52,6 @@ func initDelRecordCmd() {
delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
_ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
_ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag) _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)

View file

@ -39,7 +39,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: registerDomain, Run: registerDomain,
} }
@ -49,7 +48,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: deleteDomain, Run: deleteDomain,
} }
@ -77,7 +75,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: addRecord, Run: addRecord,
} }
@ -95,7 +92,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: delRecords, Run: delRecords,
} }
@ -105,21 +101,9 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) { PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
}, },
Run: delRecord, Run: delRecord,
} }
setAdminCmd = &cobra.Command{
Use: "set-admin",
Short: "Sets admin for domain",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
_ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath))
},
Run: setAdmin,
}
) )
func init() { func init() {
@ -132,5 +116,4 @@ func init() {
initGetRecordsCmd() initGetRecordsCmd()
initDelRecordsCmd() initDelRecordsCmd()
initDelRecordCmd() initDelRecordCmd()
initSetAdminCmd()
} }

View file

@ -4,6 +4,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@ -40,8 +41,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
 	}
 	accHash := w.GetChangeAddress()
-	addr, _ := cmd.Flags().GetString(walletAccountFlag)
-	if addr != "" {
+	if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
 		accHash, err = address.StringToUint160(addr)
 		if err != nil {
 			return fmt.Errorf("invalid address: %s", addr)
@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
 		return fmt.Errorf("can't find account for %s", accHash)
 	}
-	prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
+	prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
 	pass, err := input.ReadPassword(prompt)
 	if err != nil {
 		return fmt.Errorf("can't get password: %v", err)
@ -73,16 +73,23 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
 		return err
 	}
-	till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
-	if till <= 0 {
-		return errInvalidNotaryDepositLifetime
-	}
+	till := int64(defaultNotaryDepositLifetime)
+	tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
+	if err != nil {
+		return err
+	}
+	if tillStr != "" {
+		till, err = strconv.ParseInt(tillStr, 10, 64)
+		if err != nil || till <= 0 {
+			return errInvalidNotaryDepositLifetime
+		}
+	}
 	return transferGas(cmd, acc, accHash, gasAmount, till)
 }
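The notary deposit lifetime flag above became a plain string, so the command parses it by hand and falls back to the default when it is empty; a self-contained sketch of that logic (the helper name and the inlined error message are illustrative):

package example

import (
	"errors"
	"strconv"
)

// parseTill returns def for an empty input and rejects non-positive or non-numeric values.
func parseTill(s string, def int64) (int64, error) {
	if s == "" {
		return def, nil
	}
	v, err := strconv.ParseInt(s, 10, 64)
	if err != nil || v <= 0 {
		return 0, errors.New("invalid notary deposit lifetime")
	}
	return v, nil
}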
 func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	if err != nil {
 		return err
 	}

View file

@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
 	DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
 	DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
 	DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
-	DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
+	DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
 }
func init() { func init() {

View file

@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error {
} }
 func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	commonCmd.ExitOnErr(cmd, "can't create N3 client:", err)
 	inv := invoker.New(c, nil)

View file

@ -20,32 +20,23 @@ const (
 	accountAddressFlag = "account"
 )
-func parseAddresses(cmd *cobra.Command) []util.Uint160 {
-	var addrs []util.Uint160
-	accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
-	for _, acc := range accs {
-		addr, err := address.StringToUint160(acc)
-		commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
-		addrs = append(addrs, addr)
-	}
-	return addrs
-}
-func addProxyAccount(cmd *cobra.Command, _ []string) {
-	addrs := parseAddresses(cmd)
-	err := processAccount(cmd, addrs, "addAccount")
+func addProxyAccount(cmd *cobra.Command, _ []string) {
+	acc, _ := cmd.Flags().GetString(accountAddressFlag)
+	addr, err := address.StringToUint160(acc)
+	commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
+	err = processAccount(cmd, addr, "addAccount")
 	commonCmd.ExitOnErr(cmd, "processing error: %w", err)
 }
 func removeProxyAccount(cmd *cobra.Command, _ []string) {
-	addrs := parseAddresses(cmd)
-	err := processAccount(cmd, addrs, "removeAccount")
+	acc, _ := cmd.Flags().GetString(accountAddressFlag)
+	addr, err := address.StringToUint160(acc)
+	commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
+	err = processAccount(cmd, addr, "removeAccount")
 	commonCmd.ExitOnErr(cmd, "processing error: %w", err)
 }
-func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
+func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
 	wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
 	if err != nil {
 		return fmt.Errorf("can't initialize context: %w", err)
@ -63,9 +54,7 @@ func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) err
 	}
 	bw := io.NewBufBinWriter()
-	for _, addr := range addrs {
 		emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
-	}
 	if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
 		return err

View file

@ -29,15 +29,13 @@ var (
 func initProxyAddAccount() {
 	AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
-	_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
+	AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
 	AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 }
 func initProxyRemoveAccount() {
 	RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
-	_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
+	RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
 	RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 }
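The two registrations above differ only in arity: StringArray lets --account be passed several times, String accepts a single value. A short sketch of the repeatable form and how it is read back (flag name as in the hunk, function name illustrative):

package example

import "github.com/spf13/cobra"

// accountsFromFlags registers a repeatable flag and returns every occurrence in order,
// e.g. --account A --account B yields []string{"A", "B"}.
func accountsFromFlags(cmd *cobra.Command, args []string) []string {
	cmd.Flags().StringArray("account", nil, "Wallet address string")
	_ = cmd.Flags().Parse(args)
	accs, _ := cmd.Flags().GetStringArray("account")
	return accs
}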

View file

@ -11,7 +11,6 @@ import (
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
"slices"
"strconv" "strconv"
"strings" "strings"
"text/template" "text/template"
@ -106,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
 		fatalOnErr(errors.New("can't find account in wallet"))
 	}
-	c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
+	c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
 	fatalOnErr(err)
 	err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
@ -411,7 +410,8 @@ func initClient(rpc []string) *rpcclient.Client {
 	var c *rpcclient.Client
 	var err error
-	shuffled := slices.Clone(rpc)
+	shuffled := make([]string, len(rpc))
+	copy(shuffled, rpc)
 	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
 	for _, endpoint := range shuffled {
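Both variants above copy the endpoint list before shuffling, so the caller's slice is left untouched; slices.Clone (Go 1.21+) is the one-line equivalent of the make-plus-copy form:

package example

import (
	"math/rand"
	"slices"
)

// shuffledEndpoints returns a shuffled copy of rpc without modifying the original slice.
func shuffledEndpoints(rpc []string) []string {
	shuffled := slices.Clone(rpc) // same effect as make([]string, len(rpc)) + copy
	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
	return shuffled
}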

View file

@ -9,6 +9,8 @@ import (
"io" "io"
"os" "os"
"slices" "slices"
"sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@ -76,29 +78,13 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
 // SortedIDList returns sorted list of identifiers of user's containers.
 func (x ListContainersRes) SortedIDList() []cid.ID {
 	list := x.cliRes.Containers()
-	slices.SortFunc(list, cid.ID.Cmp)
+	sort.Slice(list, func(i, j int) bool {
+		lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
+		return strings.Compare(lhs, rhs) < 0
+	})
 	return list
 }
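The two sorts have the same observable result: one compares IDs via the SDK's cid.ID.Cmp, the other by the string encoding. A sketch of the newer form, which plugs the method expression straight into slices.SortFunc:

package example

import (
	"slices"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// sortIDs sorts container IDs in place; cid.ID.Cmp returns -1/0/+1 as slices.SortFunc expects.
func sortIDs(list []cid.ID) []cid.ID {
	slices.SortFunc(list, cid.ID.Cmp)
	return list
}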
func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
cliPrm := &client.PrmContainerListStream{
XHeaders: prm.XHeaders,
OwnerID: prm.OwnerID,
Session: prm.Session,
}
rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
if err != nil {
return fmt.Errorf("init container list: %w", err)
}
err = rdr.Iterate(processCnr)
if err != nil {
return fmt.Errorf("read container list: %w", err)
}
return
}
// PutContainerPrm groups parameters of PutContainer operation. // PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct { type PutContainerPrm struct {
Client *client.Client Client *client.Client
@ -684,7 +670,9 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
 		return nil, fmt.Errorf("read object list: %w", err)
 	}
-	slices.SortFunc(list, oid.ID.Cmp)
+	slices.SortFunc(list, func(a, b oid.ID) int {
+		return strings.Compare(a.EncodeToString(), b.EncodeToString())
+	})
 	return &SearchObjectsRes{
 		ids: list,

View file

@ -28,7 +28,7 @@ const (
 	RPC = "rpc-endpoint"
 	RPCShorthand = "r"
 	RPCDefault = ""
-	RPCUsage = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')"
+	RPCUsage = "Remote node address (as 'multiaddr' or '<host>:<port>')"
 	Timeout = "timeout"
 	TimeoutShorthand = "t"

View file

@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
 	outputPath, _ := cmd.Flags().GetString(outputFlag)
 	if outputPath != "" {
-		err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
+		err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
 		commonCmd.ExitOnErr(cmd, "dump error: %w", err)
 	} else {
 		fmt.Print("\n")

View file

@ -6,11 +6,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
) )
// flags of list command. // flags of list command.
@ -54,58 +51,42 @@ var listContainersCmd = &cobra.Command{
 		var prm internalclient.ListContainersPrm
 		prm.SetClient(cli)
-		prm.OwnerID = idUser
+		prm.Account = idUser
+
+		res, err := internalclient.ListContainers(cmd.Context(), prm)
+		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

 		prmGet := internalclient.GetContainerPrm{
 			Client: cli,
 		}
-		var containerIDs []cid.ID
-		err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
-			printContainer(cmd, prmGet, id)
-			return false
-		})
-		if err == nil {
-			return
-		}
-		if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
-			res, err := internalclient.ListContainers(cmd.Context(), prm)
-			commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-			containerIDs = res.SortedIDList()
-		} else {
-			commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-		}
+		containerIDs := res.SortedIDList()
 		for _, cnrID := range containerIDs {
-			printContainer(cmd, prmGet, cnrID)
-		}
-	},
-}
-func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
-	if flagVarListName == "" && !flagVarListPrintAttr {
-		cmd.Println(id.String())
-		return
-	}
-	prmGet.ClientParams.ContainerID = &id
-	res, err := internalclient.GetContainer(cmd.Context(), prmGet)
-	if err != nil {
-		cmd.Printf(" failed to read attributes: %v\n", err)
-		return
-	}
-	cnr := res.Container()
-	if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
-		return
-	}
-	cmd.Println(id.String())
-	if flagVarListPrintAttr {
-		cnr.IterateUserAttributes(func(key, val string) {
-			cmd.Printf(" %s: %s\n", key, val)
-		})
-	}
-}
+			if flagVarListName == "" && !flagVarListPrintAttr {
+				cmd.Println(cnrID.String())
+				continue
+			}
+			prmGet.ClientParams.ContainerID = &cnrID
+			res, err := internalclient.GetContainer(cmd.Context(), prmGet)
+			if err != nil {
+				cmd.Printf(" failed to read attributes: %v\n", err)
+				continue
+			}
+			cnr := res.Container()
+			if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
+				continue
+			}
+			cmd.Println(cnrID.String())
+			if flagVarListPrintAttr {
+				cnr.IterateUserAttributes(func(key, val string) {
+					cmd.Printf(" %s: %s\n", key, val)
+				})
+			}
+		}
+	},
 }
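The removed branch implements a fallback: the streaming RPC is tried first and the unary ListContainers is used only when the server reports codes.Unimplemented. The status check in isolation (helper name illustrative):

package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isUnimplemented reports whether err is a gRPC status with code Unimplemented.
func isUnimplemented(err error) bool {
	e, ok := status.FromError(err)
	return ok && e.Code() == codes.Unimplemented
}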
func initContainerListContainersCmd() { func initContainerListContainersCmd() {

View file

@ -23,11 +23,11 @@ type policyPlaygroundREPL struct {
 	nodes map[string]netmap.NodeInfo
 }
-func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
+func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
 	return &policyPlaygroundREPL{
 		cmd:   cmd,
 		nodes: map[string]netmap.NodeInfo{},
-	}
+	}, nil
 }
func (repl *policyPlaygroundREPL) handleLs(args []string) error { func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@ -246,7 +246,8 @@ var policyPlaygroundCmd = &cobra.Command{
 	Long: `A REPL for testing placement policies.
If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
 	Run: func(cmd *cobra.Command, _ []string) {
-		repl := newPolicyPlaygroundREPL(cmd)
+		repl, err := newPolicyPlaygroundREPL(cmd)
+		commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
 		commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
 	},
 }
} }

View file

@ -0,0 +1,56 @@
package control
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
const ignoreErrorsFlag = "no-errors"
var evacuateShardCmd = &cobra.Command{
Use: "evacuate",
Short: "Evacuate objects from shard",
Long: "Evacuate objects from shard to other shards",
Run: evacuateShard,
Deprecated: "use frostfs-cli control shards evacuation start",
}
func evacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
req.Body.Shard_ID = getShardIDList(cmd)
req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.EvacuateShardResponse
var err error
err = cli.ExecRaw(func(client *client.Client) error {
resp, err = control.EvacuateShard(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Shard has successfully been evacuated.")
}
func initControlEvacuateShardCmd() {
initControlFlags(evacuateShardCmd)
flags := evacuateShardCmd.Flags()
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
flags.Bool(shardAllFlag, false, "Process all shards")
flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}

View file

@ -21,7 +21,6 @@ const (
noProgressFlag = "no-progress" noProgressFlag = "no-progress"
scopeFlag = "scope" scopeFlag = "scope"
repOneOnlyFlag = "rep-one-only" repOneOnlyFlag = "rep-one-only"
ignoreErrorsFlag = "no-errors"
containerWorkerCountFlag = "container-worker-count" containerWorkerCountFlag = "container-worker-count"
objectWorkerCountFlag = "object-worker-count" objectWorkerCountFlag = "object-worker-count"

View file

@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.
 	var resp *control.GetNetmapStatusResponse
 	var err error
 	err = cli.ExecRaw(func(client *rawclient.Client) error {
-		resp, err = control.GetNetmapStatus(cmd.Context(), client, req)
+		resp, err = control.GetNetmapStatus(client, req)
 		return err
 	})
 	commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)

View file

@ -13,6 +13,7 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() { func initControlShardsCmd() {
shardsCmd.AddCommand(listShardsCmd) shardsCmd.AddCommand(listShardsCmd)
shardsCmd.AddCommand(setShardModeCmd) shardsCmd.AddCommand(setShardModeCmd)
shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(evacuationShardCmd) shardsCmd.AddCommand(evacuationShardCmd)
shardsCmd.AddCommand(flushCacheCmd) shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd) shardsCmd.AddCommand(doctorCmd)
@ -22,6 +23,7 @@ func initControlShardsCmd() {
initControlShardsListCmd() initControlShardsListCmd()
initControlSetShardModeCmd() initControlSetShardModeCmd()
initControlEvacuateShardCmd()
initControlEvacuationShardCmd() initControlEvacuationShardCmd()
initControlFlushCacheCmd() initControlFlushCacheCmd()
initControlDoctorCmd() initControlDoctorCmd()

View file

@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -41,9 +42,7 @@ func initObjectHashCmd() {
 	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
 	_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
-	flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
-	_ = objectHashCmd.MarkFlagRequired("range")
+	flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
 	flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
 	flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
 }
@ -67,6 +66,36 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd) pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
tz := typ == hashTz
fullHash := len(ranges) == 0
if fullHash {
var headPrm internalclient.HeadObjectPrm
headPrm.SetClient(cli)
Prepare(cmd, &headPrm)
headPrm.SetAddress(objAddr)
// get hash of full payload through HEAD (may be user can do it through dedicated command?)
res, err := internalclient.HeadObject(cmd.Context(), headPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
var cs checksum.Checksum
var csSet bool
if tz {
cs, csSet = res.Header().PayloadHomomorphicHash()
} else {
cs, csSet = res.Header().PayloadChecksum()
}
if csSet {
cmd.Println(hex.EncodeToString(cs.Value()))
} else {
cmd.Println("Missing checksum in object header.")
}
return
}
var hashPrm internalclient.HashPayloadRangesPrm var hashPrm internalclient.HashPayloadRangesPrm
hashPrm.SetClient(cli) hashPrm.SetClient(cli)
Prepare(cmd, &hashPrm) Prepare(cmd, &hashPrm)
@ -75,7 +104,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
 	hashPrm.SetSalt(salt)
 	hashPrm.SetRanges(ranges)
-	if typ == hashTz {
+	if tz {
 		hashPrm.TZ()
 	}

View file

@ -320,7 +320,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem
} }
 	placementBuilder := placement.NewNetworkMapBuilder(netmap)
 	for _, object := range objects {
-		placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy)
+		placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy)
 		commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
for repIdx, rep := range placement { for repIdx, rep := range placement {
numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects() numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
@ -358,7 +358,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
placementObjectID = object.ecHeader.parent placementObjectID = object.ecHeader.parent
} }
 	placementBuilder := placement.NewNetworkMapBuilder(netmap)
-	placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy)
+	placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy)
 	commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
for _, vector := range placement { for _, vector := range placement {

View file

@ -46,7 +46,7 @@ func initObjectPatchCmd() {
 	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
 	_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
-	flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
+	flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
 	flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
 	flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
 	flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
@ -99,9 +99,11 @@ func patch(cmd *cobra.Command, _ []string) {
 }
 func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
-	rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
-	if err != nil {
-		return nil, err
-	}
+	var rawAttrs []string
+
+	raw := cmd.Flag(newAttrsFlagName).Value.String()
+	if len(raw) != 0 {
+		rawAttrs = strings.Split(raw, ",")
+	}
 	attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
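Whichever way the raw strings are collected, each entry still has to be split into a key and a value; a hypothetical parser for the Key1=Value1 form (the actual splitting happens further down in the command and is not shown in this hunk):

package example

import (
	"fmt"
	"strings"

	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// parseAttrs converts "Key=Value" strings into object attributes.
func parseAttrs(rawAttrs []string) ([]objectSDK.Attribute, error) {
	attrs := make([]objectSDK.Attribute, len(rawAttrs))
	for i, r := range rawAttrs {
		k, v, found := strings.Cut(r, "=")
		if !found {
			return nil, fmt.Errorf("invalid attribute format: %s", r)
		}
		attrs[i].SetKey(k)
		attrs[i].SetValue(v)
	}
	return attrs, nil
}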

View file

@ -50,7 +50,7 @@ func initObjectPutCmd() {
 	flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
-	flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
+	flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
 	flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
 	flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
 	flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
@ -214,9 +214,11 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
 }
 func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
-	rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
-	if err != nil {
-		return nil, err
-	}
+	var rawAttrs []string
+
+	raw := cmd.Flag("attributes").Value.String()
+	if len(raw) != 0 {
+		rawAttrs = strings.Split(raw, ",")
+	}
 	attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes

View file

@ -38,7 +38,7 @@ func initObjectRangeCmd() {
 	flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
 	_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
-	flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
+	flags.String("range", "", "Range to take data from in the form offset:length")
 	flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
 	flags.Bool(rawFlag, false, rawFlagDesc)
} }
@ -195,10 +195,11 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
} }
 func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
-	vs, err := cmd.Flags().GetStringSlice("range")
-	if len(vs) == 0 || err != nil {
-		return nil, err
+	v := cmd.Flag("range").Value.String()
+	if len(v) == 0 {
+		return nil, nil
 	}
+	vs := strings.Split(v, ",")
 	rs := make([]objectSDK.Range, len(vs))
 	for i := range vs {
 		before, after, found := strings.Cut(vs[i], rangeSep)
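Each offset:length item is then cut on the separator and converted into an objectSDK.Range; a self-contained sketch of that step (the real loop continues past the cut-off above, so the helper name here is illustrative):

package example

import (
	"strconv"
	"strings"

	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// parseRange converts a single "offset:length" string into a Range.
func parseRange(s string) (objectSDK.Range, bool) {
	var r objectSDK.Range
	before, after, found := strings.Cut(s, ":")
	if !found {
		return r, false
	}
	off, errOff := strconv.ParseUint(before, 10, 64)
	ln, errLn := strconv.ParseUint(after, 10, 64)
	if errOff != nil || errLn != nil {
		return r, false
	}
	r.SetOffset(off)
	r.SetLength(ln)
	return r, true
}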

View file

@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
@ -33,9 +34,11 @@ func _client() (tree.TreeServiceClient, error) {
 	opts := []grpc.DialOption{
 		grpc.WithChainUnaryInterceptor(
+			metrics.NewUnaryClientInterceptor(),
 			tracing.NewUnaryClientInteceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
+			metrics.NewStreamClientInterceptor(),
 			tracing.NewStreamClientInterceptor(),
 		),
 		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
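The added lines put the metrics interceptors ahead of the tracing ones, so every unary and stream call is both counted and traced. A sketch of the same option list on a standalone connection; grpc.NewClient and the insecure transport credentials are illustrative choices, not taken from this file:

package example

import (
	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dial opens a client connection with metrics and tracing interceptors chained in.
func dial(addr string) (*grpc.ClientConn, error) {
	return grpc.NewClient(addr,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(
			metrics.NewUnaryClientInterceptor(),
			tracing.NewUnaryClientInteceptor(), // spelling follows the upstream API
		),
		grpc.WithChainStreamInterceptor(
			metrics.NewStreamClientInterceptor(),
			tracing.NewStreamClientInterceptor(),
		),
	)
}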

View file

@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) {
 	log.Info(ctx, c.name+" config updated")
 	if err := c.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 	} else {
 		c.init(ctx)

View file

@ -119,12 +119,12 @@ func shutdown(ctx context.Context) {
 	innerRing.Stop(ctx)
 	if err := metricsCmp.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 	}
 	if err := pprofCmp.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 	}

View file

@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) {
 	log.Info(ctx, c.name+" config updated")
 	if err := c.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.Error(err))
+			zap.String("error", err.Error()))
 		return
 	}
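For a non-nil error the two forms are interchangeable: zap.Error(err) emits the field under the same "error" key as zap.String("error", err.Error()), and it additionally tolerates a nil error. A sketch with a bare zap.Logger for illustration (the project logger adds a context argument on top):

package example

import "go.uber.org/zap"

// logShutdownErr logs err under the "error" key; zap.Error is a no-op for nil errors.
func logShutdownErr(log *zap.Logger, err error) {
	log.Debug("could not shutdown HTTP server", zap.Error(err))
}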

View file

@ -2,17 +2,13 @@ package meta
import ( import (
"context" "context"
"encoding/binary"
"errors"
"fmt" "fmt"
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase" schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview" "github.com/rivo/tview"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"go.etcd.io/bbolt"
) )
var tuiCMD = &cobra.Command{ var tuiCMD = &cobra.Command{
@ -31,11 +27,6 @@ Available search filters:
var initialPrompt string var initialPrompt string
var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
2: schema.MetabaseParserV2,
3: schema.MetabaseParserV3,
}
func init() { func init() {
common.AddComponentPathFlag(tuiCMD, &vPath) common.AddComponentPathFlag(tuiCMD, &vPath)
@ -58,22 +49,12 @@ func runTUI(cmd *cobra.Command) error {
} }
defer db.Close() defer db.Close()
schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
if !hasVersion {
return errors.New("couldn't detect schema version")
}
metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
if !ok {
return fmt.Errorf("unknown schema version %d", schemaVersion)
}
// Need if app was stopped with Ctrl-C. // Need if app was stopped with Ctrl-C.
ctx, cancel := context.WithCancel(cmd.Context()) ctx, cancel := context.WithCancel(cmd.Context())
defer cancel() defer cancel()
app := tview.NewApplication() app := tview.NewApplication()
-	ui := tui.NewUI(ctx, app, db, metabaseParser, nil)
+	ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
_ = ui.AddFilter("cid", tui.CIDParser, "CID") _ = ui.AddFilter("cid", tui.CIDParser, "CID")
_ = ui.AddFilter("oid", tui.OIDParser, "OID") _ = ui.AddFilter("oid", tui.OIDParser, "OID")
@ -88,31 +69,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui) app.SetRoot(ui, true).SetFocus(ui)
return app.Run() return app.Run()
} }
var (
shardInfoBucket = []byte{5}
versionRecord = []byte("version")
)
func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
err := db.View(func(tx *bbolt.Tx) error {
bkt := tx.Bucket(shardInfoBucket)
if bkt == nil {
return nil
}
rec := bkt.Get(versionRecord)
if rec == nil {
return nil
}
version = binary.LittleEndian.Uint64(rec)
ok = true
return nil
})
if err != nil {
common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
}
return
}

View file

@ -80,15 +80,10 @@ var (
}, },
) )
-	UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
+	UserAttributeParser = NewUserAttributeKeyBucketParser(
 		NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
 	)
UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
[]string{"FilePath", "S3-Access-Box-CRDT-Name"},
)
PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{ PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
cidResolver: StrictResolver, cidResolver: StrictResolver,
oidResolver: StrictResolver, oidResolver: StrictResolver,
@ -113,14 +108,4 @@ var (
cidResolver: StrictResolver, cidResolver: StrictResolver,
oidResolver: LenientResolver, oidResolver: LenientResolver,
}) })
ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
cidResolver: LenientResolver,
oidResolver: LenientResolver,
})
ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
cidResolver: StrictResolver,
oidResolver: LenientResolver,
})
) )

View file

@ -22,8 +22,6 @@ const (
Split Split
ContainerCounters ContainerCounters
ECInfo ECInfo
ExpirationEpochToObject
ObjectToExpirationEpoch
) )
var x = map[Prefix]string{ var x = map[Prefix]string{
@ -45,8 +43,6 @@ var x = map[Prefix]string{
Split: "Split", Split: "Split",
ContainerCounters: "Container Counters", ContainerCounters: "Container Counters",
ECInfo: "EC Info", ECInfo: "EC Info",
ExpirationEpochToObject: "Exp. Epoch to Object",
ObjectToExpirationEpoch: "Object to Exp. Epoch",
} }
func (p Prefix) String() string { func (p Prefix) String() string {

View file

@ -9,7 +9,7 @@ import (
 func (b *PrefixBucket) String() string {
 	return common.FormatSimple(
-		fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+		fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
 	)
 }
@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
 	return fmt.Sprintf(
 		"%s CID %s",
 		common.FormatSimple(
-			fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+			fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
 		),
 		common.FormatSimple(b.id.String(), tcell.ColorAqua),
 	)
@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
 func (b *UserAttributeKeyBucket) String() string {
 	return fmt.Sprintf("%s CID %s ATTR-KEY %s",
 		common.FormatSimple(
-			fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
+			fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
 		),
 		common.FormatSimple(
 			fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,

View file

@ -2,7 +2,6 @@ package buckets
import ( import (
"errors" "errors"
"slices"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -62,7 +61,6 @@ var (
ErrInvalidKeyLength = errors.New("invalid key length") ErrInvalidKeyLength = errors.New("invalid key length")
ErrInvalidValueLength = errors.New("invalid value length") ErrInvalidValueLength = errors.New("invalid value length")
ErrInvalidPrefix = errors.New("invalid prefix") ErrInvalidPrefix = errors.New("invalid prefix")
ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
) )
func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser { func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
@ -134,10 +132,6 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
} }
func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
}
func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if value != nil { if value != nil {
return nil, nil, ErrNotBucket return nil, nil, ErrNotBucket
@ -153,11 +147,6 @@ func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []
return nil, nil, err return nil, nil, err
} }
b.key = string(key[33:]) b.key = string(key[33:])
if len(keys) != 0 && !slices.Contains(keys, b.key) {
return nil, nil, ErrUnexpectedAttributeKey
}
return &b, next, nil return &b, next, nil
} }
} }

View file

@ -5,30 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
) )
-var MetabaseParserV3 = common.WithFallback(
+var MetabaseParser = common.WithFallback(
common.Any(
buckets.GraveyardParser,
buckets.GarbageParser,
buckets.ContainerVolumeParser,
buckets.LockedParser,
buckets.ShardInfoParser,
buckets.PrimaryParser,
buckets.LockersParser,
buckets.TombstoneParser,
buckets.SmallParser,
buckets.RootParser,
buckets.UserAttributeParserV3,
buckets.ParentParser,
buckets.SplitParser,
buckets.ContainerCountersParser,
buckets.ECInfoParser,
buckets.ExpirationEpochToObjectParser,
buckets.ObjectToExpirationEpochParser,
),
common.RawParser.ToFallbackParser(),
)
var MetabaseParserV2 = common.WithFallback(
common.Any( common.Any(
buckets.GraveyardParser, buckets.GraveyardParser,
buckets.GarbageParser, buckets.GarbageParser,
@ -41,7 +18,7 @@ var MetabaseParserV2 = common.WithFallback(
buckets.SmallParser, buckets.SmallParser,
buckets.RootParser, buckets.RootParser,
buckets.OwnerParser, buckets.OwnerParser,
-		buckets.UserAttributeParserV2,
+		buckets.UserAttributeParser,
buckets.PayloadHashParser, buckets.PayloadHashParser,
buckets.ParentParser, buckets.ParentParser,
buckets.SplitParser, buckets.SplitParser,

View file

@ -63,11 +63,3 @@ func (r *ContainerCountersRecord) DetailedString() string {
func (r *ECInfoRecord) DetailedString() string { func (r *ECInfoRecord) DetailedString() string {
return spew.Sdump(*r) return spew.Sdump(*r)
} }
func (r *ExpirationEpochToObjectRecord) DetailedString() string {
return spew.Sdump(*r)
}
func (r *ObjectToExpirationEpochRecord) DetailedString() string {
return spew.Sdump(*r)
}

View file

@ -143,26 +143,3 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
return common.No return common.No
} }
} }
func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "cid":
id := val.(cid.ID)
return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}
func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
switch typ {
case "oid":
id := val.(oid.ID)
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
default:
return common.No
}
}

View file

@ -249,45 +249,3 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
} }
return &r, nil, nil return &r, nil, nil
} }
func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 72 {
return nil, nil, ErrInvalidKeyLength
}
var (
r ExpirationEpochToObjectRecord
err error
)
r.epoch = binary.BigEndian.Uint64(key[:8])
if err = r.cnt.Decode(key[8:40]); err != nil {
return nil, nil, err
}
if err = r.obj.Decode(key[40:]); err != nil {
return nil, nil, err
}
return &r, nil, nil
}
func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
if len(key) != 32 {
return nil, nil, ErrInvalidKeyLength
}
if len(value) != 8 {
return nil, nil, ErrInvalidValueLength
}
var (
r ObjectToExpirationEpochRecord
err error
)
if err = r.obj.Decode(key); err != nil {
return nil, nil, err
}
r.epoch = binary.LittleEndian.Uint64(value)
return &r, nil, nil
}

View file

@@ -2,7 +2,6 @@ package records
 import (
 	"fmt"
-	"strconv"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
 	"github.com/gdamore/tcell/v2"
@ -134,22 +133,3 @@ func (r *ECInfoRecord) String() string {
len(r.ids), len(r.ids),
) )
} }
func (r *ExpirationEpochToObjectRecord) String() string {
return fmt.Sprintf(
"exp. epoch %s %c CID %s OID %s",
common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
)
}
func (r *ObjectToExpirationEpochRecord) String() string {
return fmt.Sprintf(
"OID %s %c exp. epoch %s",
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
tview.Borders.Vertical,
common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
)
}


@ -79,15 +79,4 @@ type (
id oid.ID id oid.ID
ids []oid.ID ids []oid.ID
} }
ExpirationEpochToObjectRecord struct {
epoch uint64
cnt cid.ID
obj oid.ID
}
ObjectToExpirationEpochRecord struct {
obj oid.ID
epoch uint64
}
) )


@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren(
path := parentBucket.Path path := parentBucket.Path
parser := parentBucket.NextParser parser := parentBucket.NextParser
buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
if err != nil {
return err
}
for item := range buffer { for item := range buffer {
if item.err != nil { if item.err != nil {
@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren(
} }
bucket := item.val bucket := item.val
var err error
bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil) bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
if err != nil { if err != nil {
return err return err
@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
defer cancel() defer cancel()
// Check the current bucket's nested buckets if exist // Check the current bucket's nested buckets if exist
bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range bucketsBuffer { for item := range bucketsBuffer {
if item.err != nil { if item.err != nil {
@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
} }
b := item.val b := item.val
var err error
b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil) b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
if err != nil { if err != nil {
return false, err return false, err
@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
} }
// Check the current bucket's nested records if exist // Check the current bucket's nested records if exist
recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range recordsBuffer { for item := range recordsBuffer {
if item.err != nil { if item.err != nil {
@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
} }
r := item.val r := item.val
var err error
r.Entry, _, err = bucket.NextParser(r.Key, r.Value) r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
if err != nil { if err != nil {
return false, err return false, err


@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
func load[T any]( func load[T any](
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
filter func(key, value []byte) bool, transform func(key, value []byte) T, filter func(key, value []byte) bool, transform func(key, value []byte) T,
) <-chan Item[T] { ) (<-chan Item[T], error) {
buffer := make(chan Item[T], bufferSize) buffer := make(chan Item[T], bufferSize)
go func() { go func() {
@ -77,13 +77,13 @@ func load[T any](
} }
}() }()
return buffer return buffer, nil
} }
func LoadBuckets( func LoadBuckets(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Bucket] { ) (<-chan Item[*Bucket], error) {
buffer := load( buffer, err := load(
ctx, db, path, bufferSize, ctx, db, path, bufferSize,
func(_, value []byte) bool { func(_, value []byte) bool {
return value == nil return value == nil
@ -98,14 +98,17 @@ func LoadBuckets(
} }
}, },
) )
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer return buffer, nil
} }
func LoadRecords( func LoadRecords(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Record] { ) (<-chan Item[*Record], error) {
buffer := load( buffer, err := load(
ctx, db, path, bufferSize, ctx, db, path, bufferSize,
func(_, value []byte) bool { func(_, value []byte) bool {
return value != nil return value != nil
@ -121,8 +124,11 @@ func LoadRecords(
} }
}, },
) )
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer return buffer, nil
} }
// HasBuckets checks if a bucket has nested buckets. It relies on assumption // HasBuckets checks if a bucket has nested buckets. It relies on assumption
@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
buffer := load( buffer, err := load(
ctx, db, path, 1, ctx, db, path, 1,
nil, nil,
func(_, value []byte) []byte { return value }, func(_, value []byte) []byte { return value },
) )
if err != nil {
return false, err
}
x, ok := <-buffer x, ok := <-buffer
if !ok { if !ok {
return false, nil return false, nil
} }
if x.err != nil { if x.err != nil {
return false, x.err return false, err
} }
if x.val != nil { if x.val != nil {
return false, nil return false, err
} }
return true, nil return true, nil
} }
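
In the right-hand variants above, LoadBuckets, LoadRecords and the underlying load return an error next to the buffered channel, so failures to start iteration surface immediately, while per-item failures still arrive through item.err. (In the right-hand HasBuckets the later returns hand back err, which is already known to be nil at that point, where the left-hand code returns x.err and nil.) A minimal caller sketch; drainRecords and the handle callback are illustrative and assumed to live in the same package as LoadRecords, Item and Record:

// Sketch only: consuming the (<-chan Item[*Record], error) form of LoadRecords.
func drainRecords(ctx context.Context, db *bbolt.DB, path [][]byte, bufSize int, handle func(*Record)) error {
	buffer, err := LoadRecords(ctx, db, path, bufSize)
	if err != nil {
		return fmt.Errorf("can't start iterating bucket: %w", err)
	}
	for item := range buffer {
		if item.err != nil {
			return item.err // iteration failed part-way through
		}
		handle(item.val)
	}
	return nil
}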


@ -62,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
ctx, v.onUnmount = context.WithCancel(ctx) ctx, v.onUnmount = context.WithCancel(ctx)
tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize) tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
if err != nil {
return err
}
v.buffer = make(chan *Record, v.ui.loadBufferSize) v.buffer = make(chan *Record, v.ui.loadBufferSize)
go func() { go func() {
@ -70,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
for item := range tempBuffer { for item := range tempBuffer {
if item.err != nil { if item.err != nil {
v.ui.stopOnError(item.err) v.ui.stopOnError(err)
break break
} }
record := item.val record := item.val
var err error
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value) record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
if err != nil { if err != nil {
v.ui.stopOnError(err) v.ui.stopOnError(err)


@ -19,7 +19,6 @@ func initAPEManagerService(c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash) c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage, execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
c.cfgMorph.client,
apemanager.WithLogger(c.log)) apemanager.WithLogger(c.log))
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc) sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit) auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)


@ -1,7 +1,6 @@
package main package main
import ( import (
"context"
"sync" "sync"
"time" "time"
@ -17,7 +16,7 @@ import (
"github.com/hashicorp/golang-lru/v2/expirable" "github.com/hashicorp/golang-lru/v2/expirable"
) )
type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) type netValueReader[K any, V any] func(K) (V, error)
type valueWithError[V any] struct { type valueWithError[V any] struct {
v V v V
@ -50,7 +49,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
// updates the value from the network on cache miss or by TTL. // updates the value from the network on cache miss or by TTL.
// //
// returned value should not be modified. // returned value should not be modified.
func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) { func (c *ttlNetCache[K, V]) get(key K) (V, error) {
hit := false hit := false
startedAt := time.Now() startedAt := time.Now()
defer func() { defer func() {
@ -72,7 +71,7 @@ func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
return val.v, val.e return val.v, val.e
} }
v, err := c.netRdr(ctx, key) v, err := c.netRdr(key)
c.cache.Add(key, &valueWithError[V]{ c.cache.Add(key, &valueWithError[V]{
v: v, v: v,
@ -136,7 +135,7 @@ func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap]
// updates the value from the network on cache miss. // updates the value from the network on cache miss.
// //
// returned value should not be modified. // returned value should not be modified.
func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
hit := false hit := false
startedAt := time.Now() startedAt := time.Now()
defer func() { defer func() {
@ -149,7 +148,7 @@ func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, e
return val, nil return val, nil
} }
val, err := c.netRdr(ctx, key) val, err := c.netRdr(key)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -167,11 +166,11 @@ type ttlContainerStorage struct {
} }
func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage { func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) { lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(ctx, id) return v.Get(id)
}, metrics.NewCacheMetrics("container")) }, metrics.NewCacheMetrics("container"))
lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
return v.DeletionInfo(ctx, id) return v.DeletionInfo(id)
}, metrics.NewCacheMetrics("container_deletion_info")) }, metrics.NewCacheMetrics("container_deletion_info"))
return ttlContainerStorage{ return ttlContainerStorage{
@ -189,12 +188,12 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
// Get returns container value from the cache. If value is missing in the cache // Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache. // or expired, then it returns value from side chain and updates the cache.
func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) { func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
return s.containerCache.get(ctx, cnr) return s.containerCache.get(cnr)
} }
func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) { func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
return s.delInfoCache.get(ctx, cnr) return s.delInfoCache.get(cnr)
} }
type lruNetmapSource struct { type lruNetmapSource struct {
@ -206,8 +205,8 @@ type lruNetmapSource struct {
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10 const netmapCacheSize = 10
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
return v.GetNetMapByEpoch(ctx, key) return v.GetNetMapByEpoch(key)
}, metrics.NewCacheMetrics("netmap")) }, metrics.NewCacheMetrics("netmap"))
return &lruNetmapSource{ return &lruNetmapSource{
@ -216,16 +215,16 @@ func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
} }
} }
func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff) return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
} }
func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
return s.getNetMapByEpoch(ctx, epoch) return s.getNetMapByEpoch(epoch)
} }
func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
val, err := s.cache.get(ctx, epoch) val, err := s.cache.get(epoch)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -233,7 +232,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*
return val, nil return val, nil
} }
func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) { func (s *lruNetmapSource) Epoch() (uint64, error) {
return s.netState.CurrentEpoch(), nil return s.netState.CurrentEpoch(), nil
} }
@ -241,10 +240,7 @@ type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte] *ttlNetCache[struct{}, [][]byte]
} }
func newCachedIRFetcher(f interface { func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
InnerRingKeys(ctx context.Context) ([][]byte, error)
},
) cachedIRFetcher {
const ( const (
irFetcherCacheSize = 1 // we intend to store only one value irFetcherCacheSize = 1 // we intend to store only one value
@ -258,8 +254,8 @@ func newCachedIRFetcher(f interface {
) )
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL, irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
func(ctx context.Context, _ struct{}) ([][]byte, error) { func(_ struct{}) ([][]byte, error) {
return f.InnerRingKeys(ctx) return f.InnerRingKeys()
}, metrics.NewCacheMetrics("ir_keys"), }, metrics.NewCacheMetrics("ir_keys"),
) )
@ -269,8 +265,8 @@ func newCachedIRFetcher(f interface {
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in // InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates // the cache or expired, then it returns keys from side chain and updates
// the cache. // the cache.
func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) { func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
val, err := f.get(ctx, struct{}{}) val, err := f.get(struct{}{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -293,7 +289,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M
} }
} }
func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 { func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
const ttl = time.Second * 30 const ttl = time.Second * 30
hit := false hit := false
@ -315,7 +311,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
c.mtx.Lock() c.mtx.Lock()
size = c.lastSize size = c.lastSize
if !c.lastUpdated.After(prevUpdated) { if !c.lastUpdated.After(prevUpdated) {
size = c.src.MaxObjectSize(ctx) size = c.src.MaxObjectSize()
c.lastSize = size c.lastSize = size
c.lastUpdated = time.Now() c.lastUpdated = time.Now()
} }
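
The two columns of this file differ in whether a context.Context is threaded from the cache getters (ttlNetCache.get, lruNetCache.get) down to the netValueReader that hits the network. A short sketch of the context-aware shape; src, the cache size and the TTL are assumed values, everything else uses the names shown above:

// Sketch: a context-aware reader wired into newNetworkTTLCache.
cnrCache := newNetworkTTLCache(1000, 30*time.Second,
	func(ctx context.Context, id cid.ID) (*container.Container, error) {
		return src.Get(ctx, id) // cancellation and tracing reach the network call
	},
	metrics.NewCacheMetrics("container"),
)
cnr, err := cnrCache.get(ctx, id)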


@ -1,7 +1,6 @@
package main package main
import ( import (
"context"
"errors" "errors"
"testing" "testing"
"time" "time"
@ -18,7 +17,7 @@ func TestTTLNetCache(t *testing.T) {
t.Run("Test Add and Get", func(t *testing.T) { t.Run("Test Add and Get", func(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, ti, val) require.Equal(t, ti, val)
}) })
@ -27,7 +26,7 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
time.Sleep(2 * ttlDuration) time.Sleep(2 * ttlDuration)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, val, ti) require.NotEqual(t, val, ti)
}) })
@ -36,20 +35,20 @@ func TestTTLNetCache(t *testing.T) {
ti := time.Now() ti := time.Now()
cache.set(key, ti, nil) cache.set(key, ti, nil)
cache.remove(key) cache.remove(key)
val, err := cache.get(context.Background(), key) val, err := cache.get(key)
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, val, ti) require.NotEqual(t, val, ti)
}) })
t.Run("Test Cache Error", func(t *testing.T) { t.Run("Test Cache Error", func(t *testing.T) {
cache.set("error", time.Now(), errors.New("mock error")) cache.set("error", time.Now(), errors.New("mock error"))
_, err := cache.get(context.Background(), "error") _, err := cache.get("error")
require.Error(t, err) require.Error(t, err)
require.Equal(t, "mock error", err.Error()) require.Equal(t, "mock error", err.Error())
}) })
} }
func testNetValueReader(_ context.Context, key string) (time.Time, error) { func testNetValueReader(key string) (time.Time, error) {
if key == "error" { if key == "error" {
return time.Now(), errors.New("mock error") return time.Now(), errors.New("mock error")
} }


@ -33,7 +33,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
@ -70,7 +69,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore" "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -136,7 +134,6 @@ type shardCfg struct {
refillMetabase bool refillMetabase bool
refillMetabaseWorkersCount int refillMetabaseWorkersCount int
mode shardmode.Mode mode shardmode.Mode
limiter qos.Limiter
metaCfg struct { metaCfg struct {
path string path string
@ -256,42 +253,39 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) }) return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
} }
func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error { func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
var target shardCfg var newConfig shardCfg
target.refillMetabase = source.RefillMetabase() newConfig.refillMetabase = oldConfig.RefillMetabase()
target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
target.mode = source.Mode() newConfig.mode = oldConfig.Mode()
target.compress = source.Compress() newConfig.compress = oldConfig.Compress()
target.estimateCompressibility = source.EstimateCompressibility() newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold() newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
target.uncompressableContentType = source.UncompressableContentTypes() newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
target.smallSizeObjectLimit = source.SmallSizeLimit() newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
a.setShardWriteCacheConfig(&target, source) a.setShardWriteCacheConfig(&newConfig, oldConfig)
a.setShardPiloramaConfig(c, &target, source) a.setShardPiloramaConfig(c, &newConfig, oldConfig)
if err := a.setShardStorageConfig(&target, source); err != nil { if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
return err return err
} }
a.setMetabaseConfig(&target, source) a.setMetabaseConfig(&newConfig, oldConfig)
a.setGCConfig(&target, source) a.setGCConfig(&newConfig, oldConfig)
if err := a.setLimiter(&target, source); err != nil {
return err
}
a.EngineCfg.shards = append(a.EngineCfg.shards, target) a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
return nil return nil
} }
func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) { func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
writeCacheCfg := source.WriteCache() writeCacheCfg := oldConfig.WriteCache()
if writeCacheCfg.Enabled() { if writeCacheCfg.Enabled() {
wc := &target.writecacheCfg wc := &newConfig.writecacheCfg
wc.enabled = true wc.enabled = true
wc.path = writeCacheCfg.Path() wc.path = writeCacheCfg.Path()
@ -304,10 +298,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, so
} }
} }
func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) { func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
if config.BoolSafe(c.Sub("tree"), "enabled") { if config.BoolSafe(c.Sub("tree"), "enabled") {
piloramaCfg := source.Pilorama() piloramaCfg := oldConfig.Pilorama()
pr := &target.piloramaCfg pr := &newConfig.piloramaCfg
pr.enabled = true pr.enabled = true
pr.path = piloramaCfg.Path() pr.path = piloramaCfg.Path()
@ -318,8 +312,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, targ
} }
} }
func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error { func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
blobStorCfg := source.BlobStor() blobStorCfg := oldConfig.BlobStor()
storagesCfg := blobStorCfg.Storages() storagesCfg := blobStorCfg.Storages()
ss := make([]subStorageCfg, 0, len(storagesCfg)) ss := make([]subStorageCfg, 0, len(storagesCfg))
@ -353,13 +347,13 @@ func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, sourc
ss = append(ss, sCfg) ss = append(ss, sCfg)
} }
target.subStorages = ss newConfig.subStorages = ss
return nil return nil
} }
func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) { func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
metabaseCfg := source.Metabase() metabaseCfg := oldConfig.Metabase()
m := &target.metaCfg m := &newConfig.metaCfg
m.path = metabaseCfg.Path() m.path = metabaseCfg.Path()
m.perm = metabaseCfg.BoltDB().Perm() m.perm = metabaseCfg.BoltDB().Perm()
@ -367,25 +361,12 @@ func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *s
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize() m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
} }
func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) { func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
gcCfg := source.GC() gcCfg := oldConfig.GC()
target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
}
func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
limitsConfig := source.Limits()
limiter, err := qos.NewLimiter(limitsConfig)
if err != nil {
return err
}
if target.limiter != nil {
target.limiter.Close()
}
target.limiter = limiter
return nil
} }
// internals contains application-specific internals that are created // internals contains application-specific internals that are created
@ -512,7 +493,6 @@ type cfg struct {
cfgNetmap cfgNetmap cfgNetmap cfgNetmap
cfgControlService cfgControlService cfgControlService cfgControlService
cfgObject cfgObject cfgObject cfgObject
cfgQoSService cfgQoSService
} }
// ReadCurrentNetMap reads network map which has been cached at the // ReadCurrentNetMap reads network map which has been cached at the
@ -547,8 +527,6 @@ type cfgGRPC struct {
maxChunkSize uint64 maxChunkSize uint64
maxAddrAmount uint64 maxAddrAmount uint64
reconnectTimeout time.Duration reconnectTimeout time.Duration
limiter atomic.Pointer[limiting.SemaphoreLimiter]
} }
func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) { func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
@ -613,6 +591,8 @@ type cfgMorph struct {
client *client.Client client *client.Client
notaryEnabled bool
// TTL of Sidechain cached values. Non-positive value disables caching. // TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration cacheTTL time.Duration
@ -631,7 +611,6 @@ type cfgContainer struct {
parsers map[event.Type]event.NotificationParser parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers workerPool util.WorkerPool // pool for asynchronous handlers
containerBatchSize uint32
} }
type cfgFrostfsID struct { type cfgFrostfsID struct {
@ -685,6 +664,10 @@ type cfgAccessPolicyEngine struct {
} }
type cfgObjectRoutines struct { type cfgObjectRoutines struct {
putRemote *ants.Pool
putLocal *ants.Pool
replication *ants.Pool replication *ants.Pool
} }
@ -716,7 +699,8 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector netState.metrics = c.metricsCollector
logPrm := c.loggerPrm() logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm) log, err := logger.NewLogger(logPrm)
fatalOnErr(err) fatalOnErr(err)
@ -869,14 +853,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
} }
} }
func initCfgGRPC() (cfg cfgGRPC) { func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
cfg.maxChunkSize = maxChunkSize return cfgGRPC{
cfg.maxAddrAmount = maxAddrAmount maxChunkSize: maxChunkSize,
maxAddrAmount: maxAddrAmount,
return }
} }
func initCfgObject(appCfg *config.Config) cfgObject { func initCfgObject(appCfg *config.Config) cfgObject {
@ -1072,12 +1056,11 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return pool return pool
}), }),
shard.WithLimiter(shCfg.limiter),
} }
return sh return sh
} }
func (c *cfg) loggerPrm() *logger.Prm { func (c *cfg) loggerPrm() (*logger.Prm, error) {
// check if it has been inited before // check if it has been inited before
if c.dynamicConfiguration.logger == nil { if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm) c.dynamicConfiguration.logger = new(logger.Prm)
@ -1096,7 +1079,7 @@ func (c *cfg) loggerPrm() *logger.Prm {
} }
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger return c.dynamicConfiguration.logger, nil
} }
func (c *cfg) LocalAddress() network.AddressGroup { func (c *cfg) LocalAddress() network.AddressGroup {
@ -1138,7 +1121,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
err := ls.Close(context.WithoutCancel(ctx)) err := ls.Close(context.WithoutCancel(ctx))
if err != nil { if err != nil {
c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure, c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
zap.Error(err), zap.String("error", err.Error()),
) )
} else { } else {
c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
@ -1165,7 +1148,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash) c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg) cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { if cacheSize > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL) morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
} }
@ -1184,7 +1167,21 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) { func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
var err error var err error
optNonBlocking := ants.WithNonblocking(true)
putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
fatalOnErr(err)
putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
fatalOnErr(err)
replicatorPoolSize := replicatorconfig.PoolSize(cfg) replicatorPoolSize := replicatorconfig.PoolSize(cfg)
if replicatorPoolSize <= 0 {
replicatorPoolSize = putRemoteCapacity
}
pool.replication, err = ants.NewPool(replicatorPoolSize) pool.replication, err = ants.NewPool(replicatorPoolSize)
fatalOnErr(err) fatalOnErr(err)
@ -1210,11 +1207,11 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
} }
func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
ni, err := c.netmapLocalNodeState(ctx, epoch) ni, err := c.netmapLocalNodeState(epoch)
if err != nil { if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch), zap.Uint64("epoch", epoch),
zap.Error(err)) zap.String("error", err.Error()))
return return
} }
@ -1224,9 +1221,9 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract // bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration. // with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil. // The state is set using the provided setter which MUST NOT be nil.
func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error { func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
ni := c.cfgNodeInfo.localInfo ni := c.cfgNodeInfo.localInfo
ni.SetStatus(state) stateSetter(&ni)
prm := nmClient.AddPeerPrm{} prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni) prm.SetNodeInfo(ni)
@ -1236,7 +1233,9 @@ func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) er
// bootstrapOnline calls cfg.bootstrapWithState with "online" state. // bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(ctx context.Context, c *cfg) error { func bootstrapOnline(ctx context.Context, c *cfg) error {
return c.bootstrapWithState(ctx, netmap.Online) return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Online)
})
} }
// bootstrap calls bootstrapWithState with: // bootstrap calls bootstrapWithState with:
@ -1247,7 +1246,9 @@ func (c *cfg) bootstrap(ctx context.Context) error {
st := c.cfgNetmap.state.controlNetmapStatus() st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE { if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState) c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(ctx, netmap.Maintenance) return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Maintenance)
})
} }
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState, c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
@ -1338,7 +1339,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// Logger // Logger
logPrm := c.loggerPrm() logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
components := c.getComponents(ctx, logPrm) components := c.getComponents(ctx, logPrm)
@ -1414,13 +1419,17 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }}) components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
} }
components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
return components return components
} }
func (c *cfg) reloadPools() error { func (c *cfg) reloadPools() error {
newSize := replicatorconfig.PoolSize(c.appCfg) newSize := objectconfig.Put(c.appCfg).PoolSizeLocal()
c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size")
newSize = objectconfig.Put(c.appCfg).PoolSizeRemote()
c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size")
newSize = replicatorconfig.PoolSize(c.appCfg)
c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size") c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")
return nil return nil
@ -1457,7 +1466,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider { func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
return container.NewInfoProvider(func() (container.Source, error) { return container.NewInfoProvider(func() (container.Source, error) {
c.initMorphComponents(ctx) c.initMorphComponents(ctx)
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
if err != nil { if err != nil {
return nil, err return nil, err
} }


@@ -1,7 +1,6 @@
 package config

 import (
-	"slices"
 	"strings"

 	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
 // It supports only one level of nesting and is intended to be used
 // to provide default values.
 func (x *Config) SetDefault(from *Config) {
-	x.defaultPath = slices.Clone(from.path)
+	x.defaultPath = make([]string, len(from.path))
+	copy(x.defaultPath, from.path)
 }
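
The only behavioural difference between the two SetDefault bodies above is the nil case: slices.Clone returns nil for a nil input, while make plus copy always yields a non-nil (possibly empty) slice; for a defaults path that distinction is unlikely to matter. Shown together for clarity (the slices package needs Go 1.21+):

// the two variants from the diff, equivalent for non-nil input
x.defaultPath = slices.Clone(from.path)

x.defaultPath = make([]string, len(from.path))
copy(x.defaultPath, from.path)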


@ -1,27 +0,0 @@
package containerconfig
import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
const (
subsection = "container"
listStreamSubsection = "list_stream"
// ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
ContainerBatchSizeDefault = 1000
)
// ContainerBatchSize returns the value of "batch_size" config parameter
// from "list_stream" subsection of "container" section.
//
// Returns ContainerBatchSizeDefault if the value is missing or if
// the value is not positive integer.
func ContainerBatchSize(c *config.Config) uint32 {
if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
return ContainerBatchSizeDefault
}
size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
if size == 0 {
return ContainerBatchSizeDefault
}
return size
}


@ -1,27 +0,0 @@
package containerconfig_test
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestContainerSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
})
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}


@ -11,7 +11,6 @@ import (
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
@ -77,7 +76,6 @@ func TestEngineSection(t *testing.T) {
ss := blob.Storages() ss := blob.Storages()
pl := sc.Pilorama() pl := sc.Pilorama()
gc := sc.GC() gc := sc.GC()
limits := sc.Limits()
switch num { switch num {
case 0: case 0:
@ -136,75 +134,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase()) require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode()) require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
readLimits := limits.Read()
writeLimits := limits.Write()
require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
require.ElementsMatch(t, readLimits.Tags,
[]limitsconfig.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(20),
ReservedOps: toPtr(1000),
LimitOps: toPtr(0),
},
{
Tag: "client",
Weight: toPtr(70),
ReservedOps: toPtr(10000),
},
{
Tag: "background",
Weight: toPtr(5),
LimitOps: toPtr(10000),
ReservedOps: toPtr(0),
},
{
Tag: "writecache",
Weight: toPtr(5),
LimitOps: toPtr(25000),
},
{
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25000),
},
})
require.ElementsMatch(t, writeLimits.Tags,
[]limitsconfig.IOTagConfig{
{
Tag: "internal",
Weight: toPtr(200),
ReservedOps: toPtr(100),
LimitOps: toPtr(0),
},
{
Tag: "client",
Weight: toPtr(700),
ReservedOps: toPtr(1000),
},
{
Tag: "background",
Weight: toPtr(50),
LimitOps: toPtr(1000),
ReservedOps: toPtr(0),
},
{
Tag: "writecache",
Weight: toPtr(50),
LimitOps: toPtr(2500),
},
{
Tag: "policer",
Weight: toPtr(50),
LimitOps: toPtr(2500),
},
})
case 1: case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm()) require.Equal(t, fs.FileMode(0o644), pl.Perm())
@ -259,17 +188,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase()) require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode()) require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount()) require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
readLimits := limits.Read()
writeLimits := limits.Write()
require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout)
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps)
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps)
require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout)
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps)
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps)
require.Equal(t, 0, len(readLimits.Tags))
require.Equal(t, 0, len(writeLimits.Tags))
} }
return nil return nil
}) })
@ -283,7 +201,3 @@ func TestEngineSection(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest) configtest.ForEnvFileType(t, path, fileConfigTest)
}) })
} }
func toPtr(v float64) *float64 {
return &v
}


@@ -4,7 +4,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
 	blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
 	gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
-	limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
 	metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
 	piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
 	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
@@ -126,14 +125,6 @@ func (x *Config) GC() *gcconfig.Config {
 	)
 }

-// Limits returns "limits" subsection as a limitsconfig.Config.
-func (x *Config) Limits() *limitsconfig.Config {
-	return limitsconfig.From(
-		(*config.Config)(x).
-			Sub("limits"),
-	)
-}
-
 // RefillMetabase returns the value of "resync_metabase" config parameter.
 //
 // Returns false if the value is not a valid bool.


@ -1,130 +0,0 @@
package limits
import (
"math"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"github.com/spf13/cast"
)
const (
NoLimit int64 = math.MaxInt64
DefaultIdleTimeout = 5 * time.Minute
)
// From wraps config section into Config.
func From(c *config.Config) *Config {
return (*Config)(c)
}
// Config is a wrapper over the config section
// which provides access to Shard's limits configurations.
type Config config.Config
// Read returns the value of "read" limits config section.
func (x *Config) Read() OpConfig {
return x.parse("read")
}
// Write returns the value of "write" limits config section.
func (x *Config) Write() OpConfig {
return x.parse("write")
}
func (x *Config) parse(sub string) OpConfig {
c := (*config.Config)(x).Sub(sub)
var result OpConfig
if s := config.Int(c, "max_waiting_ops"); s > 0 {
result.MaxWaitingOps = s
} else {
result.MaxWaitingOps = NoLimit
}
if s := config.Int(c, "max_running_ops"); s > 0 {
result.MaxRunningOps = s
} else {
result.MaxRunningOps = NoLimit
}
if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
result.IdleTimeout = s
} else {
result.IdleTimeout = DefaultIdleTimeout
}
result.Tags = tags(c)
return result
}
type OpConfig struct {
// MaxWaitingOps returns the value of "max_waiting_ops" config parameter.
//
// Equals NoLimit if the value is not a positive number.
MaxWaitingOps int64
// MaxRunningOps returns the value of "max_running_ops" config parameter.
//
// Equals NoLimit if the value is not a positive number.
MaxRunningOps int64
// IdleTimeout returns the value of "idle_timeout" config parameter.
//
// Equals DefaultIdleTimeout if the value is not a valid duration.
IdleTimeout time.Duration
// Tags returns the value of "tags" config parameter.
//
// Equals nil if the value is not a valid tags config slice.
Tags []IOTagConfig
}
type IOTagConfig struct {
Tag string
Weight *float64
LimitOps *float64
ReservedOps *float64
}
func tags(c *config.Config) []IOTagConfig {
c = c.Sub("tags")
var result []IOTagConfig
for i := 0; ; i++ {
tag := config.String(c, strconv.Itoa(i)+".tag")
if tag == "" {
return result
}
var tagConfig IOTagConfig
tagConfig.Tag = tag
v := c.Value(strconv.Itoa(i) + ".weight")
if v != nil {
w, err := cast.ToFloat64E(v)
panicOnErr(err)
tagConfig.Weight = &w
}
v = c.Value(strconv.Itoa(i) + ".limit_ops")
if v != nil {
l, err := cast.ToFloat64E(v)
panicOnErr(err)
tagConfig.LimitOps = &l
}
v = c.Value(strconv.Itoa(i) + ".reserved_ops")
if v != nil {
r, err := cast.ToFloat64E(v)
panicOnErr(err)
tagConfig.ReservedOps = &r
}
result = append(result, tagConfig)
}
}
func panicOnErr(err error) {
if err != nil {
panic(err)
}
}


@@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
 //
 // Returns PermDefault if the value is not a positive number.
 func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
-	p := config.UintSafe(l.cfg, "perm")
+	p := config.UintSafe((*config.Config)(l.cfg), "perm")
 	if p == 0 {
 		p = PermDefault
 	}
@@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
 //
 // Returns false if the value is not a boolean.
 func (l PersistentPolicyRulesConfig) NoSync() bool {
-	return config.BoolSafe(l.cfg, "no_sync")
+	return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
 }

 // CompatibilityMode returns true if need to run node in compatibility with previous versions mode.


@@ -21,6 +21,10 @@ const (
 	putSubsection = "put"
 	getSubsection = "get"
+
+	// PutPoolSizeDefault is a default value of routine pool size to
+	// process object.Put requests in object service.
+	PutPoolSizeDefault = 10
 )

 // Put returns structure that provides access to "put" subsection of
@@ -31,6 +35,30 @@ func Put(c *config.Config) PutConfig {
 	}
 }

+// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
+//
+// Returns PutPoolSizeDefault if the value is not a positive number.
+func (g PutConfig) PoolSizeRemote() int {
+	v := config.Int(g.cfg, "remote_pool_size")
+	if v > 0 {
+		return int(v)
+	}
+	return PutPoolSizeDefault
+}
+
+// PoolSizeLocal returns the value of "local_pool_size" config parameter.
+//
+// Returns PutPoolSizeDefault if the value is not a positive number.
+func (g PutConfig) PoolSizeLocal() int {
+	v := config.Int(g.cfg, "local_pool_size")
+	if v > 0 {
+		return int(v)
+	}
+	return PutPoolSizeDefault
+}
+
 // SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if is not defined.
 func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
 	return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
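
With the accessors added on the right-hand side above, the put pool sizes come from the object.put.remote_pool_size and object.put.local_pool_size parameters and fall back to PutPoolSizeDefault (10) when the value is missing or not positive. A short usage sketch; cfg is an assumed *config.Config handle:

// Sketch: reading the put pool sizes (accessor names from the diff).
putCfg := objectconfig.Put(cfg)
remote := putCfg.PoolSizeRemote() // object.put.remote_pool_size, default 10
local := putCfg.PoolSizeLocal()   // object.put.local_pool_size, default 10
_, _ = remote, local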


@@ -13,6 +13,8 @@ func TestObjectSection(t *testing.T) {
 	t.Run("defaults", func(t *testing.T) {
 		empty := configtest.EmptyConfig()

+		require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
+		require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
 		require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
 		require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
 	})
@@ -20,6 +22,8 @@ func TestObjectSection(t *testing.T) {
 	const path = "../../../../config/example/node"

 	fileConfigTest := func(c *config.Config) {
+		require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
+		require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
 		require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
 		require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
 	}


@ -1,46 +0,0 @@
package qos
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
const (
subsection = "qos"
criticalSubSection = "critical"
internalSubSection = "internal"
)
// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config
// parameter from "qos" section.
//
// Returns an empty list if not set.
func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys {
return authorizedKeys(c, criticalSubSection)
}
// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config
// parameter from "qos" section.
//
// Returns an empty list if not set.
func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
return authorizedKeys(c, internalSubSection)
}
func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
pubs := make(keys.PublicKeys, 0, len(strKeys))
for i := range strKeys {
pub, err := keys.NewPublicKeyFromString(strKeys[i])
if err != nil {
panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
}
pubs = append(pubs, pub)
}
return pubs
}


@ -1,40 +0,0 @@
package qos
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestQoSSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Empty(t, CriticalAuthorizedKeys(empty))
require.Empty(t, InternalAuthorizedKeys(empty))
})
const path = "../../../../config/example/node"
criticalPubs := make(keys.PublicKeys, 2)
criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
internalPubs := make(keys.PublicKeys, 2)
internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
fileConfigTest := func(c *config.Config) {
require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}


@@ -11,8 +11,6 @@ const (
 	// PutTimeoutDefault is a default timeout of object put request in replicator.
 	PutTimeoutDefault = 5 * time.Second
-
-	// PoolSizeDefault is a default pool size for put request in replicator.
-	PoolSizeDefault = 10
 )

 // PutTimeout returns the value of "put_timeout" config parameter
@@ -30,13 +28,6 @@ func PutTimeout(c *config.Config) time.Duration {
 // PoolSize returns the value of "pool_size" config parameter
 // from "replicator" section.
-//
-// Returns PoolSizeDefault if the value is non-positive integer.
 func PoolSize(c *config.Config) int {
-	v := int(config.IntSafe(c.Sub(subsection), "pool_size"))
-	if v > 0 {
-		return v
-	}
-	return PoolSizeDefault
+	return int(config.IntSafe(c.Sub(subsection), "pool_size"))
 }


@@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) {
 		empty := configtest.EmptyConfig()
 		require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty))
-		require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty))
+		require.Equal(t, 0, replicatorconfig.PoolSize(empty))
 	})

 	const path = "../../../../config/example/node"

View file

@@ -1,43 +0,0 @@
package rpcconfig
import (
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
)
const (
subsection = "rpc"
limitsSubsection = "limits"
)
type LimitConfig struct {
Methods []string
MaxOps int64
}
// Limits returns the "limits" config from "rpc" section.
func Limits(c *config.Config) []LimitConfig {
c = c.Sub(subsection).Sub(limitsSubsection)
var limits []LimitConfig
for i := uint64(0); ; i++ {
si := strconv.FormatUint(i, 10)
sc := c.Sub(si)
methods := config.StringSliceSafe(sc, "methods")
if len(methods) == 0 {
break
}
maxOps := config.IntSafe(sc, "max_ops")
if maxOps == 0 {
panic("no max operations for method group")
}
limits = append(limits, LimitConfig{methods, maxOps})
}
return limits
}
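
Each LimitConfig groups a set of fully qualified gRPC method names under one max_ops budget. A hedged sketch of how this output can be turned into a limiter, mirroring the initRPCLimiter function that appears later in this diff (the function and package names here are illustrative; the config and limiting import paths are the ones shown in the diff):

```go
package rpclimit

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
	rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
)

// buildRPCLimiter converts every "rpc.limits" entry into a KeyLimit: all
// methods listed in one entry share a single semaphore of MaxOps slots.
func buildRPCLimiter(c *config.Config) (limiting.Limiter, error) {
	var limits []limiting.KeyLimit
	for _, l := range rpcconfig.Limits(c) {
		limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
	}

	lim, err := limiting.NewSemaphoreLimiter(limits)
	if err != nil {
		return nil, fmt.Errorf("create RPC limiter: %w", err)
	}
	return lim, nil
}
```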

View file

@@ -1,53 +0,0 @@
package rpcconfig
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestRPCSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
require.Empty(t, Limits(configtest.EmptyConfig()))
})
t.Run("correct config", func(t *testing.T) {
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
limits := Limits(c)
require.Len(t, limits, 2)
limit0 := limits[0]
limit1 := limits[1]
require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
require.Equal(t, limit0.MaxOps, int64(1000))
require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
require.Equal(t, limit1.MaxOps, int64(10000))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
t.Run("no max operations", func(t *testing.T) {
const path = "testdata/node"
fileConfigTest := func(c *config.Config) {
require.Panics(t, func() { _ = Limits(c) })
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
}

View file

@@ -1,3 +0,0 @@
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

View file

@@ -1,18 +0,0 @@
{
  "rpc": {
    "limits": [
      {
        "methods": [
          "/neo.fs.v2.object.ObjectService/PutSingle",
          "/neo.fs.v2.object.ObjectService/Put"
        ]
      },
      {
        "methods": [
          "/neo.fs.v2.object.ObjectService/Get"
        ],
        "max_ops": 10000
      }
    ]
  }
}

View file

@@ -1,8 +0,0 @@
rpc:
  limits:
    - methods:
        - /neo.fs.v2.object.ObjectService/PutSingle
        - /neo.fs.v2.object.ObjectService/Put
    - methods:
        - /neo.fs.v2.object.ObjectService/Get
      max_ops: 10000

View file

@@ -5,7 +5,6 @@ import (
 	"context"
 	"net"
-	containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
 	morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
@@ -29,7 +28,7 @@ import (
 func initContainerService(_ context.Context, c *cfg) {
 	// container wrapper that tries to invoke notary
 	// requests if chain is configured so
-	wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
+	wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
 	fatalOnErr(err)
 	c.shared.cnrClient = wrap
@@ -43,12 +42,11 @@ func initContainerService(_ context.Context, c *cfg) {
 	fatalOnErr(err)
 	cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
-	if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
+	if cacheSize > 0 {
 		frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
 	}
 	c.shared.frostfsidClient = frostfsIDSubjectProvider
-	c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
 	defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
 		c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
@@ -58,9 +56,7 @@ func initContainerService(_ context.Context, c *cfg) {
 		&c.key.PrivateKey,
 		containerService.NewAPEServer(defaultChainRouter, cnrRdr,
 			newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
-			containerService.NewSplitterService(
-				c.cfgContainer.containerBatchSize, c.respSvc,
-				containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
+			containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
 		),
 	)
 	service = containerService.NewAuditService(service, c.log, c.audit)
@@ -100,7 +96,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 			// TODO: use owner directly from the event after neofs-contract#256 will become resolved
 			//   but don't forget about the profit of reading the new container and caching it:
 			//   creation success are most commonly tracked by polling GET op.
-			cnr, err := cnrSrc.Get(ctx, ev.ID)
+			cnr, err := cnrSrc.Get(ev.ID)
 			if err == nil {
 				containerCache.containerCache.set(ev.ID, cnr, nil)
 			} else {
@@ -221,25 +217,20 @@ type morphContainerReader struct {
 	src containerCore.Source
 	lister interface {
-		ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
-		IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
+		ContainersOf(*user.ID) ([]cid.ID, error)
 	}
 }
-func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) {
-	return x.src.Get(ctx, id)
+func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
+	return x.src.Get(id)
 }
-func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) {
-	return x.src.DeletionInfo(ctx, id)
+func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
+	return x.src.DeletionInfo(id)
 }
-func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) {
-	return x.lister.ContainersOf(ctx, id)
-}
-func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error {
-	return x.lister.IterateContainersOf(ctx, id, processCID)
+func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
+	return x.lister.ContainersOf(id)
 }
 type morphContainerWriter struct {
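
The lister interface above differs between the two sides of this diff: one side exposes only the slice-returning ContainersOf, the other adds a callback-based IterateContainersOf. A self-contained sketch, with string aliases standing in for the SDK's cid.ID and user.ID types, of how the iterator form can be layered on top of the slice-returning call when the contract client lacks a native iterator:

```go
package main

import (
	"context"
	"fmt"
)

// Stand-ins for cid.ID and user.ID; the real node code uses the SDK types.
type containerID = string
type ownerID = string

type lister interface {
	ContainersOf(ctx context.Context, owner *ownerID) ([]containerID, error)
}

// iterateContainersOf feeds every listed container ID to the callback and
// stops at the first error, mirroring the IterateContainersOf shape above.
func iterateContainersOf(ctx context.Context, l lister, owner *ownerID, process func(containerID) error) error {
	ids, err := l.ContainersOf(ctx, owner)
	if err != nil {
		return err
	}
	for _, id := range ids {
		if err := process(id); err != nil {
			return err
		}
	}
	return nil
}

type fakeLister []containerID

func (f fakeLister) ContainersOf(context.Context, *ownerID) ([]containerID, error) {
	return f, nil
}

func main() {
	owner := ownerID("wallet")
	_ = iterateContainersOf(context.Background(), fakeLister{"cnr-1", "cnr-2"}, &owner, func(id containerID) error {
		fmt.Println(id)
		return nil
	})
}
```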

View file

@@ -7,12 +7,9 @@ import (
 	controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
 	controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
-	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
-	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
 )
@@ -53,14 +50,7 @@ func initControlService(ctx context.Context, c *cfg) {
 		return
 	}
-	c.cfgControlService.server = grpc.NewServer(
-		grpc.ChainUnaryInterceptor(
-			qos.NewSetCriticalIOTagUnaryServerInterceptor(),
-			metrics.NewUnaryServerInterceptor(),
-			tracing.NewUnaryServerInterceptor(),
-		),
-		// control service has no stream methods, so no stream interceptors added
-	)
+	c.cfgControlService.server = grpc.NewServer()
 	c.onShutdown(func() {
 		stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)

View file

@@ -1,7 +1,6 @@
 package main
 import (
-	"context"
 	"strings"
 	"time"
@@ -43,7 +42,7 @@ func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int
 	}
 }
-func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
+func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) {
 	hit := false
 	startedAt := time.Now()
 	defer func() {
@@ -56,7 +55,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160)
 		return result.subject, result.err
 	}
-	subj, err := m.subjProvider.GetSubject(ctx, addr)
+	subj, err := m.subjProvider.GetSubject(addr)
 	if err != nil {
 		if m.isCacheableError(err) {
 			m.subjCache.Add(addr, subjectWithError{
@@ -70,7 +69,7 @@ func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160)
 	return subj, nil
 }
-func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
+func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
 	hit := false
 	startedAt := time.Now()
 	defer func() {
@@ -83,7 +82,7 @@ func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.
 		return result.subject, result.err
 	}
-	subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr)
+	subjExt, err := m.subjProvider.GetSubjectExtended(addr)
 	if err != nil {
 		if m.isCacheableError(err) {
 			m.subjExtCache.Add(addr, subjectExtWithError{
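
The wrapper above caches not only successful lookups but also selected "cacheable" errors, so repeated misses do not hit the morph client every time. A self-contained sketch of that pattern, with a plain map standing in for the LRU cache and for the frostfsid types (all names here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("subject not found")

type subject struct{ Name string }

// subjectWithError mirrors the idea above: the cached value carries the error
// that the backend returned, so a hit can replay a negative result too.
type subjectWithError struct {
	subject *subject
	err     error
}

type cachingProvider struct {
	cache   map[string]subjectWithError
	backend func(addr string) (*subject, error)
}

func (c *cachingProvider) GetSubject(addr string) (*subject, error) {
	if r, ok := c.cache[addr]; ok {
		return r.subject, r.err // hit: replay the cached outcome, error included
	}

	subj, err := c.backend(addr)
	if err != nil {
		if errors.Is(err, errNotFound) { // only "logical" errors are worth caching
			c.cache[addr] = subjectWithError{err: err}
		}
		return nil, err
	}

	c.cache[addr] = subjectWithError{subject: subj}
	return subj, nil
}

func main() {
	calls := 0
	p := &cachingProvider{
		cache: map[string]subjectWithError{},
		backend: func(string) (*subject, error) {
			calls++
			return nil, errNotFound
		},
	}

	p.GetSubject("addr-1")
	p.GetSubject("addr-1")
	fmt.Println("backend calls:", calls) // 1: the not-found result was served from cache
}
```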

View file

@@ -4,19 +4,14 @@ import (
 	"context"
 	"crypto/tls"
 	"errors"
-	"fmt"
 	"net"
 	"time"
 	grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
-	rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-	qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
 	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
-	"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
-	qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
@@ -135,16 +130,12 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
 	serverOpts := []grpc.ServerOption{
 		grpc.MaxRecvMsgSize(maxRecvMsgSize),
 		grpc.ChainUnaryInterceptor(
-			qos.NewUnaryServerInterceptor(),
 			metrics.NewUnaryServerInterceptor(),
 			tracing.NewUnaryServerInterceptor(),
-			qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
 		),
 		grpc.ChainStreamInterceptor(
-			qos.NewStreamServerInterceptor(),
 			metrics.NewStreamServerInterceptor(),
 			tracing.NewStreamServerInterceptor(),
-			qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
 		),
 	}
@@ -233,54 +224,3 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger
 	l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
 }
-func initRPCLimiter(c *cfg) error {
-	var limits []limiting.KeyLimit
-	for _, l := range rpcconfig.Limits(c.appCfg) {
-		limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
-	}
-	if err := validateRPCLimits(c, limits); err != nil {
-		return fmt.Errorf("validate RPC limits: %w", err)
-	}
-	limiter, err := limiting.NewSemaphoreLimiter(limits)
-	if err != nil {
-		return fmt.Errorf("create RPC limiter: %w", err)
-	}
-	c.cfgGRPC.limiter.Store(limiter)
-	return nil
-}
-func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error {
-	availableMethods := getAvailableMethods(c.cfgGRPC.servers)
-	for _, limit := range limits {
-		for _, method := range limit.Keys {
-			if _, ok := availableMethods[method]; !ok {
-				return fmt.Errorf("set limit on an unknown method %q", method)
-			}
-		}
-	}
-	return nil
-}
-func getAvailableMethods(servers []grpcServer) map[string]struct{} {
-	res := make(map[string]struct{})
-	for _, server := range servers {
-		for _, method := range getMethodsForServer(server.Server) {
-			res[method] = struct{}{}
-		}
-	}
-	return res
-}
-func getMethodsForServer(server *grpc.Server) []string {
-	var res []string
-	for service, info := range server.GetServiceInfo() {
-		for _, method := range info.Methods {
-			res = append(res, fmt.Sprintf("/%s/%s", service, method.Name))
-		}
-	}
-	return res
-}
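
validateRPCLimits only accepts limits for methods that are actually registered on the servers, and the full method names come from grpc-go's standard Server.GetServiceInfo API. A runnable sketch of that enumeration (the health service is registered here just to have something to list; it is not part of the node code):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	srv := grpc.NewServer()
	healthpb.RegisterHealthServer(srv, health.NewServer())

	for service, info := range srv.GetServiceInfo() {
		for _, m := range info.Methods {
			// Produces names like "/grpc.health.v1.Health/Check", the same
			// form used as keys in the rpc.limits config shown above.
			fmt.Printf("/%s/%s\n", service, m.Name)
		}
	}
}
```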

View file

@@ -101,7 +101,6 @@ func initApp(ctx context.Context, c *cfg) {
 	initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
 	initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
-	initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) })
 	initAccessPolicyEngine(ctx, c)
 	initAndLog(ctx, c, "access policy engine", func(c *cfg) {
@@ -117,8 +116,6 @@ func initApp(ctx context.Context, c *cfg) {
 	initAndLog(ctx, c, "apemanager", initAPEManagerService)
 	initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })
-	initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) })
 	initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
 }
@@ -137,7 +134,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C
 	err := stopper(ctx)
 	if err != nil {
 		c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 	}

View file

@@ -35,16 +35,20 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
 	lookupScriptHashesInNNS(c) // smart contract auto negotiation
+	if c.cfgMorph.notaryEnabled {
 		err := c.cfgMorph.client.EnableNotarySupport(
 			client.WithProxyContract(
 				c.cfgMorph.proxyScriptHash,
 			),
 		)
 		fatalOnErr(err)
+	}
-	c.log.Info(ctx, logs.FrostFSNodeNotarySupport)
+	c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
+		zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
+	)
-	wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0)
+	wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
 	fatalOnErr(err)
 	var netmapSource netmap.Source
@@ -96,7 +100,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
 	if err != nil {
 		c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
 			zap.Any("endpoints", addresses),
-			zap.Error(err),
+			zap.String("error", err.Error()),
 		)
 		fatalOnErr(err)
@@ -112,9 +116,15 @@ func initMorphClient(ctx context.Context, c *cfg) {
 	}
 	c.cfgMorph.client = cli
+	c.cfgMorph.notaryEnabled = cli.ProbeNotary()
 }
 func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
+	// skip notary deposit in non-notary environments
+	if !c.cfgMorph.notaryEnabled {
+		return
+	}
 	tx, vub, err := makeNotaryDeposit(ctx, c)
 	fatalOnErr(err)
@@ -151,7 +161,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
 }
 func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
-	if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
+	if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil {
 		return err
 	}
@@ -168,7 +178,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 	fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
 	if err != nil {
 		fromSideChainBlock = 0
-		c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
+		c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
 	}
 	subs, err = subscriber.New(ctx, &subscriber.Params{
@@ -223,17 +233,27 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse
 	subs map[event.Type][]event.Handler,
 ) {
 	for typ, handlers := range subs {
+		pi := event.NotificationParserInfo{}
+		pi.SetType(typ)
+		pi.SetScriptHash(scHash)
 		p, ok := parsers[typ]
 		if !ok {
 			panic(fmt.Sprintf("missing parser for event %s", typ))
 		}
-		lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
-			Contract: scHash,
-			Type:     typ,
-			Parser:   p,
-			Handlers: handlers,
-		})
+		pi.SetParser(p)
+		lis.SetNotificationParser(pi)
+		for _, h := range handlers {
+			hi := event.NotificationHandlerInfo{}
+			hi.SetType(typ)
+			hi.SetScriptHash(scHash)
+			hi.SetHandler(h)
+			lis.RegisterNotificationHandler(hi)
+		}
 	}
 }
@@ -262,6 +282,10 @@ func lookupScriptHashesInNNS(c *cfg) {
 	)
 	for _, t := range targets {
+		if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
+			continue // ignore proxy contract if notary disabled
+		}
 		if emptyHash.Equals(*t.h) {
 			*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
 			fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)

View file

@@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
 		}
 	}
-	s.setControlNetmapStatus(ctrlNetSt)
+	s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
 }
 // sets the current node state to the given value. Subsequent cfg.bootstrap
@@ -193,14 +193,16 @@ func addNewEpochNotificationHandlers(c *cfg) {
 		}
 	})
+	if c.cfgMorph.notaryEnabled {
 		addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
 			_, _, err := makeNotaryDeposit(ctx, c)
 			if err != nil {
 				c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
-					zap.Error(err),
+					zap.String("error", err.Error()),
 				)
 			}
 		})
+	}
 }
 // bootstrapNode adds current node to the Network map.
@@ -239,7 +241,7 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
 // initNetmapState inits current Network map state.
 // Must be called after Morph components initialization.
 func initNetmapState(ctx context.Context, c *cfg) {
-	epoch, err := c.cfgNetmap.wrapper.Epoch(ctx)
+	epoch, err := c.cfgNetmap.wrapper.Epoch()
 	fatalOnErrDetails("could not initialize current epoch number", err)
 	var ni *netmapSDK.NodeInfo
@@ -278,7 +280,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
 }
 func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
-	nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx)
+	nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
 	if err != nil {
 		return nil, err
 	}
@@ -291,7 +293,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
 		}
 	}
-	node, err := c.netmapLocalNodeState(ctx, epoch)
+	node, err := c.netmapLocalNodeState(epoch)
 	if err != nil {
 		return nil, err
 	}
@@ -312,9 +314,9 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
 	return candidate, nil
 }
-func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
+func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
 	// calculate current network state
-	nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch)
+	nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch)
 	if err != nil {
 		return nil, err
 	}
@@ -376,8 +378,8 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro
 	return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
 }
-func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) {
-	epoch, err := c.netMapSource.Epoch(ctx)
+func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
+	epoch, err := c.netMapSource.Epoch()
 	if err != nil {
 		return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err)
 	}
@@ -390,7 +392,7 @@ func (c *cfg) ForceMaintenance(ctx context.Context) error {
 }
 func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
-	netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx)
+	netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
 	if err != nil {
 		err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
 	} else if !netSettings.MaintenanceModeAllowed {
@@ -423,7 +425,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.
 	if err != nil {
 		return err
 	}
-	return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
+	return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res)
 }
 type netInfo struct {
@@ -438,7 +440,7 @@ type netInfo struct {
 	msPerBlockRdr func() (int64, error)
 }
-func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) {
+func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
 	magic, err := n.magic.MagicNumber()
 	if err != nil {
 		return nil, err
@@ -448,7 +450,7 @@ func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.Net
 	ni.SetCurrentEpoch(n.netState.CurrentEpoch())
 	ni.SetMagicNumber(magic)
-	netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx)
+	netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration()
 	if err != nil {
 		return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err)
 	}

View file

@@ -13,6 +13,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
 	morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
 	objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
 	objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@@ -54,11 +55,11 @@ type objectSvc struct {
 	patch *patchsvc.Service
 }
-func (c *cfg) MaxObjectSize(ctx context.Context) uint64 {
-	sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx)
+func (c *cfg) MaxObjectSize() uint64 {
+	sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
 	if err != nil {
-		c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
-			zap.Error(err),
+		c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
+			zap.String("error", err.Error()),
 		)
 	}
@@ -122,8 +123,8 @@ type innerRingFetcherWithNotary struct {
 	sidechain *morphClient.Client
 }
-func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) {
-	keys, err := fn.sidechain.NeoFSAlphabetList(ctx)
+func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
+	keys, err := fn.sidechain.NeoFSAlphabetList()
 	if err != nil {
 		return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err)
 	}
@@ -136,6 +137,24 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]by
 	return result, nil
 }
+type innerRingFetcherWithoutNotary struct {
+	nm *nmClient.Client
+}
+func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
+	keys, err := f.nm.GetInnerRingList()
+	if err != nil {
+		return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
+	}
+	result := make([][]byte, 0, len(keys))
+	for i := range keys {
+		result = append(result, keys[i].Bytes())
+	}
+	return result, nil
+}
 func initObjectService(c *cfg) {
 	keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
@@ -168,7 +187,7 @@ func initObjectService(c *cfg) {
 	sPatch := createPatchSvc(sGet, sPut)
 	// build service pipeline
-	// grpc | audit | qos | <metrics> | signature | response | acl | ape | split
+	// grpc | audit | <metrics> | signature | response | acl | ape | split
 	splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
@@ -191,8 +210,7 @@ func initObjectService(c *cfg) {
 	c.shared.metricsSvc = objectService.NewMetricCollector(
 		signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
-	qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService)
-	auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
+	auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit)
 	server := objectTransportGRPC.New(auditSvc)
 	c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
@@ -216,7 +234,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
 		prm.MarkAsGarbage(addr)
 		prm.WithForceRemoval()
-		return ls.Inhume(ctx, prm)
+		_, err := ls.Inhume(ctx, prm)
+		return err
 	}
 	remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
@@ -266,9 +285,10 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
 			var inhumePrm engine.InhumePrm
 			inhumePrm.MarkAsGarbage(addr)
-			if err := ls.Inhume(ctx, inhumePrm); err != nil {
+			_, err := ls.Inhume(ctx, inhumePrm)
+			if err != nil {
 				c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
-					zap.Error(err),
+					zap.String("error", err.Error()),
 				)
 			}
 		}),
@@ -285,9 +305,14 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
 }
 func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
+	if c.cfgMorph.client.ProbeNotary() {
 		return &innerRingFetcherWithNotary{
 			sidechain: c.cfgMorph.client,
 		}
+	}
+	return &innerRingFetcherWithoutNotary{
+		nm: c.cfgNetmap.wrapper,
+	}
 }
 func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCache) *replicator.Replicator {
@@ -326,6 +351,7 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
 		c,
 		c.cfgNetmap.state,
 		irFetcher,
+		objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
 		objectwriter.WithLogger(c.log),
 		objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
 	)
@@ -474,7 +500,8 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
 	prm.WithTarget(tombstone, addrs...)
-	return e.engine.Inhume(ctx, prm)
+	_, err := e.engine.Inhume(ctx, prm)
+	return err
 }
 func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {

View file

@@ -1,95 +0,0 @@
package main
import (
"bytes"
"context"
qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"go.uber.org/zap"
)
type cfgQoSService struct {
netmapSource netmap.Source
logger *logger.Logger
allowedCriticalPubs [][]byte
allowedInternalPubs [][]byte
}
func initQoSService(c *cfg) {
criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg)
internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg)
rawCriticalPubs := make([][]byte, 0, len(criticalPubs))
rawInternalPubs := make([][]byte, 0, len(internalPubs))
for i := range criticalPubs {
rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes())
}
for i := range internalPubs {
rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes())
}
c.cfgQoSService = cfgQoSService{
netmapSource: c.netMapSource,
logger: c.log,
allowedCriticalPubs: rawCriticalPubs,
allowedInternalPubs: rawInternalPubs,
}
}
func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
rawTag, defined := qosTagging.IOTagFromContext(ctx)
if !defined {
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
ioTag, err := qos.FromRawString(rawTag)
if err != nil {
s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
switch ioTag {
case qos.IOTagClient:
return ctx
case qos.IOTagCritical:
for _, pk := range s.allowedCriticalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal:
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
default:
s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
}
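
AdjustIncomingTag relies on the tagging helpers to read the claimed IO tag from the request context and to overwrite it when the signer is not authorized. A small sketch of those two helpers in isolation (the literal tag strings here stand in for the qos.IOTag constants used above; this is not the node's actual request path):

```go
package main

import (
	"context"
	"fmt"

	qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
)

func main() {
	// A request arrives claiming the "critical" IO tag.
	ctx := qosTagging.ContextWithIOTag(context.Background(), "critical")

	tag, ok := qosTagging.IOTagFromContext(ctx)
	fmt.Println(tag, ok) // critical true

	// If the signer is not an allowed critical/internal key and is not a
	// netmap node, AdjustIncomingTag rewrites the context with the client tag.
	ctx = qosTagging.ContextWithIOTag(ctx, "client")
	tag, _ = qosTagging.IOTagFromContext(ctx)
	fmt.Println(tag) // client
}
```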
