Compare commits: master...feat/refac

8 commits

| SHA1 |
| --- |
| 462876291d |
| f5f5188b58 |
| 4ba592f7f8 |
| ef33e875ed |
| 07c96414d0 |
| fb0d271675 |
| 1e59a634b7 |
| fa6e39603b |

499 changed files with 5206 additions and 5467 deletions

@@ -1,10 +1,6 @@
 name: Build
 
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]
 
 jobs:
   build:

@@ -1,28 +0,0 @@
-name: OCI image
-
-on:
-  push:
-  workflow_dispatch:
-
-jobs:
-  image:
-    name: Build container images
-    runs-on: docker
-    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
-    steps:
-      - name: Clone git repo
-        uses: actions/checkout@v3
-
-      - name: Build OCI image
-        run: make images
-
-      - name: Push image to OCI registry
-        run: |
-          echo "$REGISTRY_PASSWORD" \
-            | docker login --username truecloudlab --password-stdin git.frostfs.info
-          make push-images
-        if: >-
-          startsWith(github.ref, 'refs/tags/v') &&
-          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
-        env:
-          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}

@@ -1,10 +1,5 @@
 name: Pre-commit hooks
-
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]
 
 jobs:
   precommit:

@@ -1,10 +1,5 @@
 name: Tests and linters
-
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]
 
 jobs:
   lint:

@@ -1,10 +1,5 @@
 name: Vulncheck
-
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]
 
 jobs:
   vulncheck:

@@ -89,7 +89,5 @@ linters:
    - protogetter
-   - intrange
-   - tenv
    - unconvert
    - unparam
  disable-all: true
  fast: false

CHANGELOG.md (24 lines changed)

@@ -9,30 +9,6 @@ Changelog for FrostFS Node
 ### Removed
 ### Updated
 
-## [v0.44.0] - 2024-25-11 - Rongbuk
-
-### Added
-- Allow to prioritize nodes during GET traversal via attributes (#1439)
-- Add metrics for the frostfsid cache (#1464)
-- Customize constant attributes attached to every tracing span (#1488)
-- Manage additional keys in the `frostfsid` contract (#1505)
-- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519)
-
-### Changed
-- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396)
-- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515)
-
-### Fixed
-- Fix EC object search (#1408)
-- Fix EC object put when one of the nodes is unavailable (#1427)
-
-### Removed
-- Drop most of the eACL-related code (#1425)
-- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483)
-
-### Upgrading from v0.43.0
-The metabase schema has changed completely, resync is required.
-
 ## [v0.42.0]
 
 ### Added

@@ -1,3 +0,0 @@
-.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
-.forgejo/.* @potyarkin
-Makefile @potyarkin

Makefile (26 lines changed)

@@ -8,8 +8,8 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 
 GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.62.0
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
+LINT_VERSION ?= 1.61.0
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
 PROTOC_VERSION ?= 25.0
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)
 PROTOC_OS_VERSION=osx-x86_64

@@ -139,15 +139,6 @@ images: image-storage image-ir image-cli image-adm
 # Build dirty local Docker images
 dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
 
-# Push FrostFS components' docker image to the registry
-push-image-%:
-	@echo "⇒ Publish FrostFS $* docker image "
-	@docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
-
-# Push all Docker images to the registry
-.PHONY: push-images
-push-images: push-image-storage push-image-ir push-image-cli push-image-adm
-
 # Run `make %` in Golang container
 docker/%:
 	docker run --rm -t \

@@ -279,12 +270,10 @@ env-up: all
 		echo "Frostfs contracts not found"; exit 1; \
 	fi
 	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
-	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
-		--storage-wallet ./dev/storage/wallet01.json \
-		--storage-wallet ./dev/storage/wallet02.json \
-		--storage-wallet ./dev/storage/wallet03.json \
-		--storage-wallet ./dev/storage/wallet04.json
-
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
+	${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
 	@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
 		make locode-download; \
 	fi

@@ -293,6 +282,7 @@ env-up: all
 
 # Shutdown dev environment
 env-down:
-	docker compose -f dev/docker-compose.yml down -v
+	docker compose -f dev/docker-compose.yml down
+	docker volume rm -f frostfs-node_neo-go
 	rm -rf ./$(TMP_DIR)/state
 	rm -rf ./$(TMP_DIR)/storage

VERSION (2 lines changed)

@@ -1 +1 @@
-v0.44.0
+v0.42.0

@@ -20,7 +20,6 @@ const (
 	AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"
 
 	LocalDumpFlag     = "local-dump"
-	ProtoConfigPath   = "protocol"
 	ContractsInitFlag = "contracts"
 	ContractsInitFlagDesc = "Path to archive with compiled FrostFS contracts (the default is to fetch the latest release from the official repository)"
 	ContractsURLFlag  = "contracts-url"

@@ -135,7 +135,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er
 	if err != nil {
 		return nil, fmt.Errorf("resolve container contract hash: %w", err)
 	}
-	cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
+	cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
 	if err != nil {
 		return nil, fmt.Errorf("create morph container client: %w", err)
 	}

@@ -8,7 +8,7 @@ import (
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
 	apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape"
 	apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
+	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 )

@@ -200,7 +200,7 @@ func listRuleChains(cmd *cobra.Command, _ []string) {
 
 func setAdmin(cmd *cobra.Command, _ []string) {
 	s, _ := cmd.Flags().GetString(addrAdminFlag)
-	addr, err := address.StringToUint160(s)
+	addr, err := util.Uint160DecodeStringLE(s)
 	commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err)
 	pci, ac := newPolicyContractInterface(cmd)
 	h, vub, err := pci.SetAdmin(addr)

@@ -214,7 +214,7 @@ func getAdmin(cmd *cobra.Command, _ []string) {
 	pci, _ := newPolicyContractReaderInterface(cmd)
 	addr, err := pci.GetAdmin()
 	commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err)
-	cmd.Println(address.Uint160ToString(addr))
+	cmd.Println(addr.StringLE())
 }
 
 func listTargets(cmd *cobra.Command, _ []string) {

@@ -53,15 +53,16 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
 }
 
 func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
 
 	inv := invoker.New(c, nil)
+	var ch util.Uint160
 	r := management.NewReader(inv)
 	nnsCs, err := helper.GetContractByID(r, 1)
 	commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
 
-	ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
+	ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
 	commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)
 
 	invokerAdapter := &invokerAdapter{

@@ -73,7 +74,7 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
 }
 
 func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
 
 	ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName)

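Note on the recurring pattern above: the admin tooling never hard-codes side-chain contract hashes; it asks the NNS contract (registered under contract ID 1) to resolve them by domain. A minimal sketch of that lookup, assuming the repo-internal helpers (`helper.GetN3Client`, `helper.GetContractByID`, `helper.NNSResolveHash`, `helper.DomainOf`) behave as the hunks above show; `resolvePolicyHash` itself is a hypothetical name and error handling is simplified compared to `commonCmd.ExitOnErr`:

```go
import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/spf13/viper"
)

// resolvePolicyHash resolves the Policy contract script hash through NNS,
// mirroring newPolicyContractReaderInterface above.
func resolvePolicyHash(v *viper.Viper) (util.Uint160, error) {
	c, err := helper.GetN3Client(v) // RPC client built from the configured endpoint
	if err != nil {
		return util.Uint160{}, fmt.Errorf("create NEO rpc client: %w", err)
	}
	inv := invoker.New(c, nil)
	r := management.NewReader(inv)
	nnsCs, err := helper.GetContractByID(r, 1) // NNS always has contract ID 1
	if err != nil {
		return util.Uint160{}, fmt.Errorf("get NNS contract state: %w", err)
	}
	return helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
}
```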
@@ -51,7 +51,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
 		nmHash util.Uint160
 	)
 
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	if err != nil {
 		return err
 	}

@@ -26,7 +26,7 @@ import (
 const forceConfigSet = "force"
 
 func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	if err != nil {
 		return fmt.Errorf("can't create N3 client: %w", err)
 	}

@@ -4,6 +4,7 @@ import "time"
 
 const (
 	ConsensusAccountName = "consensus"
+	ProtoConfigPath      = "protocol"
 
 	// MaxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size
 	// of the invocation script.

@@ -76,7 +76,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
 		return fmt.Errorf("invalid filename: %w", err)
 	}
 
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	if err != nil {
 		return fmt.Errorf("can't create N3 client: %w", err)
 	}

@@ -157,7 +157,7 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
 }
 
 func listContainers(cmd *cobra.Command, _ []string) error {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	if err != nil {
 		return fmt.Errorf("can't create N3 client: %w", err)
 	}

@@ -36,7 +36,7 @@ type contractDumpInfo struct {
 }
 
 func dumpContractHashes(cmd *cobra.Command, _ []string) error {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	if err != nil {
 		return fmt.Errorf("can't create N3 client: %w", err)
 	}

@@ -1,83 +0,0 @@
-package frostfsid
-
-import (
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"github.com/spf13/cobra"
-	"github.com/spf13/viper"
-)
-
-var (
-	frostfsidAddSubjectKeyCmd = &cobra.Command{
-		Use:   "add-subject-key",
-		Short: "Add a public key to the subject in frostfsid contract",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-		Run: frostfsidAddSubjectKey,
-	}
-	frostfsidRemoveSubjectKeyCmd = &cobra.Command{
-		Use:   "remove-subject-key",
-		Short: "Remove a public key from the subject in frostfsid contract",
-		PreRun: func(cmd *cobra.Command, _ []string) {
-			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
-			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
-		},
-		Run: frostfsidRemoveSubjectKey,
-	}
-)
-
-func initFrostfsIDAddSubjectKeyCmd() {
-	Cmd.AddCommand(frostfsidAddSubjectKeyCmd)
-
-	ff := frostfsidAddSubjectKeyCmd.Flags()
-	ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
-	ff.String(subjectAddressFlag, "", "Subject address")
-	_ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
-
-	ff.String(subjectKeyFlag, "", "Public key to add")
-	_ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
-}
-
-func initFrostfsIDRemoveSubjectKeyCmd() {
-	Cmd.AddCommand(frostfsidRemoveSubjectKeyCmd)
-
-	ff := frostfsidRemoveSubjectKeyCmd.Flags()
-	ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
-
-	ff.String(subjectAddressFlag, "", "Subject address")
-	_ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag)
-
-	ff.String(subjectKeyFlag, "", "Public key to remove")
-	_ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag)
-}
-
-func frostfsidAddSubjectKey(cmd *cobra.Command, _ []string) {
-	addr := getFrostfsIDSubjectAddress(cmd)
-	pub := getFrostfsIDSubjectKey(cmd)
-
-	ffsid, err := newFrostfsIDClient(cmd)
-	commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
-	ffsid.addCall(ffsid.roCli.AddSubjectKeyCall(addr, pub))
-
-	err = ffsid.sendWait()
-	commonCmd.ExitOnErr(cmd, "add subject key: %w", err)
-}
-
-func frostfsidRemoveSubjectKey(cmd *cobra.Command, _ []string) {
-	addr := getFrostfsIDSubjectAddress(cmd)
-	pub := getFrostfsIDSubjectKey(cmd)
-
-	ffsid, err := newFrostfsIDClient(cmd)
-	commonCmd.ExitOnErr(cmd, "init contract client: %w", err)
-
-	ffsid.addCall(ffsid.roCli.RemoveSubjectKeyCall(addr, pub))
-
-	err = ffsid.sendWait()
-	commonCmd.ExitOnErr(cmd, "remove subject key: %w", err)
-}

@@ -1,6 +1,7 @@
 package frostfsid
 
 import (
+	"errors"
 	"fmt"
 	"math/big"
 	"sort"

@@ -60,6 +61,7 @@ var (
 		Use:   "list-namespaces",
 		Short: "List all namespaces in frostfsid",
 		PreRun: func(cmd *cobra.Command, _ []string) {
+			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
 		},
 		Run: frostfsidListNamespaces,

@@ -89,6 +91,7 @@ var (
 		Use:   "list-subjects",
 		Short: "List subjects in namespace",
 		PreRun: func(cmd *cobra.Command, _ []string) {
+			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
 		},
 		Run: frostfsidListSubjects,

@@ -118,6 +121,7 @@ var (
 		Use:   "list-groups",
 		Short: "List groups in namespace",
 		PreRun: func(cmd *cobra.Command, _ []string) {
+			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
 		},
 		Run: frostfsidListGroups,

@@ -147,6 +151,7 @@ var (
 		Use:   "list-group-subjects",
 		Short: "List subjects in group",
 		PreRun: func(cmd *cobra.Command, _ []string) {
+			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
 		},
 		Run: frostfsidListGroupSubjects,

@@ -164,6 +169,7 @@ func initFrostfsIDCreateNamespaceCmd() {
 func initFrostfsIDListNamespacesCmd() {
 	Cmd.AddCommand(frostfsidListNamespacesCmd)
 	frostfsidListNamespacesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+	frostfsidListNamespacesCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 }
 
 func initFrostfsIDCreateSubjectCmd() {

@@ -187,6 +193,7 @@ func initFrostfsIDListSubjectsCmd() {
 	frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects")
 	frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
+	frostfsidListSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 }
 
 func initFrostfsIDCreateGroupCmd() {

@@ -210,6 +217,7 @@ func initFrostfsIDListGroupsCmd() {
 	Cmd.AddCommand(frostfsidListGroupsCmd)
 	frostfsidListGroupsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
 	frostfsidListGroupsCmd.Flags().String(namespaceFlag, "", "Namespace to list groups")
+	frostfsidListGroupsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 }
 
 func initFrostfsIDAddSubjectToGroupCmd() {

@@ -234,6 +242,7 @@ func initFrostfsIDListGroupSubjectsCmd() {
 	frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name")
 	frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id")
 	frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
+	frostfsidListGroupSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 }
 
 func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {

@@ -253,7 +262,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
 	reader := frostfsidrpclient.NewReader(inv, hash)
 	sessionID, it, err := reader.ListNamespaces()
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 
 	namespaces, err := frostfsidclient.ParseNamespaces(items)

@@ -305,7 +314,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
 	sessionID, it, err := reader.ListNamespaceSubjects(ns)
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
 
-	subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
+	subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
 	commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)
 
 	sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })

@@ -319,7 +328,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
 	sessionID, it, err := reader.ListSubjects()
 	commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)
 
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 
 	subj, err := frostfsidclient.ParseSubject(items)

@@ -365,7 +374,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
 	sessionID, it, err := reader.ListGroups(ns)
 	commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
 
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
 	groups, err := frostfsidclient.ParseGroups(items)
 	commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)

@@ -415,7 +424,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
 	sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
 	commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
 
-	items, err := readIterator(inv, &it, sessionID)
+	items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
 	commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)
 
 	subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)

@@ -488,28 +497,32 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
 	}
 	f.bw.Reset()
 
+	if len(f.wCtx.SentTxs) == 0 {
+		return nil, errors.New("no transactions to wait")
+	}
+
 	f.wCtx.Command.Println("Waiting for transactions to persist...")
 	return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
 }
 
-func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
+func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
 	var shouldStop bool
 	res := make([]stackitem.Item, 0)
 	for !shouldStop {
-		items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
+		items, err := inv.TraverseIterator(sessionID, iter, batchSize)
 		if err != nil {
 			return nil, err
 		}
 
 		res = append(res, items...)
-		shouldStop = len(items) < iteratorBatchSize
+		shouldStop = len(items) < batchSize
 	}
 
 	return res, nil
 }
 
 func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
 
 	inv := invoker.New(c, nil)

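The `batchSize` parameter threaded through `readIterator` above generalizes the old package-level `iteratorBatchSize` constant. A hedged sketch of the same drain loop as a standalone helper — `drainIterator` and its setup are hypothetical names, while `unwrap.SessionIterator` and `invoker.TraverseIterator` are the real neo-go APIs (the latter is used verbatim in the hunk above):

```go
import (
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

// drainIterator traverses a session-backed iterator in fixed-size batches,
// mirroring readIterator: a short batch signals that the iterator is exhausted.
func drainIterator(inv *invoker.Invoker, contract util.Uint160, method string, batchSize int) ([]stackitem.Item, error) {
	sid, iter, err := unwrap.SessionIterator(inv.Call(contract, method))
	if err != nil {
		return nil, err
	}
	defer func() { _ = inv.TerminateSession(sid) }()

	var res []stackitem.Item
	for {
		batch, err := inv.TraverseIterator(sid, &iter, batchSize)
		if err != nil {
			return nil, err
		}
		res = append(res, batch...)
		if len(batch) < batchSize { // short batch: nothing left on the server side
			break
		}
	}
	return res, nil
}
```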
@@ -1,12 +1,59 @@
 package frostfsid
 
 import (
+	"encoding/hex"
 	"testing"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
 	"github.com/spf13/viper"
 	"github.com/stretchr/testify/require"
 )
 
+func TestFrostfsIDConfig(t *testing.T) {
+	pks := make([]*keys.PrivateKey, 4)
+	for i := range pks {
+		pk, err := keys.NewPrivateKey()
+		require.NoError(t, err)
+		pks[i] = pk
+	}
+
+	fmts := []string{
+		pks[0].GetScriptHash().StringLE(),
+		address.Uint160ToString(pks[1].GetScriptHash()),
+		hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
+		hex.EncodeToString(pks[3].PublicKey().Bytes()),
+	}
+
+	for i := range fmts {
+		v := viper.New()
+		v.Set("frostfsid.admin", fmts[i])
+
+		actual, found, err := helper.GetFrostfsIDAdmin(v)
+		require.NoError(t, err)
+		require.True(t, found)
+		require.Equal(t, pks[i].GetScriptHash(), actual)
+	}
+
+	t.Run("bad key", func(t *testing.T) {
+		v := viper.New()
+		v.Set("frostfsid.admin", "abc")
+
+		_, found, err := helper.GetFrostfsIDAdmin(v)
+		require.Error(t, err)
+		require.True(t, found)
+	})
+	t.Run("missing key", func(t *testing.T) {
+		v := viper.New()
+
+		_, found, err := helper.GetFrostfsIDAdmin(v)
+		require.NoError(t, err)
+		require.False(t, found)
+	})
+}
+
 func TestNamespaceRegexp(t *testing.T) {
 	for _, tc := range []struct {
 		name string

@@ -12,6 +12,4 @@ func init() {
 	initFrostfsIDAddSubjectToGroupCmd()
 	initFrostfsIDRemoveSubjectFromGroupCmd()
 	initFrostfsIDListGroupSubjectsCmd()
-	initFrostfsIDAddSubjectKeyCmd()
-	initFrostfsIDRemoveSubjectKeyCmd()
 }

@@ -12,6 +12,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
 	"github.com/nspcc-dev/neo-go/pkg/io"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract"

@@ -140,29 +141,60 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
 }
 
 func generateStorageCreds(cmd *cobra.Command, _ []string) error {
-	walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
-	w, err := wallet.NewWallet(walletPath)
-	if err != nil {
-		return fmt.Errorf("create wallet: %w", err)
-	}
-
-	label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
-	password, err := config.GetStoragePassword(viper.GetViper(), label)
-	if err != nil {
-		return fmt.Errorf("can't fetch password: %w", err)
-	}
-
-	if label == "" {
-		label = constants.SingleAccountName
-	}
-
-	if err := w.CreateAccount(label, password); err != nil {
-		return fmt.Errorf("can't create account: %w", err)
-	}
-	return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
+	return refillGas(cmd, storageGasConfigFlag, true)
 }
 
-func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
+func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
+	// storage wallet path is not part of the config
+	storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
+	// wallet address is not part of the config
+	walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
+
+	var gasReceiver util.Uint160
+
+	if len(walletAddress) != 0 {
+		gasReceiver, err = address.StringToUint160(walletAddress)
+		if err != nil {
+			return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
+		}
+	} else {
+		if storageWalletPath == "" {
+			return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
+		}
+
+		var w *wallet.Wallet
+
+		if createWallet {
+			w, err = wallet.NewWallet(storageWalletPath)
+		} else {
+			w, err = wallet.NewWalletFromFile(storageWalletPath)
+		}
+
+		if err != nil {
+			return fmt.Errorf("can't create wallet: %w", err)
+		}
+
+		if createWallet {
+			var password string
+
+			label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
+			password, err := config.GetStoragePassword(viper.GetViper(), label)
+			if err != nil {
+				return fmt.Errorf("can't fetch password: %w", err)
+			}
+
+			if label == "" {
+				label = constants.SingleAccountName
+			}
+
+			if err := w.CreateAccount(label, password); err != nil {
+				return fmt.Errorf("can't create account: %w", err)
+			}
+		}
+
+		gasReceiver = w.Accounts[0].Contract.ScriptHash()
+	}
+
 	gasStr := viper.GetString(gasFlag)
 
 	gasAmount, err := helper.ParseGASAmount(gasStr)

@@ -176,11 +208,9 @@ func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160)
 	}
 
 	bw := io.NewBufBinWriter()
-	for _, gasReceiver := range gasReceivers {
-		emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
-			wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
-		emit.Opcodes(bw.BinWriter, opcode.ASSERT)
-	}
+	emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
+		wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
+	emit.Opcodes(bw.BinWriter, opcode.ASSERT)
 	if bw.Err != nil {
 		return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
 	}

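For contrast, the variadic form being removed above built a single script that funds every receiver, emitting one GAS `transfer` call plus an `ASSERT` per target so that a failed transfer aborts the whole transaction. A sketch under that assumption (`buildTransferScript` is a hypothetical name; the emit/gas/opcode/callflag packages are real neo-go APIs):

```go
import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)

// buildTransferScript emits one GAS transfer per receiver and asserts each
// result, so a single transaction can fund any number of wallets. from is
// the committee account hash; amount is in GAS's smallest units.
func buildTransferScript(from util.Uint160, receivers []util.Uint160, amount int64) ([]byte, error) {
	bw := io.NewBufBinWriter()
	for _, to := range receivers {
		emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, from, to, amount, nil)
		emit.Opcodes(bw.BinWriter, opcode.ASSERT) // abort the whole tx if any transfer fails
	}
	if bw.Err != nil {
		return nil, fmt.Errorf("invalid transfer arguments: %w", bw.Err)
	}
	return bw.Bytes(), nil
}
```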
@@ -1,12 +1,7 @@
 package generate
 
 import (
-	"fmt"
-
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neo-go/pkg/wallet"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 )

@@ -38,27 +33,7 @@ var (
 			_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
 		},
 		RunE: func(cmd *cobra.Command, _ []string) error {
-			storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
-			walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
-
-			var gasReceivers []util.Uint160
-			for _, walletAddress := range walletAddresses {
-				addr, err := address.StringToUint160(walletAddress)
-				if err != nil {
-					return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
-				}
-
-				gasReceivers = append(gasReceivers, addr)
-			}
-			for _, storageWalletPath := range storageWalletPaths {
-				w, err := wallet.NewWalletFromFile(storageWalletPath)
-				if err != nil {
-					return fmt.Errorf("can't create wallet: %w", err)
-				}
-
-				gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
-			}
-			return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
+			return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
 		},
 	}
 	GenerateAlphabetCmd = &cobra.Command{

@@ -75,10 +50,10 @@ var (
 func initRefillGasCmd() {
 	RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 	RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
-	RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
-	RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
+	RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
+	RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
 	RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
 	RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
 	RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
 }
 
 func initGenerateStorageCmd() {

@@ -38,24 +38,38 @@ func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*Local
 	walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
 	var act *actor.Actor
 	var accounts []*wallet.Account
-	wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
-	commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
-
-	for _, w := range wallets {
-		acc, err := GetWalletAccount(w, accName)
-		commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err)
-		accounts = append(accounts, acc)
-	}
-	act, err = actor.New(c, []actor.SignerAccount{{
-		Signer: transaction.Signer{
-			Account: accounts[0].Contract.ScriptHash(),
-			Scopes:  transaction.Global,
-		},
-		Account: accounts[0],
-	}})
-	if err != nil {
-		return nil, err
+	if walletDir == "" {
+		account, err := wallet.NewAccount()
+		commonCmd.ExitOnErr(cmd, "unable to create dummy account: %w", err)
+		act, err = actor.New(c, []actor.SignerAccount{{
+			Signer: transaction.Signer{
+				Account: account.Contract.ScriptHash(),
+				Scopes:  transaction.Global,
+			},
+			Account: account,
+		}})
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
+		commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
+
+		for _, w := range wallets {
+			acc, err := GetWalletAccount(w, accName)
+			commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err)
+			accounts = append(accounts, acc)
+		}
+		act, err = actor.New(c, []actor.SignerAccount{{
+			Signer: transaction.Signer{
+				Account: accounts[0].Contract.ScriptHash(),
+				Scopes:  transaction.Global,
+			},
+			Account: accounts[0],
+		}})
+		if err != nil {
+			return nil, err
+		}
 	}
 	return &LocalActor{
 		neoActor: act,

@@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any
 		h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker)
 	}
 	if method != constants.UpdateMethodName || err == nil && !found {
-		h, found, err = getFrostfsIDAdmin(viper.GetViper())
+		h, found, err = GetFrostfsIDAdmin(viper.GetViper())
 	}
 	if err != nil {
 		return nil, err

@@ -11,7 +11,7 @@ import (
 
 const frostfsIDAdminConfigKey = "frostfsid.admin"
 
-func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
+func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
 	admin := v.GetString(frostfsIDAdminConfigKey)
 	if admin == "" {
 		return util.Uint160{}, false, nil

@@ -1,53 +0,0 @@
-package helper
-
-import (
-	"encoding/hex"
-	"testing"
-
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
-	"github.com/spf13/viper"
-	"github.com/stretchr/testify/require"
-)
-
-func TestFrostfsIDConfig(t *testing.T) {
-	pks := make([]*keys.PrivateKey, 4)
-	for i := range pks {
-		pk, err := keys.NewPrivateKey()
-		require.NoError(t, err)
-		pks[i] = pk
-	}
-
-	fmts := []string{
-		pks[0].GetScriptHash().StringLE(),
-		address.Uint160ToString(pks[1].GetScriptHash()),
-		hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
-		hex.EncodeToString(pks[3].PublicKey().Bytes()),
-	}
-
-	for i := range fmts {
-		v := viper.New()
-		v.Set("frostfsid.admin", fmts[i])
-
-		actual, found, err := getFrostfsIDAdmin(v)
-		require.NoError(t, err)
-		require.True(t, found)
-		require.Equal(t, pks[i].GetScriptHash(), actual)
-	}
-
-	t.Run("bad key", func(t *testing.T) {
-		v := viper.New()
-		v.Set("frostfsid.admin", "abc")
-
-		_, found, err := getFrostfsIDAdmin(v)
-		require.Error(t, err)
-		require.True(t, found)
-	})
-	t.Run("missing key", func(t *testing.T) {
-		v := viper.New()
-
-		_, found, err := getFrostfsIDAdmin(v)
-		require.NoError(t, err)
-		require.False(t, found)
-	})
-}

@@ -134,12 +134,12 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex
 		return nil, err
 	}
 
-	accounts, err := getSingleAccounts(wallets)
+	accounts, err := createWalletAccounts(wallets)
 	if err != nil {
 		return nil, err
 	}
 
-	cliCtx, err := defaultClientContext(c, committeeAcc)
+	cliCtx, err := DefaultClientContext(c, committeeAcc)
 	if err != nil {
 		return nil, fmt.Errorf("client context: %w", err)
 	}

@@ -191,7 +191,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet)
 		}
 		c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String())
 	} else {
-		c, err = NewRemoteClient(v)
+		c, err = GetN3Client(v)
 	}
 	if err != nil {
 		return nil, fmt.Errorf("can't create N3 client: %w", err)

@@ -211,7 +211,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
 	return ctrPath, nil
 }
 
-func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
+func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
 	accounts := make([]*wallet.Account, len(wallets))
 	for i, w := range wallets {
 		acc, err := GetWalletAccount(w, constants.SingleAccountName)

@@ -8,7 +8,6 @@ import (
 	"sort"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	"github.com/google/uuid"
 	"github.com/nspcc-dev/neo-go/pkg/config"

@@ -48,7 +47,7 @@ type LocalClient struct {
 }
 
 func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*LocalClient, error) {
-	cfg, err := config.LoadFile(v.GetString(commonflags.ProtoConfigPath))
+	cfg, err := config.LoadFile(v.GetString(constants.ProtoConfigPath))
 	if err != nil {
 		return nil, err
 	}

@@ -58,59 +57,17 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
 		return nil, err
 	}
 
-	go bc.Run()
-
-	accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets)
-	if err != nil {
-		return nil, err
-	}
-
-	if cmd.Name() != "init" {
-		if err := restoreDump(bc, dumpPath); err != nil {
-			return nil, fmt.Errorf("restore dump: %w", err)
-		}
-	}
-
-	return &LocalClient{
-		bc:       bc,
-		dumpPath: dumpPath,
-		accounts: accounts,
-	}, nil
-}
-
-func restoreDump(bc *core.Blockchain, dumpPath string) error {
-	f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
-	if err != nil {
-		return fmt.Errorf("can't open local dump: %w", err)
-	}
-	defer f.Close()
-
-	r := io.NewBinReaderFromIO(f)
-
-	var skip uint32
-	if bc.BlockHeight() != 0 {
-		skip = bc.BlockHeight() + 1
-	}
-
-	count := r.ReadU32LE() - skip
-	if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
-		return err
-	}
-	return nil
-}
-
-func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) {
+	m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount))
 	accounts := make([]*wallet.Account, len(wallets))
 	for i := range accounts {
-		acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName)
+		accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName)
 		if err != nil {
 			return nil, err
 		}
-		accounts[i] = acc
 	}
 
 	indexMap := make(map[string]int)
-	for i, pub := range cfg.StandbyCommittee {
+	for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee {
 		indexMap[pub] = i
 	}

@@ -119,12 +76,37 @@ func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet
 		pj := accounts[j].PrivateKey().PublicKey().Bytes()
 		return indexMap[string(pi)] < indexMap[string(pj)]
 	})
-	sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool {
+	sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool {
 		return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
 	})
 
-	m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount))
-	return accounts[:m], nil
-}
+	go bc.Run()
+
+	if cmd.Name() != "init" {
+		f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
+		if err != nil {
+			return nil, fmt.Errorf("can't open local dump: %w", err)
+		}
+		defer f.Close()
+
+		r := io.NewBinReaderFromIO(f)
+
+		var skip uint32
+		if bc.BlockHeight() != 0 {
+			skip = bc.BlockHeight() + 1
+		}
+
+		count := r.ReadU32LE() - skip
+		if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
+			return nil, fmt.Errorf("can't restore local dump: %w", err)
+		}
+	}
+
+	return &LocalClient{
+		bc:       bc,
+		dumpPath: dumpPath,
+		accounts: accounts[:m],
+	}, nil
+}
 
 func (l *LocalClient) GetBlockCount() (uint32, error) {

@@ -145,6 +127,11 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul
 	return &a, nil
 }
 
+func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) {
+	// not used by `morph init` command
+	panic("unexpected call")
+}
+
 // InvokeFunction is implemented via `InvokeScript`.
 func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
 	var err error

@@ -308,7 +295,13 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer)
 }
 
 func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) {
+	tx = tx.Copy()
+	// We need to test that transaction was formed correctly to catch as many errors as we can.
+	bs := tx.Bytes()
+	_, err := transaction.NewTransactionFromBytes(bs)
+	if err != nil {
+		return tx.Hash(), fmt.Errorf("invalid transaction: %w", err)
+	}
+
 	l.transactions = append(l.transactions, tx)
 	return tx.Hash(), nil
 }

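The serialization round-trip added to `SendRawTransaction` above is a cheap way for the in-process chain to reject malformed transactions the same way a remote node would. The same check as a standalone sketch (`validateTx` is a hypothetical name; `transaction.NewTransactionFromBytes` is the real neo-go API):

```go
import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
)

// validateTx serializes the transaction and decodes it back, surfacing any
// encoding error before the transaction is accepted, as in the hunk above.
func validateTx(tx *transaction.Transaction) error {
	if _, err := transaction.NewTransactionFromBytes(tx.Bytes()); err != nil {
		return fmt.Errorf("invalid transaction: %w", err)
	}
	return nil
}
```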
@@ -10,6 +10,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
+	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"

@@ -24,10 +25,15 @@ import (
 // Client represents N3 client interface capable of test-invoking scripts
 // and sending signed transactions to chain.
 type Client interface {
-	actor.RPCActor
+	invoker.RPCInvoke
 
+	GetBlockCount() (uint32, error)
 	GetNativeContracts() ([]state.Contract, error)
 	GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
+	GetVersion() (*result.Version, error)
+	SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
+	GetCommittee() (keys.PublicKeys, error)
+	CalculateNetworkFee(tx *transaction.Transaction) (int64, error)
 }
 
 type HashVUBPair struct {

@@ -42,7 +48,7 @@ type ClientContext struct {
 	SentTxs []HashVUBPair
 }
 
-func NewRemoteClient(v *viper.Viper) (Client, error) {
+func GetN3Client(v *viper.Viper) (Client, error) {
 	// number of opened connections
 	// by neo-go client per one host
 	const (

@@ -82,14 +88,8 @@ func NewRemoteClient(v *viper.Viper) (Client, error) {
 	return c, nil
 }
 
-func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
-	commAct, err := actor.New(c, []actor.SignerAccount{{
-		Signer: transaction.Signer{
-			Account: committeeAcc.Contract.ScriptHash(),
-			Scopes:  transaction.Global,
-		},
-		Account: committeeAcc,
-	}})
+func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
+	commAct, err := NewActor(c, committeeAcc)
 	if err != nil {
 		return nil, err
 	}

@@ -15,8 +15,10 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
+	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
+	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
 	"github.com/nspcc-dev/neo-go/pkg/wallet"
 	"github.com/spf13/viper"

@@ -85,6 +87,16 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
 	return wallets, nil
 }
 
+func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) {
+	return actor.New(c, []actor.SignerAccount{{
+		Signer: transaction.Signer{
+			Account: committeeAcc.Contract.ScriptHash(),
+			Scopes:  transaction.Global,
+		},
+		Account: committeeAcc,
+	}})
+}
+
 func ReadContract(ctrPath, ctrName string) (*ContractState, error) {
 	rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef"))
 	if err != nil {

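`NewActor` centralizes the committee signer that `DefaultClientContext` previously built inline, so callers now need a single line. A usage sketch (the surrounding variables are placeholders; `c` is any `actor.RPCActor` and `committeeAcc` is the committee `wallet.Account`, with the same global witness scope as the original inline code):

```go
act, err := helper.NewActor(c, committeeAcc)
if err != nil {
	return nil, fmt.Errorf("create committee actor: %w", err)
}
```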
@@ -62,7 +62,7 @@ func testInitialize(t *testing.T, committeeSize int) {
 	v := viper.GetViper()
 
 	require.NoError(t, generateTestData(testdataDir, committeeSize))
-	v.Set(commonflags.ProtoConfigPath, filepath.Join(testdataDir, protoFileName))
+	v.Set(constants.ProtoConfigPath, filepath.Join(testdataDir, protoFileName))
 
 	// Set to the path or remove the next statement to download from the network.
 	require.NoError(t, Cmd.Flags().Set(commonflags.ContractsInitFlag, contractsPath))

@@ -2,6 +2,7 @@ package initialize
 
 import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 )

@@ -31,7 +32,7 @@ var Cmd = &cobra.Command{
 		_ = viper.BindPFlag(commonflags.ContainerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag))
 		_ = viper.BindPFlag(commonflags.ContainerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag))
 		_ = viper.BindPFlag(commonflags.WithdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag))
-		_ = viper.BindPFlag(commonflags.ProtoConfigPath, cmd.Flags().Lookup(commonflags.ProtoConfigPath))
+		_ = viper.BindPFlag(constants.ProtoConfigPath, cmd.Flags().Lookup(constants.ProtoConfigPath))
 	},
 	RunE: initializeSideChainCmd,
 }

@@ -47,7 +48,7 @@ func initInitCmd() {
 	// Defaults are taken from neo-preodolenie.
 	Cmd.Flags().Uint64(containerFeeCLIFlag, 1000, "Container registration fee")
 	Cmd.Flags().Uint64(containerAliasFeeCLIFlag, 500, "Container alias fee")
-	Cmd.Flags().String(commonflags.ProtoConfigPath, "", "Path to the consensus node configuration")
+	Cmd.Flags().String(constants.ProtoConfigPath, "", "Path to the consensus node configuration")
 	Cmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
 	Cmd.MarkFlagsMutuallyExclusive(commonflags.ContractsInitFlag, commonflags.ContractsURLFlag)
 }

@@ -13,7 +13,7 @@ import (
 )
 
 func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
-	c, err := helper.NewRemoteClient(viper.GetViper())
+	c, err := helper.GetN3Client(viper.GetViper())
 	commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)
 
 	inv := invoker.New(c, nil)

@@ -12,6 +12,7 @@ var (
 		Short: "List netmap candidates nodes",
 		PreRun: func(cmd *cobra.Command, _ []string) {
 			_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+			_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
 		},
 		Run: listNetmapCandidatesNodes,
 	}

@@ -24,7 +24,7 @@ func initRegisterCmd() {
 }
 
 func registerDomain(cmd *cobra.Command, _ []string) {
-	c, actor := nnsWriter(cmd)
+	c, actor, _ := getRPCClient(cmd)
 
 	name, _ := cmd.Flags().GetString(nnsNameFlag)
 	email, _ := cmd.Flags().GetString(nnsEmailFlag)

@@ -53,7 +53,7 @@ func initDeleteCmd() {
 }
 
 func deleteDomain(cmd *cobra.Command, _ []string) {
-	c, actor := nnsWriter(cmd)
+	c, actor, _ := getRPCClient(cmd)
 
 	name, _ := cmd.Flags().GetString(nnsNameFlag)
 	h, vub, err := c.DeleteDomain(name)

@@ -5,15 +5,15 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
 	"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
+	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 )
 
-func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
+func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor, util.Uint160) {
 	v := viper.GetViper()
-	c, err := helper.NewRemoteClient(v)
+	c, err := helper.GetN3Client(v)
 	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
 
 	ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName)

@@ -22,17 +22,5 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
 	r := management.NewReader(ac.Invoker)
 	nnsCs, err := helper.GetContractByID(r, 1)
 	commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
-	return client.New(ac, nnsCs.Hash), ac
-}
-
-func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) {
-	c, err := helper.NewRemoteClient(viper.GetViper())
-	commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)
-
-	inv := invoker.New(c, nil)
-	r := management.NewReader(inv)
-	nnsCs, err := helper.GetContractByID(r, 1)
-	commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)
-
-	return client.NewReader(inv, nnsCs.Hash), inv
+	return client.New(ac, nnsCs.Hash), ac, nnsCs.Hash
 }

@ -8,6 +8,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -28,6 +29,7 @@ func initAddRecordCmd() {
|
|||
func initGetRecordsCmd() {
|
||||
Cmd.AddCommand(getRecordsCmd)
|
||||
getRecordsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
|
||||
getRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
|
||||
getRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
|
||||
getRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
|
||||
|
||||
|
@ -59,7 +61,7 @@ func initDelRecordCmd() {
|
|||
}
|
||||
|
||||
func addRecord(cmd *cobra.Command, _ []string) {
|
||||
c, actor := nnsWriter(cmd)
|
||||
c, actor, _ := getRPCClient(cmd)
|
||||
name, _ := cmd.Flags().GetString(nnsNameFlag)
|
||||
data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
|
||||
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
|
||||
|
@ -75,16 +77,16 @@ func addRecord(cmd *cobra.Command, _ []string) {
|
|||
}
|
||||
|
||||
func getRecords(cmd *cobra.Command, _ []string) {
|
||||
c, inv := nnsReader(cmd)
|
||||
c, act, hash := getRPCClient(cmd)
|
||||
name, _ := cmd.Flags().GetString(nnsNameFlag)
|
||||
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
|
||||
if recordType == "" {
|
||||
sid, r, err := c.GetAllRecords(name)
|
||||
sid, r, err := unwrap.SessionIterator(act.Invoker.Call(hash, "getAllRecords", name))
|
||||
commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
|
||||
defer func() {
|
||||
_ = inv.TerminateSession(sid)
|
||||
_ = act.Invoker.TerminateSession(sid)
|
||||
}()
|
||||
items, err := inv.TraverseIterator(sid, &r, 0)
|
||||
items, err := act.Invoker.TraverseIterator(sid, &r, 0)
|
||||
commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
|
||||
for len(items) != 0 {
|
||||
for j := range items {
|
||||
|
@ -95,7 +97,7 @@ func getRecords(cmd *cobra.Command, _ []string) {
|
|||
recordTypeToString(nns.RecordType(rs[1].Value().(*big.Int).Int64())),
|
||||
string(bs))
|
||||
}
|
||||
items, err = inv.TraverseIterator(sid, &r, 0)
|
||||
items, err = act.Invoker.TraverseIterator(sid, &r, 0)
|
||||
commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
|
||||
}
|
||||
} else {
|
||||
|
@ -112,7 +114,7 @@ func getRecords(cmd *cobra.Command, _ []string) {
|
|||
}
|
||||
|
||||
func delRecords(cmd *cobra.Command, _ []string) {
|
||||
c, actor := nnsWriter(cmd)
|
||||
c, actor, _ := getRPCClient(cmd)
|
||||
name, _ := cmd.Flags().GetString(nnsNameFlag)
|
||||
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
|
||||
typ, err := getRecordType(recordType)
|
||||
|
@ -127,7 +129,7 @@ func delRecords(cmd *cobra.Command, _ []string) {
|
|||
}
|
||||
|
||||
func delRecord(cmd *cobra.Command, _ []string) {
|
||||
c, actor := nnsWriter(cmd)
|
||||
c, actor, _ := getRPCClient(cmd)
|
||||
name, _ := cmd.Flags().GetString(nnsNameFlag)
|
||||
data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
|
||||
recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
|
||||
|
|
|
@@ -14,7 +14,7 @@ func initRenewCmd() {
 }
 
 func renewDomain(cmd *cobra.Command, _ []string) {
-	c, actor := nnsWriter(cmd)
+	c, actor, _ := getRPCClient(cmd)
 	name, _ := cmd.Flags().GetString(nnsNameFlag)
 	h, vub, err := c.Renew(name)
 	commonCmd.ExitOnErr(cmd, "unable to renew domain: %w", err)

@@ -18,11 +18,12 @@ const (
 func initTokensCmd() {
 	Cmd.AddCommand(tokensCmd)
 	tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+	tokensCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
 	tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc)
 }
 
 func listTokens(cmd *cobra.Command, _ []string) {
-	c, _ := nnsReader(cmd)
+	c, _, _ := getRPCClient(cmd)
 	it, err := c.Tokens()
 	commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err)
 	for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) {

@@ -40,7 +41,7 @@ func listTokens(cmd *cobra.Command, _ []string) {
 	}
 }
 
-func getCnameRecord(c *client.ContractReader, token []byte) (string, error) {
+func getCnameRecord(c *client.Contract, token []byte) (string, error) {
 	items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME)))
 
 	// GetRecords returns the error "not an array" if the domain does not contain records.

@@ -30,7 +30,7 @@ func initUpdateCmd() {
}
func updateSOA(cmd *cobra.Command, _ []string) {
c, actor := nnsWriter(cmd)
c, actor, _ := getRPCClient(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
email, _ := cmd.Flags().GetString(nnsEmailFlag)
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"math/big"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@@ -40,8 +41,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
}
accHash := w.GetChangeAddress()
addr, _ := cmd.Flags().GetString(walletAccountFlag)
if addr != "" {
if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
accHash, err = address.StringToUint160(addr)
if err != nil {
return fmt.Errorf("invalid address: %s", addr)
@@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't find account for %s", accHash)
}
prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
pass, err := input.ReadPassword(prompt)
if err != nil {
return fmt.Errorf("can't get password: %v", err)
@@ -73,16 +73,23 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return err
}
till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
if till <= 0 {
return errInvalidNotaryDepositLifetime
till := int64(defaultNotaryDepositLifetime)
tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
if err != nil {
return err
}
if tillStr != "" {
till, err = strconv.ParseInt(tillStr, 10, 64)
if err != nil || till <= 0 {
return errInvalidNotaryDepositLifetime
}
}
return transferGas(cmd, acc, accHash, gasAmount, till)
}
func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
if err != nil {
return err
}
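The hunk above trades a typed Int64 flag for a string flag with manual parsing and a default. A standalone sketch of just that validation logic (the helper name and the default value are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

var errInvalidLifetime = errors.New("notary deposit lifetime must be a positive integer")

// parseTill applies the same rules as the new depositNotary code: an
// empty string keeps the default, anything else must parse to a
// positive int64.
func parseTill(raw string, def int64) (int64, error) {
	if raw == "" {
		return def, nil
	}
	till, err := strconv.ParseInt(raw, 10, 64)
	if err != nil || till <= 0 {
		return 0, errInvalidLifetime
	}
	return till, nil
}

func main() {
	for _, in := range []string{"", "100", "-5", "abc"} {
		till, err := parseTill(in, 5760) // 5760 is a made-up default
		fmt.Printf("%q -> %d, %v\n", in, till, err)
	}
}
```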
@@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
}
func init() {
@@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error {
}
func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
c, err := helper.NewRemoteClient(viper.GetViper())
c, err := helper.GetN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client:", err)
inv := invoker.New(c, nil)
@@ -20,32 +20,23 @@ const (
accountAddressFlag = "account"
)
func parseAddresses(cmd *cobra.Command) []util.Uint160 {
var addrs []util.Uint160
accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
for _, acc := range accs {
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
addrs = append(addrs, addr)
}
return addrs
}
func addProxyAccount(cmd *cobra.Command, _ []string) {
addrs := parseAddresses(cmd)
err := processAccount(cmd, addrs, "addAccount")
acc, _ := cmd.Flags().GetString(accountAddressFlag)
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
err = processAccount(cmd, addr, "addAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func removeProxyAccount(cmd *cobra.Command, _ []string) {
addrs := parseAddresses(cmd)
err := processAccount(cmd, addrs, "removeAccount")
acc, _ := cmd.Flags().GetString(accountAddressFlag)
addr, err := address.StringToUint160(acc)
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
err = processAccount(cmd, addr, "removeAccount")
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
}
func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
return fmt.Errorf("can't initialize context: %w", err)
@@ -63,9 +54,7 @@ func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) err
}
bw := io.NewBufBinWriter()
for _, addr := range addrs {
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
}
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
return err
@@ -29,15 +29,13 @@ var (
func initProxyAddAccount() {
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initProxyRemoveAccount() {
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
@@ -11,7 +11,6 @@ import (
"net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"text/template"
@@ -106,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
fatalOnErr(errors.New("can't find account in wallet"))
}
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
fatalOnErr(err)
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
@@ -411,7 +410,8 @@ func initClient(rpc []string) *rpcclient.Client {
var c *rpcclient.Client
var err error
shuffled := slices.Clone(rpc)
shuffled := make([]string, len(rpc))
copy(shuffled, rpc)
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
for _, endpoint := range shuffled {
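Both sides of the initClient hunk copy the endpoint list before shuffling, so the caller's slice is never reordered; `slices.Clone` is shorthand for the make-and-copy pair. A small self-contained illustration with made-up endpoints:

```go
package main

import (
	"fmt"
	"math/rand"
	"slices"
)

func main() {
	rpc := []string{"node1:30333", "node2:30333", "node3:30333"}

	// Equivalent to: shuffled := make([]string, len(rpc)); copy(shuffled, rpc)
	shuffled := slices.Clone(rpc)
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})

	fmt.Println("original:", rpc)      // order preserved
	fmt.Println("shuffled:", shuffled) // random order
}
```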
@@ -9,6 +9,7 @@ import (
"io"
"os"
"slices"
"sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
@@ -77,31 +78,13 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
slices.SortFunc(list, func(lhs, rhs cid.ID) int {
return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
sort.Slice(list, func(i, j int) bool {
lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
return strings.Compare(lhs, rhs) < 0
})
return list
}
func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
cliPrm := &client.PrmContainerListStream{
XHeaders: prm.XHeaders,
OwnerID: prm.OwnerID,
Session: prm.Session,
}
rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
if err != nil {
return fmt.Errorf("init container list: %w", err)
}
err = rdr.Iterate(processCnr)
if err != nil {
return fmt.Errorf("read container list: %w", err)
}
return
}
// PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct {
Client *client.Client
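The SortedIDList hunk swaps `slices.SortFunc` (a comparator returning an int) for the older `sort.Slice` (a less function returning a bool); the resulting order is identical. A minimal sketch of the two styles side by side, using plain strings in place of `cid.ID`:

```go
package main

import (
	"fmt"
	"slices"
	"sort"
	"strings"
)

func main() {
	a := []string{"c", "a", "b"}
	b := slices.Clone(a)

	// Comparator style: negative/zero/positive result.
	slices.SortFunc(a, func(lhs, rhs string) int {
		return strings.Compare(lhs, rhs)
	})

	// Less-function style: true when element i sorts before element j.
	sort.Slice(b, func(i, j int) bool {
		return strings.Compare(b[i], b[j]) < 0
	})

	fmt.Println(a, b) // both print [a b c]
}
```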
@@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
outputPath, _ := cmd.Flags().GetString(outputFlag)
if outputPath != "" {
err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
commonCmd.ExitOnErr(cmd, "dump error: %w", err)
} else {
fmt.Print("\n")
@@ -6,11 +6,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// flags of list command.
@@ -54,60 +51,44 @@ var listContainersCmd = &cobra.Command{
var prm internalclient.ListContainersPrm
prm.SetClient(cli)
prm.OwnerID = idUser
prm.Account = idUser
res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
prmGet := internalclient.GetContainerPrm{
Client: cli,
}
var containerIDs []cid.ID
err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
printContainer(cmd, prmGet, id)
return false
})
if err == nil {
return
}
if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
containerIDs = res.SortedIDList()
} else {
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}
containerIDs := res.SortedIDList()
for _, cnrID := range containerIDs {
printContainer(cmd, prmGet, cnrID)
if flagVarListName == "" && !flagVarListPrintAttr {
cmd.Println(cnrID.String())
continue
}
prmGet.ClientParams.ContainerID = &cnrID
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
cmd.Printf(" failed to read attributes: %v\n", err)
continue
}
cnr := res.Container()
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
continue
}
cmd.Println(cnrID.String())
if flagVarListPrintAttr {
cnr.IterateUserAttributes(func(key, val string) {
cmd.Printf(" %s: %s\n", key, val)
})
}
}
},
}
func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
if flagVarListName == "" && !flagVarListPrintAttr {
cmd.Println(id.String())
return
}
prmGet.ClientParams.ContainerID = &id
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
cmd.Printf(" failed to read attributes: %v\n", err)
return
}
cnr := res.Container()
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
return
}
cmd.Println(id.String())
if flagVarListPrintAttr {
cnr.IterateUserAttributes(func(key, val string) {
cmd.Printf(" %s: %s\n", key, val)
})
}
}
func initContainerListContainersCmd() {
commonflags.Init(listContainersCmd)
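The list command above degrades gracefully: it tries the streaming RPC first and falls back to the unary one only when the server reports the method as not implemented. A minimal sketch of that detection, with both RPCs abstracted as functions (all names here are illustrative):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// callWithFallback runs the preferred RPC and, only when the server
// answers codes.Unimplemented, retries with the legacy RPC. Any other
// error is returned as-is.
func callWithFallback(preferred, legacy func() error) error {
	err := preferred()
	if err == nil {
		return nil
	}
	if s, ok := status.FromError(err); ok && s.Code() == codes.Unimplemented {
		return legacy()
	}
	return err
}

func main() {
	err := callWithFallback(
		func() error { return status.Error(codes.Unimplemented, "no streaming") },
		func() error { fmt.Println("fell back to unary call"); return nil },
	)
	fmt.Println("result:", err)
}
```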
@@ -23,11 +23,11 @@ type policyPlaygroundREPL struct {
nodes map[string]netmap.NodeInfo
}
func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
return &policyPlaygroundREPL{
cmd: cmd,
nodes: map[string]netmap.NodeInfo{},
}
}, nil
}
func (repl *policyPlaygroundREPL) handleLs(args []string) error {
@@ -246,7 +248,8 @@ var policyPlaygroundCmd = &cobra.Command{
Long: `A REPL for testing placement policies.
If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
Run: func(cmd *cobra.Command, _ []string) {
repl := newPolicyPlaygroundREPL(cmd)
repl, err := newPolicyPlaygroundREPL(cmd)
commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
},
}
56 cmd/frostfs-cli/modules/control/evacuate_shard.go Normal file
@@ -0,0 +1,56 @@
package control
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
)
const ignoreErrorsFlag = "no-errors"
var evacuateShardCmd = &cobra.Command{
Use: "evacuate",
Short: "Evacuate objects from shard",
Long: "Evacuate objects from shard to other shards",
Run: evacuateShard,
Deprecated: "use frostfs-cli control shards evacuation start",
}
func evacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
req.Body.Shard_ID = getShardIDList(cmd)
req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
signRequest(cmd, pk, req)
cli := getClient(cmd, pk)
var resp *control.EvacuateShardResponse
var err error
err = cli.ExecRaw(func(client *client.Client) error {
resp, err = control.EvacuateShard(client, req)
return err
})
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
cmd.Println("Shard has successfully been evacuated.")
}
func initControlEvacuateShardCmd() {
initControlFlags(evacuateShardCmd)
flags := evacuateShardCmd.Flags()
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
flags.Bool(shardAllFlag, false, "Process all shards")
flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
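The new file ends by declaring the shard-ID and all-shards flags as mutually exclusive. A tiny standalone illustration of that cobra feature (command and flag names here are made up, not from this repo):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, _ []string) error {
			fmt.Println("ok")
			return nil
		},
	}
	cmd.Flags().StringSlice("id", nil, "explicit ID list")
	cmd.Flags().Bool("all", false, "process everything")

	// cobra rejects invocations that set both flags at once.
	cmd.MarkFlagsMutuallyExclusive("id", "all")

	cmd.SetArgs([]string{"--id", "a", "--all"})
	if err := cmd.Execute(); err != nil {
		fmt.Println("error:", err) // reports the conflicting flag group
	}
}
```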
@@ -17,11 +17,10 @@ import (
)
const (
awaitFlag = "await"
noProgressFlag = "no-progress"
scopeFlag = "scope"
repOneOnlyFlag = "rep-one-only"
ignoreErrorsFlag = "no-errors"
awaitFlag = "await"
noProgressFlag = "no-progress"
scopeFlag = "scope"
repOneOnlyFlag = "rep-one-only"
containerWorkerCountFlag = "container-worker-count"
objectWorkerCountFlag = "object-worker-count"
@@ -13,6 +13,7 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
shardsCmd.AddCommand(listShardsCmd)
shardsCmd.AddCommand(setShardModeCmd)
shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(evacuationShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd)
@@ -22,6 +23,7 @@ func initControlShardsCmd() {
initControlShardsListCmd()
initControlSetShardModeCmd()
initControlEvacuateShardCmd()
initControlEvacuationShardCmd()
initControlFlushCacheCmd()
initControlDoctorCmd()
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
@@ -42,8 +43,6 @@ func initObjectHashCmd() {
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
_ = objectHashCmd.MarkFlagRequired("range")
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
}
@@ -67,6 +66,36 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
tz := typ == hashTz
fullHash := len(ranges) == 0
if fullHash {
var headPrm internalclient.HeadObjectPrm
headPrm.SetClient(cli)
Prepare(cmd, &headPrm)
headPrm.SetAddress(objAddr)
// get hash of full payload through HEAD (may be user can do it through dedicated command?)
res, err := internalclient.HeadObject(cmd.Context(), headPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
var cs checksum.Checksum
var csSet bool
if tz {
cs, csSet = res.Header().PayloadHomomorphicHash()
} else {
cs, csSet = res.Header().PayloadChecksum()
}
if csSet {
cmd.Println(hex.EncodeToString(cs.Value()))
} else {
cmd.Println("Missing checksum in object header.")
}
return
}
var hashPrm internalclient.HashPayloadRangesPrm
hashPrm.SetClient(cli)
Prepare(cmd, &hashPrm)
@@ -75,7 +104,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
hashPrm.SetSalt(salt)
hashPrm.SetRanges(ranges)
if typ == hashTz {
if tz {
hashPrm.TZ()
}
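The full-hash branch above reads the checksum straight from the object header instead of hashing payload ranges. A hedged sketch of that selection as a standalone helper; it reuses only calls that appear in the hunk (`PayloadChecksum`, `PayloadHomomorphicHash`), and the assumption that the header is the SDK's `object.Object` type:

```go
package main

import (
	"encoding/hex"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// headerHash picks the homomorphic (Tillich-Zemor) or the regular
// payload checksum from an already fetched header, mirroring the
// fullHash branch of getObjectHash.
func headerHash(hdr *objectSDK.Object, tz bool) (string, error) {
	var cs checksum.Checksum
	var ok bool
	if tz {
		cs, ok = hdr.PayloadHomomorphicHash()
	} else {
		cs, ok = hdr.PayloadChecksum()
	}
	if !ok {
		return "", errors.New("missing checksum in object header")
	}
	return hex.EncodeToString(cs.Value()), nil
}
```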
@@ -1,12 +1,15 @@
package object
import (
"bytes"
"cmp"
"context"
"crypto/ecdsa"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"slices"
"sync"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
@@ -504,6 +507,7 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID,
}
func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
normilizeObjectNodesResult(objects, result)
if json, _ := cmd.Flags().GetBool(commonflags.JSON); json {
printObjectNodesAsJSON(cmd, objID, objects, result)
} else {
@@ -511,6 +515,34 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul
}
}
func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) {
slices.SortFunc(objects, func(lhs, rhs phyObject) int {
if lhs.ecHeader == nil && rhs.ecHeader == nil {
return bytes.Compare(lhs.objectID[:], rhs.objectID[:])
}
if lhs.ecHeader == nil {
return -1
}
if rhs.ecHeader == nil {
return 1
}
if lhs.ecHeader.parent == rhs.ecHeader.parent {
return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index)
}
return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:])
})
for _, obj := range objects {
op := result.placements[obj.objectID]
slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
})
slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int {
return bytes.Compare(lhs.PublicKey(), rhs.PublicKey())
})
result.placements[obj.objectID] = op
}
}
func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) {
fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects))
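The comparator added above orders plain objects before EC chunks, then groups chunks by parent ID and chunk index. The same nil-first, multi-key pattern in miniature, on a made-up struct:

```go
package main

import (
	"bytes"
	"cmp"
	"fmt"
	"slices"
)

type chunk struct {
	parent []byte // nil for a plain (non-EC) object
	index  uint32
	id     []byte
}

func main() {
	objs := []chunk{
		{parent: []byte{2}, index: 1, id: []byte{9}},
		{parent: nil, id: []byte{5}},
		{parent: []byte{2}, index: 0, id: []byte{8}},
	}

	slices.SortFunc(objs, func(lhs, rhs chunk) int {
		switch {
		case lhs.parent == nil && rhs.parent == nil:
			return bytes.Compare(lhs.id, rhs.id) // both plain: by own ID
		case lhs.parent == nil:
			return -1 // plain objects sort first
		case rhs.parent == nil:
			return 1
		}
		if c := bytes.Compare(lhs.parent, rhs.parent); c != 0 {
			return c // group chunks by parent
		}
		return cmp.Compare(lhs.index, rhs.index) // then by chunk index
	})

	fmt.Println(objs) // plain object first, then chunks 0 and 1 of parent 2
}
```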
@@ -1,7 +1,6 @@
package main
import (
"context"
"os"
"os/signal"
"syscall"
@@ -47,7 +46,7 @@ func reloadConfig() error {
return logPrm.Reload()
}
func watchForSignal(ctx context.Context, cancel func()) {
func watchForSignal(cancel func()) {
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
@@ -59,49 +58,49 @@ func watchForSignal(ctx context.Context, cancel func()) {
// signals causing application to shut down should have priority over
// reconfiguration signal
case <-ch:
log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
cancel()
shutdown(ctx)
log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
shutdown()
log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-intErr: // internal application error
log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
cancel()
shutdown(ctx)
shutdown()
return
default:
// block until any signal is receieved
select {
case <-ch:
log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
cancel()
shutdown(ctx)
log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
shutdown()
log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-intErr: // internal application error
log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
cancel()
shutdown(ctx)
shutdown()
return
case <-sighupCh:
log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
if !innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
log.Info(logs.FrostFSNodeSIGHUPSkip)
break
}
err := reloadConfig()
if err != nil {
log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
pprofCmp.reload(ctx)
metricsCmp.reload(ctx)
log.Info(ctx, logs.FrostFSIRReloadExtraWallets)
pprofCmp.reload()
metricsCmp.reload()
log.Info(logs.FrostFSIRReloadExtraWallets)
err = innerRing.SetExtraWallets(cfg)
if err != nil {
log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
}
innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
}
}
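The watcher above uses a two-level select so that termination signals win over SIGHUP even when both are pending: the outer select with a default checks only the high-priority channels, and only if none fired does the inner select block on everything. The same idea in miniature:

```go
package main

import "fmt"

// prioritizedReceive drains high before low whenever both are ready,
// mirroring the nested select in watchForSignal.
func prioritizedReceive(high, low <-chan string) string {
	select {
	case m := <-high:
		return m // a high-priority message was already pending
	default:
		select { // nothing urgent: block on both channels
		case m := <-high:
			return m
		case m := <-low:
			return m
		}
	}
}

func main() {
	high := make(chan string, 1)
	low := make(chan string, 1)
	high <- "terminate"
	low <- "reload"
	fmt.Println(prioritizedReceive(high, low)) // terminate
}
```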
@@ -1,7 +1,6 @@
package main
import (
"context"
"net/http"
"time"
@@ -25,8 +24,8 @@ const (
shutdownTimeoutKeyPostfix = ".shutdown_timeout"
)
func (c *httpComponent) init(ctx context.Context) {
log.Info(ctx, "init "+c.name)
func (c *httpComponent) init() {
log.Info("init " + c.name)
c.enabled = cfg.GetBool(c.name + enabledKeyPostfix)
c.address = cfg.GetString(c.name + addressKeyPostfix)
c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix)
@@ -40,14 +39,14 @@ func (c *httpComponent) init(ctx context.Context) {
httputil.WithShutdownTimeout(c.shutdownDur),
)
} else {
log.Info(ctx, c.name+" is disabled, skip")
log.Info(c.name + " is disabled, skip")
c.srv = nil
}
}
func (c *httpComponent) start(ctx context.Context) {
func (c *httpComponent) start() {
if c.srv != nil {
log.Info(ctx, "start "+c.name)
log.Info("start " + c.name)
wg.Add(1)
go func() {
defer wg.Done()
@@ -56,10 +55,10 @@ func (c *httpComponent) start(ctx context.Context) {
}
}
func (c *httpComponent) shutdown(ctx context.Context) error {
func (c *httpComponent) shutdown() error {
if c.srv != nil {
log.Info(ctx, "shutdown "+c.name)
return c.srv.Shutdown(ctx)
log.Info("shutdown " + c.name)
return c.srv.Shutdown()
}
return nil
}
@@ -71,17 +70,17 @@ func (c *httpComponent) needReload() bool {
return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur)
}
func (c *httpComponent) reload(ctx context.Context) {
log.Info(ctx, "reload "+c.name)
func (c *httpComponent) reload() {
log.Info("reload " + c.name)
if c.needReload() {
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err),
log.Info(c.name + " config updated")
if err := c.shutdown(); err != nil {
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
} else {
c.init(ctx)
c.start(ctx)
c.init()
c.start()
}
}
}
@@ -87,48 +87,48 @@ func main() {
ctx, cancel := context.WithCancel(context.Background())
pprofCmp = newPprofComponent()
pprofCmp.init(ctx)
pprofCmp.init()
metricsCmp = newMetricsComponent()
metricsCmp.init(ctx)
metricsCmp.init()
audit.Store(cfg.GetBool("audit.enabled"))
innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit)
exitErr(err)
pprofCmp.start(ctx)
metricsCmp.start(ctx)
pprofCmp.start()
metricsCmp.start()
// start inner ring
err = innerRing.Start(ctx, intErr)
exitErr(err)
log.Info(ctx, logs.CommonApplicationStarted,
log.Info(logs.CommonApplicationStarted,
zap.String("version", misc.Version))
watchForSignal(ctx, cancel)
watchForSignal(cancel)
<-ctx.Done() // graceful shutdown
log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop)
log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
wg.Wait()
log.Info(ctx, logs.FrostFSIRApplicationStopped)
log.Info(logs.FrostFSIRApplicationStopped)
}
func shutdown(ctx context.Context) {
innerRing.Stop(ctx)
if err := metricsCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err),
func shutdown() {
innerRing.Stop()
if err := metricsCmp.shutdown(); err != nil {
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
}
if err := pprofCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err),
if err := pprofCmp.shutdown(); err != nil {
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
}
if err := sdnotify.ClearStatus(); err != nil {
log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
@@ -1,7 +1,6 @@
package main
import (
"context"
"runtime"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -29,8 +28,8 @@ func newPprofComponent() *pprofComponent {
}
}
func (c *pprofComponent) init(ctx context.Context) {
c.httpComponent.init(ctx)
func (c *pprofComponent) init() {
c.httpComponent.init()
if c.enabled {
c.blockRate = cfg.GetInt(pprofBlockRateKey)
@@ -52,17 +51,17 @@ func (c *pprofComponent) needReload() bool {
c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate)
}
func (c *pprofComponent) reload(ctx context.Context) {
log.Info(ctx, "reload "+c.name)
func (c *pprofComponent) reload() {
log.Info("reload " + c.name)
if c.needReload() {
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.Error(err))
log.Info(c.name + " config updated")
if err := c.shutdown(); err != nil {
log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()))
return
}
c.init(ctx)
c.start(ctx)
c.init()
c.start()
}
}
@@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
blz := openBlobovnicza(cmd)
defer blz.Close(cmd.Context())
defer blz.Close()
var prm blobovnicza.GetPrm
prm.SetAddress(addr)
@@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
}
blz := openBlobovnicza(cmd)
defer blz.Close(cmd.Context())
defer blz.Close()
err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr)
common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err))
@@ -27,7 +27,7 @@ func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
blobovnicza.WithPath(vPath),
blobovnicza.WithReadOnly(true),
)
common.ExitOnErr(cmd, blz.Open(cmd.Context()))
common.ExitOnErr(cmd, blz.Open())
return blz
}
@@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err))
db := openMeta(cmd)
defer db.Close(cmd.Context())
defer db.Close()
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)
@@ -19,7 +19,7 @@ func init() {
func listGarbageFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
defer db.Close(cmd.Context())
defer db.Close()
var garbPrm meta.GarbageIterationPrm
garbPrm.SetHandler(
@@ -19,7 +19,7 @@ func init() {
func listGraveyardFunc(cmd *cobra.Command, _ []string) {
db := openMeta(cmd)
defer db.Close(cmd.Context())
defer db.Close()
var gravePrm meta.GraveyardIterationPrm
gravePrm.SetHandler(
@@ -124,7 +124,10 @@ func (v *BucketsView) loadNodeChildren(
path := parentBucket.Path
parser := parentBucket.NextParser
buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
if err != nil {
return err
}
for item := range buffer {
if item.err != nil {
@@ -132,7 +135,6 @@ func (v *BucketsView) loadNodeChildren(
}
bucket := item.val
var err error
bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
if err != nil {
return err
@@ -178,7 +180,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
defer cancel()
// Check the current bucket's nested buckets if exist
bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range bucketsBuffer {
if item.err != nil {
@@ -186,7 +191,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
b := item.val
var err error
b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
if err != nil {
return false, err
@@ -202,7 +206,10 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
// Check the current bucket's nested records if exist
recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
if err != nil {
return false, err
}
for item := range recordsBuffer {
if item.err != nil {
@@ -210,7 +217,6 @@ func (v *BucketsView) bucketSatisfiesFilter(
}
r := item.val
var err error
r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
if err != nil {
return false, err
@@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
func load[T any](
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
filter func(key, value []byte) bool, transform func(key, value []byte) T,
) <-chan Item[T] {
) (<-chan Item[T], error) {
buffer := make(chan Item[T], bufferSize)
go func() {
@@ -77,13 +77,13 @@ func load[T any](
}
}()
return buffer
return buffer, nil
}
func LoadBuckets(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Bucket] {
buffer := load(
) (<-chan Item[*Bucket], error) {
buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value == nil
@@ -98,14 +98,17 @@ func LoadBuckets(
}
},
)
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer
return buffer, nil
}
func LoadRecords(
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
) <-chan Item[*Record] {
buffer := load(
) (<-chan Item[*Record], error) {
buffer, err := load(
ctx, db, path, bufferSize,
func(_, value []byte) bool {
return value != nil
@@ -121,8 +124,11 @@ func LoadRecords(
}
},
)
if err != nil {
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
}
return buffer
return buffer, nil
}
// HasBuckets checks if a bucket has nested buckets. It relies on assumption
@@ -131,21 +137,24 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
buffer := load(
buffer, err := load(
ctx, db, path, 1,
nil,
func(_, value []byte) []byte { return value },
)
if err != nil {
return false, err
}
x, ok := <-buffer
if !ok {
return false, nil
}
if x.err != nil {
return false, x.err
return false, err
}
if x.val != nil {
return false, nil
return false, err
}
return true, nil
}
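`load` above is a generic producer: it walks a bbolt bucket in a goroutine and hands back a buffered channel of `Item[T]`, so errors travel in-band with values. A reduced, dependency-free sketch of the same shape (the `Item` name matches the diff; the data source here is a plain slice standing in for the database):

```go
package main

import (
	"context"
	"fmt"
)

type Item[T any] struct {
	val T
	err error
}

// load streams transformed values into a buffered channel until the
// source is exhausted or the context is cancelled.
func load[T any](ctx context.Context, src []string, bufferSize int, transform func(string) (T, error)) <-chan Item[T] {
	buffer := make(chan Item[T], bufferSize)
	go func() {
		defer close(buffer)
		for _, s := range src {
			v, err := transform(s)
			select {
			case buffer <- Item[T]{val: v, err: err}:
			case <-ctx.Done():
				return
			}
		}
	}()
	return buffer
}

func main() {
	ch := load(context.Background(), []string{"a", "bb"}, 1, func(s string) (int, error) {
		return len(s), nil
	})
	for item := range ch {
		fmt.Println(item.val, item.err)
	}
}
```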
@@ -62,7 +62,10 @@ func (v *RecordsView) Mount(ctx context.Context) error {
ctx, v.onUnmount = context.WithCancel(ctx)
tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
if err != nil {
return err
}
v.buffer = make(chan *Record, v.ui.loadBufferSize)
go func() {
@@ -70,12 +73,11 @@ func (v *RecordsView) Mount(ctx context.Context) error {
for item := range tempBuffer {
if item.err != nil {
v.ui.stopOnError(item.err)
v.ui.stopOnError(err)
break
}
record := item.val
var err error
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
if err != nil {
v.ui.stopOnError(err)
@@ -19,7 +19,6 @@ func initAPEManagerService(c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
c.cfgMorph.client,
apemanager.WithLogger(c.log))
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)
@@ -397,16 +397,16 @@ type internals struct {
}
// starts node's maintenance.
func (c *cfg) startMaintenance(ctx context.Context) {
func (c *cfg) startMaintenance() {
c.isMaintenance.Store(true)
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance)
c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
}
// stops node's maintenance.
func (c *internals) stopMaintenance(ctx context.Context) {
func (c *internals) stopMaintenance() {
if c.isMaintenance.CompareAndSwap(true, false) {
c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance)
c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
}
}
@@ -591,6 +591,8 @@ type cfgMorph struct {
client *client.Client
notaryEnabled bool
// TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration
@@ -606,10 +608,9 @@ type cfgAccounting struct {
type cfgContainer struct {
scriptHash neogoutil.Uint160
parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers
containerBatchSize uint32
parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers
}
type cfgFrostfsID struct {
@@ -698,12 +699,13 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector
logPrm := c.loggerPrm()
logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
if loggerconfig.ToLokiConfig(appCfg).Enabled {
log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
return lokiCore
}))
@@ -852,8 +854,8 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
}
func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
return cfgGRPC{
maxChunkSize: maxChunkSize,
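The gRPC budget arithmetic above is easy to check by hand. A tiny worked example, assuming a 4 MiB message limit and the 72-byte per-address estimate from the comment (both concrete numbers are assumptions, not taken from this diff):

```go
package main

import "fmt"

func main() {
	const (
		maxMsgSize  = 4 << 20 // assumed 4 MiB gRPC message limit
		addressSize = 72      // per-address estimate from the comment
	)

	maxChunkSize := uint64(maxMsgSize) * 3 / 4  // 75% of the message for payload
	maxAddrAmount := maxChunkSize / addressSize // addresses that fit in that budget

	fmt.Println(maxChunkSize)  // 3145728 bytes (3 MiB)
	fmt.Println(maxAddrAmount) // 43690
}
```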
@@ -1058,7 +1060,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return sh
}
func (c *cfg) loggerPrm() *logger.Prm {
func (c *cfg) loggerPrm() (*logger.Prm, error) {
// check if it has been inited before
if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm)
@@ -1077,7 +1079,7 @@ func (c *cfg) loggerPrm() *logger.Prm {
}
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger
return c.dynamicConfiguration.logger, nil
}
func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1087,7 +1089,7 @@ func (c *cfg) LocalAddress() network.AddressGroup {
func initLocalStorage(ctx context.Context, c *cfg) {
ls := engine.New(c.engineOpts()...)
addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
ls.HandleNewEpoch(ctx, ev.(netmap2.NewEpoch).EpochNumber())
})
@@ -1101,10 +1103,10 @@ func initLocalStorage(ctx context.Context, c *cfg) {
shard.WithTombstoneSource(c.createTombstoneSource()),
shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
shardsAttached++
c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
}
}
if shardsAttached == 0 {
@@ -1114,23 +1116,23 @@ func initLocalStorage(ctx context.Context, c *cfg) {
c.cfgObject.cfgLocalStorage.localStorage = ls
c.onShutdown(func() {
c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
err := ls.Close(context.WithoutCancel(ctx))
if err != nil {
c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
zap.Error(err),
c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
zap.String("error", err.Error()),
)
} else {
c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
}
})
}
func initAccessPolicyEngine(ctx context.Context, c *cfg) {
func initAccessPolicyEngine(_ context.Context, c *cfg) {
var localOverrideDB chainbase.LocalOverrideDatabase
if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" {
c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed)
localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
} else {
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
@@ -1146,7 +1148,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
if cacheSize > 0 {
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
}
@@ -1155,7 +1157,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
c.onShutdown(func() {
if err := ape.LocalOverrideDatabaseCore().Close(); err != nil {
c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure,
c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure,
zap.Error(err),
)
}
@@ -1204,12 +1206,12 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
c.cfgNetmap.state.setNodeInfo(ni)
}
func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
func (c *cfg) updateContractNodeInfo(epoch uint64) {
ni, err := c.netmapLocalNodeState(epoch)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
zap.Error(err))
zap.String("error", err.Error()))
return
}
@@ -1219,37 +1221,41 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil.
func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error {
ni := c.cfgNodeInfo.localInfo
ni.SetStatus(state)
stateSetter(&ni)
prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni)
return c.cfgNetmap.wrapper.AddPeer(ctx, prm)
return c.cfgNetmap.wrapper.AddPeer(prm)
}
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(ctx context.Context, c *cfg) error {
return c.bootstrapWithState(ctx, netmap.Online)
func bootstrapOnline(c *cfg) error {
return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Online)
})
}
// bootstrap calls bootstrapWithState with:
// - "maintenance" state if maintenance is in progress on the current node
// - "online", otherwise
func (c *cfg) bootstrap(ctx context.Context) error {
func (c *cfg) bootstrap() error {
// switch to online except when under maintenance
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(ctx, netmap.Maintenance)
c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Maintenance)
})
}
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
zap.Stringer("previous", st),
)
return bootstrapOnline(ctx, c)
return bootstrapOnline(c)
}
// needBootstrap checks if local node should be registered in network on bootup.
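The two bootstrap variants differ in how the caller picks the node state: an enum parameter versus a setter callback. The callback shape keeps the function open to arbitrary mutations of the node info at the cost of a little ceremony. A minimal mock of the idea (the types here are stand-ins, not the SDK's):

```go
package main

import "fmt"

type NodeInfo struct{ status string }

func (n *NodeInfo) SetStatus(s string) { n.status = s }

// bootstrapWithState applies a caller-provided mutation to a copy of
// the locally configured info before registering the node, mirroring
// the callback-style signature in the diff.
func bootstrapWithState(stateSetter func(*NodeInfo)) NodeInfo {
	ni := NodeInfo{}
	stateSetter(&ni)
	return ni
}

func main() {
	online := bootstrapWithState(func(ni *NodeInfo) { ni.SetStatus("online") })
	maint := bootstrapWithState(func(ni *NodeInfo) { ni.SetStatus("maintenance") })
	fmt.Println(online.status, maint.status)
}
```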
@@ -1274,19 +1280,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
// signals causing application to shut down should have priority over
// reconfiguration signal
case <-ch:
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.shutdown(ctx)
c.shutdown()
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
c.log.Warn(logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown(ctx)
c.shutdown()
c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
default:
// block until any signal is receieved
@@ -1294,19 +1300,19 @@ func (c *cfg) signalWatcher(ctx context.Context) {
case <-sighupCh:
c.reloadConfig(ctx)
case <-ch:
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
c.shutdown(ctx)
c.shutdown()
c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete)
c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
case err := <-c.internalErr: // internal application error
c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError,
c.log.Warn(logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown(ctx)
c.shutdown()
c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete)
c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
}
}
@@ -1314,17 +1320,17 @@ func (c *cfg) signalWatcher(ctx context.Context) {
}
func (c *cfg) reloadConfig(ctx context.Context) {
c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip)
if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
c.log.Info(logs.FrostFSNodeSIGHUPSkip)
return
}
defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)
err := c.reloadAppConfig()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err))
c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
return
}
@@ -1333,7 +1339,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// Logger
logPrm := c.loggerPrm()
logPrm, err := c.loggerPrm()
if err != nil {
c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
components := c.getComponents(ctx, logPrm)
@@ -1352,25 +1362,25 @@ func (c *cfg) reloadConfig(ctx context.Context) {
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
return
}
for _, component := range components {
err = component.reloadFunc()
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying,
c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
zap.String("component", component.name),
zap.Error(err))
}
}
if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
return
}
c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
@@ -1378,7 +1388,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(ctx, c)
setRuntimeParameters(c)
return nil
}})
components = append(components, dCmp{"audit", func() error {
@@ -1393,7 +1403,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
}
updated, err := tracing.Setup(ctx, *traceConfig)
if updated {
c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated)
c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
}
return err
}})
@@ -1428,7 +1438,7 @@ func (c *cfg) reloadPools() error {
func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) {
oldSize := p.Cap()
if oldSize != newSize {
c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name),
zap.Int("old", oldSize), zap.Int("new", newSize))
p.Tune(newSize)
}
@@ -1456,7 +1466,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
return container.NewInfoProvider(func() (container.Source, error) {
c.initMorphComponents(ctx)
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
if err != nil {
return nil, err
}
@@ -1464,14 +1474,14 @@ func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoPro
})
}
func (c *cfg) shutdown(ctx context.Context) {
old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN)
func (c *cfg) shutdown() {
old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
if old == control.HealthStatus_SHUTTING_DOWN {
c.log.Info(ctx, logs.FrostFSNodeShutdownSkip)
c.log.Info(logs.FrostFSNodeShutdownSkip)
return
}
if old == control.HealthStatus_STARTING {
c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady)
c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady)
}
c.ctxCancel()
@@ -1481,6 +1491,6 @@ func (c *cfg) shutdown(ctx context.Context) {
}
if err := sdnotify.ClearStatus(); err != nil {
c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
}
}
|
@ -1,7 +1,6 @@
|
|||
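Most of the churn above comes from a single logger signature change: on master every log call takes a context.Context so trace identifiers can travel with the record, while the branch keeps the context-free variants. A minimal sketch of that wrapper pattern, with illustrative names (ctxLogger and traceKey are not the actual logger package):

package main

import (
	"context"

	"go.uber.org/zap"
)

// ctxLogger is a hypothetical facade over zap mirroring the two call
// styles in this diff: Info(ctx, msg) on master, Info(msg) on the branch.
type ctxLogger struct {
	z *zap.Logger
}

// Info attaches trace information from ctx (if any) before logging.
func (l *ctxLogger) Info(ctx context.Context, msg string, fields ...zap.Field) {
	l.z.Info(msg, append(fields, traceFields(ctx)...)...)
}

type traceKey struct{}

func traceFields(ctx context.Context) []zap.Field {
	// A real implementation would extract a trace/span ID from ctx.
	if id, ok := ctx.Value(traceKey{}).(string); ok {
		return []zap.Field{zap.String("trace_id", id)}
	}
	return nil
}

func main() {
	log := &ctxLogger{z: zap.NewNop()}
	ctx := context.WithValue(context.Background(), traceKey{}, "abc123")
	log.Info(ctx, "configuration has been reloaded successfully")
}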
@@ -1,7 +1,6 @@
 package config

 import (
-	"slices"
 	"strings"

 	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@@ -53,5 +52,6 @@ func (x *Config) Value(name string) any {
 // It supports only one level of nesting and is intended to be used
 // to provide default values.
 func (x *Config) SetDefault(from *Config) {
-	x.defaultPath = slices.Clone(from.path)
+	x.defaultPath = make([]string, len(from.path))
+	copy(x.defaultPath, from.path)
 }
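The SetDefault change is behavior-preserving: slices.Clone (Go 1.21+) is shorthand for allocating a new slice and copying the elements, so master only modernizes the branch's make-and-copy form. A quick demonstration:

package main

import (
	"fmt"
	"slices"
)

func main() {
	path := []string{"storage", "shard", "default"}

	// Branch form: explicit allocation plus copy.
	a := make([]string, len(path))
	copy(a, path)

	// Master form: equivalent one-liner.
	b := slices.Clone(path)

	// Both are independent copies; mutating the original affects neither.
	path[0] = "mutated"
	fmt.Println(a[0], b[0]) // storage storage
}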
@@ -1,27 +0,0 @@
-package containerconfig
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-
-const (
-	subsection           = "container"
-	listStreamSubsection = "list_stream"
-
-	// ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
-	ContainerBatchSizeDefault = 1000
-)
-
-// ContainerBatchSize returns the value of "batch_size" config parameter
-// from "list_stream" subsection of "container" section.
-//
-// Returns ContainerBatchSizeDefault if the value is missing or if
-// the value is not positive integer.
-func ContainerBatchSize(c *config.Config) uint32 {
-	if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
-		return ContainerBatchSizeDefault
-	}
-	size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
-	if size == 0 {
-		return ContainerBatchSizeDefault
-	}
-	return size
-}
@@ -1,27 +0,0 @@
-package containerconfig_test
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"github.com/stretchr/testify/require"
-)
-
-func TestContainerSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		empty := configtest.EmptyConfig()
-		require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
-	})
-
-	const path = "../../../../config/example/node"
-	fileConfigTest := func(c *config.Config) {
-		require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
@@ -41,10 +41,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
 			c.Sub(si),
 		)

-		if sc.Mode() == mode.Disabled {
-			continue
-		}
-
 		// Path for the blobstor can't be present in the default section, because different shards
 		// must have different paths, so if it is missing, the shard is not here.
 		// At the same time checking for "blobstor" section doesn't work proper
@@ -54,6 +50,10 @@
 		}
 		(*config.Config)(sc).SetDefault(def)

+		if sc.Mode() == mode.Disabled {
+			continue
+		}
+
 		if err := f(sc); err != nil {
 			return err
 		}
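The two hunks above differ only in where the mode.Disabled check sits relative to SetDefault: one side checks a shard's mode before the default section is merged in, the other after. The reasoning the diff leaves implicit: if a shard inherits its mode from the default section, only the post-merge check sees the real value. A toy model of that ordering effect, using hypothetical types rather than the real config package:

package main

import "fmt"

// shardCfg is a toy stand-in for a shard configuration section.
type shardCfg struct {
	mode       string // "" means "not set on this shard"
	defaultSec *shardCfg
}

// Mode resolves through the default section, like SetDefault enables.
func (s *shardCfg) Mode() string {
	if s.mode == "" && s.defaultSec != nil {
		return s.defaultSec.mode
	}
	return s.mode
}

func main() {
	def := &shardCfg{mode: "disabled"}
	shard := &shardCfg{} // mode comes from the default section only

	// Checking before attaching defaults sees an empty mode...
	fmt.Printf("before SetDefault: %q\n", shard.Mode()) // ""

	// ...checking after attaching defaults sees the real value.
	shard.defaultSec = def
	fmt.Printf("after SetDefault: %q\n", shard.Mode()) // "disabled"
}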
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIterateShards(t *testing.T) {
|
||||
fileConfigTest := func(c *config.Config) {
|
||||
var res []string
|
||||
require.NoError(t,
|
||||
engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
|
||||
res = append(res, sc.Metabase().Path())
|
||||
return nil
|
||||
}))
|
||||
require.Equal(t, []string{"abc", "xyz"}, res)
|
||||
}
|
||||
|
||||
const cfgDir = "./testdata/shards"
|
||||
configtest.ForEachFileType(cfgDir, fileConfigTest)
|
||||
configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
|
||||
}
|
||||
|
||||
func TestEngineSection(t *testing.T) {
|
||||
t.Run("defaults", func(t *testing.T) {
|
||||
empty := configtest.EmptyConfig()
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
@@ -1,3 +0,0 @@
-FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc
-FROSTFS_STORAGE_SHARD_1_MODE=disabled
-FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz
@@ -1,13 +0,0 @@
-{
-  "storage.shard": {
-    "0": {
-      "metabase.path": "abc"
-    },
-    "1": {
-      "mode": "disabled"
-    },
-    "2": {
-      "metabase.path": "xyz"
-    }
-  }
-}
@@ -1,7 +0,0 @@
-storage.shard:
-  0:
-    metabase.path: abc
-  1:
-    mode: disabled
-  2:
-    metabase.path: xyz
@@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
 //
 // Returns PermDefault if the value is not a positive number.
 func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
-	p := config.UintSafe(l.cfg, "perm")
+	p := config.UintSafe((*config.Config)(l.cfg), "perm")
 	if p == 0 {
 		p = PermDefault
 	}
@@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
 //
 // Returns false if the value is not a boolean.
 func (l PersistentPolicyRulesConfig) NoSync() bool {
-	return config.BoolSafe(l.cfg, "no_sync")
+	return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
 }

 // CompatibilityMode returns true if need to run node in compatibility with previous versions mode.
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
|
||||
|
@ -25,7 +24,6 @@ func ToTracingConfig(c *config.Config) (*tracing.Config, error) {
|
|||
Service: "frostfs-node",
|
||||
InstanceID: getInstanceIDOrDefault(c),
|
||||
Version: misc.Version,
|
||||
Attributes: make(map[string]string),
|
||||
}
|
||||
|
||||
if trustedCa := config.StringSafe(c.Sub(subsection), "trusted_ca"); trustedCa != "" {
|
||||
|
@ -40,30 +38,11 @@ func ToTracingConfig(c *config.Config) (*tracing.Config, error) {
|
|||
}
|
||||
conf.ServerCaCertPool = certPool
|
||||
}
|
||||
|
||||
i := uint64(0)
|
||||
for ; ; i++ {
|
||||
si := strconv.FormatUint(i, 10)
|
||||
ac := c.Sub(subsection).Sub("attributes").Sub(si)
|
||||
k := config.StringSafe(ac, "key")
|
||||
if k == "" {
|
||||
break
|
||||
}
|
||||
v := config.StringSafe(ac, "value")
|
||||
if v == "" {
|
||||
return nil, fmt.Errorf("empty tracing attribute value for key %s", k)
|
||||
}
|
||||
if _, ok := conf.Attributes[k]; ok {
|
||||
return nil, fmt.Errorf("tracing attribute key %s defined more than once", k)
|
||||
}
|
||||
conf.Attributes[k] = v
|
||||
}
|
||||
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
func getInstanceIDOrDefault(c *config.Config) string {
|
||||
s := config.StringSliceSafe(c.Sub("node"), "addresses")
|
||||
s := config.StringSlice(c.Sub("node"), "addresses")
|
||||
if len(s) > 0 {
|
||||
return s[0]
|
||||
}
|
||||
|
|
|
@ -1,46 +0,0 @@
|
|||
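The loop removed above reads span attributes from numbered subsections (attributes.0.key, attributes.0.value, attributes.1.key, ...) until the first missing key, rejecting empty values and duplicate keys. A self-contained sketch of that indexed-subsection scan, with a plain map standing in for the real config accessors:

package main

import (
	"fmt"
	"strconv"
)

// parseAttributes mimics the removed loop: scan attributes.<i>.key/value
// pairs until a key is missing, validating values and duplicates.
func parseAttributes(get func(name string) string) (map[string]string, error) {
	attrs := make(map[string]string)
	for i := uint64(0); ; i++ {
		si := strconv.FormatUint(i, 10)
		k := get("attributes." + si + ".key")
		if k == "" {
			break
		}
		v := get("attributes." + si + ".value")
		if v == "" {
			return nil, fmt.Errorf("empty tracing attribute value for key %s", k)
		}
		if _, ok := attrs[k]; ok {
			return nil, fmt.Errorf("tracing attribute key %s defined more than once", k)
		}
		attrs[k] = v
	}
	return attrs, nil
}

func main() {
	flat := map[string]string{
		"attributes.0.key": "key0", "attributes.0.value": "value",
		"attributes.1.key": "key1", "attributes.1.value": "value",
	}
	attrs, err := parseAttributes(func(name string) string { return flat[name] })
	fmt.Println(attrs, err) // map[key0:value key1:value] <nil>
}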
@@ -1,46 +0,0 @@
-package tracing
-
-import (
-	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
-	configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	"github.com/stretchr/testify/require"
-)
-
-func TestTracingSection(t *testing.T) {
-	t.Run("defaults", func(t *testing.T) {
-		tc, err := ToTracingConfig(configtest.EmptyConfig())
-		require.NoError(t, err)
-		require.Equal(t, false, tc.Enabled)
-		require.Equal(t, tracing.Exporter(""), tc.Exporter)
-		require.Equal(t, "", tc.Endpoint)
-		require.Equal(t, "frostfs-node", tc.Service)
-		require.Equal(t, "", tc.InstanceID)
-		require.Nil(t, tc.ServerCaCertPool)
-		require.Empty(t, tc.Attributes)
-	})
-
-	const path = "../../../../config/example/node"
-
-	fileConfigTest := func(c *config.Config) {
-		tc, err := ToTracingConfig(c)
-		require.NoError(t, err)
-		require.Equal(t, true, tc.Enabled)
-		require.Equal(t, tracing.OTLPgRPCExporter, tc.Exporter)
-		require.Equal(t, "localhost", tc.Endpoint)
-		require.Equal(t, "frostfs-node", tc.Service)
-		require.Nil(t, tc.ServerCaCertPool)
-		require.EqualValues(t, map[string]string{
-			"key0": "value",
-			"key1": "value",
-		}, tc.Attributes)
-	}
-
-	configtest.ForEachFileType(path, fileConfigTest)
-
-	t.Run("ENV", func(t *testing.T) {
-		configtest.ForEnvFileType(t, path, fileConfigTest)
-	})
-}
"context"
|
||||
"net"
|
||||
|
||||
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
|
||||
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
|
||||
|
@ -29,7 +28,7 @@ import (
|
|||
func initContainerService(_ context.Context, c *cfg) {
|
||||
// container wrapper that tries to invoke notary
|
||||
// requests if chain is configured so
|
||||
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
|
||||
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
|
||||
fatalOnErr(err)
|
||||
|
||||
c.shared.cnrClient = wrap
|
||||
|
@ -43,12 +42,11 @@ func initContainerService(_ context.Context, c *cfg) {
|
|||
fatalOnErr(err)
|
||||
|
||||
cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
|
||||
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
|
||||
if cacheSize > 0 {
|
||||
frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
|
||||
}
|
||||
|
||||
c.shared.frostfsidClient = frostfsIDSubjectProvider
|
||||
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
|
||||
|
||||
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
|
||||
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
|
||||
|
@ -58,9 +56,7 @@ func initContainerService(_ context.Context, c *cfg) {
|
|||
&c.key.PrivateKey,
|
||||
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
|
||||
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
|
||||
containerService.NewSplitterService(
|
||||
c.cfgContainer.containerBatchSize, c.respSvc,
|
||||
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
|
||||
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
|
||||
),
|
||||
)
|
||||
service = containerService.NewAuditService(service, c.log, c.audit)
|
||||
|
@ -93,7 +89,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
|
|||
if c.cfgMorph.containerCacheSize > 0 {
|
||||
containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize)
|
||||
|
||||
subscribeToContainerCreation(c, func(ctx context.Context, e event.Event) {
|
||||
subscribeToContainerCreation(c, func(e event.Event) {
|
||||
ev := e.(containerEvent.PutSuccess)
|
||||
|
||||
// read owner of the created container in order to update the reading cache.
|
||||
|
@ -106,21 +102,21 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
|
|||
} else {
|
||||
// unlike removal, we expect successful receive of the container
|
||||
// after successful creation, so logging can be useful
|
||||
c.log.Error(ctx, logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
|
||||
c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
|
||||
zap.Stringer("id", ev.ID),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
|
||||
c.log.Debug(ctx, logs.FrostFSNodeContainerCreationEventsReceipt,
|
||||
c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
|
||||
zap.Stringer("id", ev.ID),
|
||||
)
|
||||
})
|
||||
|
||||
subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
|
||||
subscribeToContainerRemoval(c, func(e event.Event) {
|
||||
ev := e.(containerEvent.DeleteSuccess)
|
||||
containerCache.handleRemoval(ev.ID)
|
||||
c.log.Debug(ctx, logs.FrostFSNodeContainerRemovalEventsReceipt,
|
||||
c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
|
||||
zap.Stringer("id", ev.ID),
|
||||
)
|
||||
})
|
||||
|
@ -222,7 +218,6 @@ type morphContainerReader struct {
|
|||
|
||||
lister interface {
|
||||
ContainersOf(*user.ID) ([]cid.ID, error)
|
||||
IterateContainersOf(*user.ID, func(cid.ID) error) error
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -238,18 +233,14 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
|
|||
return x.lister.ContainersOf(id)
|
||||
}
|
||||
|
||||
func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error {
|
||||
return x.lister.IterateContainersOf(id, processCID)
|
||||
}
|
||||
|
||||
type morphContainerWriter struct {
|
||||
neoClient *cntClient.Client
|
||||
}
|
||||
|
||||
func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) {
|
||||
return cntClient.Put(ctx, m.neoClient, cnr)
|
||||
func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {
|
||||
return cntClient.Put(m.neoClient, cnr)
|
||||
}
|
||||
|
||||
func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error {
|
||||
return cntClient.Delete(ctx, m.neoClient, witness)
|
||||
func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error {
|
||||
return cntClient.Delete(m.neoClient, witness)
|
||||
}
|
||||
|
|
|
@ -16,7 +16,7 @@ import (
|
|||
|
||||
const serviceNameControl = "control"
|
||||
|
||||
func initControlService(ctx context.Context, c *cfg) {
|
||||
func initControlService(c *cfg) {
|
||||
endpoint := controlconfig.GRPC(c.appCfg).Endpoint()
|
||||
if endpoint == controlconfig.GRPCEndpointDefault {
|
||||
return
|
||||
|
@ -46,21 +46,21 @@ func initControlService(ctx context.Context, c *cfg) {
|
|||
|
||||
lis, err := net.Listen("tcp", endpoint)
|
||||
if err != nil {
|
||||
c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
|
||||
c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
c.cfgControlService.server = grpc.NewServer()
|
||||
|
||||
c.onShutdown(func() {
|
||||
stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)
|
||||
stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log)
|
||||
})
|
||||
|
||||
control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)
|
||||
|
||||
c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
|
||||
runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
|
||||
c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
|
||||
c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
|
||||
zap.String("service", serviceNameControl),
|
||||
zap.String("endpoint", endpoint))
|
||||
fatalOnErr(c.cfgControlService.server.Serve(lis))
|
||||
|
@ -72,23 +72,23 @@ func (c *cfg) NetmapStatus() control.NetmapStatus {
|
|||
return c.cfgNetmap.state.controlNetmapStatus()
|
||||
}
|
||||
|
||||
func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) {
|
||||
c.notifySystemd(ctx, st)
|
||||
func (c *cfg) setHealthStatus(st control.HealthStatus) {
|
||||
c.notifySystemd(st)
|
||||
c.healthStatus.Store(int32(st))
|
||||
c.metricsCollector.State().SetHealth(int32(st))
|
||||
}
|
||||
|
||||
func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) {
|
||||
func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
|
||||
if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
|
||||
c.notifySystemd(ctx, newSt)
|
||||
c.notifySystemd(newSt)
|
||||
c.metricsCollector.State().SetHealth(int32(newSt))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) {
|
||||
func (c *cfg) swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) {
|
||||
old = control.HealthStatus(c.healthStatus.Swap(int32(st)))
|
||||
c.notifySystemd(ctx, st)
|
||||
c.notifySystemd(st)
|
||||
c.metricsCollector.State().SetHealth(int32(st))
|
||||
return
|
||||
}
|
||||
|
@ -97,7 +97,7 @@ func (c *cfg) HealthStatus() control.HealthStatus {
|
|||
return control.HealthStatus(c.healthStatus.Load())
|
||||
}
|
||||
|
||||
func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) {
|
||||
func (c *cfg) notifySystemd(st control.HealthStatus) {
|
||||
if !c.sdNotify {
|
||||
return
|
||||
}
|
||||
|
@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) {
|
|||
err = sdnotify.Status(fmt.Sprintf("%v", st))
|
||||
}
|
||||
if err != nil {
|
||||
c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err))
|
||||
c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
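Aside from the dropped context parameter, the health-status helpers are identical on both sides: the status lives in an atomic int32 so concurrent readers never see a torn value, and compareAndSwapHealthStatus triggers side effects (systemd notification, metrics) only when the swap actually happened. A minimal sketch of that pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

type health int32

const (
	starting health = iota
	ready
	shuttingDown
)

type node struct {
	status atomic.Int32
}

// compareAndSwap reports whether the transition happened; side effects
// run only for the goroutine that won the race.
func (n *node) compareAndSwap(old, new health) bool {
	if n.status.CompareAndSwap(int32(old), int32(new)) {
		fmt.Printf("status: %d -> %d\n", old, new)
		return true
	}
	return false
}

func main() {
	var n node
	n.status.Store(int32(starting))
	fmt.Println(n.compareAndSwap(starting, ready))        // true
	fmt.Println(n.compareAndSwap(starting, shuttingDown)) // false: already ready
}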
@@ -1,7 +1,6 @@
 package main

 import (
-	"context"
 	"crypto/tls"
 	"errors"
 	"net"
@@ -19,11 +18,11 @@ import (
 const maxRecvMsgSize = 256 << 20

-func initGRPC(ctx context.Context, c *cfg) {
+func initGRPC(c *cfg) {
 	var endpointsToReconnect []string
 	var successCount int
 	grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
-		serverOpts, ok := getGrpcServerOpts(ctx, c, sc)
+		serverOpts, ok := getGrpcServerOpts(c, sc)
 		if !ok {
 			return
 		}
@@ -31,7 +30,7 @@ func initGRPC(ctx context.Context, c *cfg) {
 		lis, err := net.Listen("tcp", sc.Endpoint())
 		if err != nil {
 			c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint())
-			c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+			c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
 			endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint())
 			return
 		}
@@ -40,7 +39,7 @@ func initGRPC(ctx context.Context, c *cfg) {
 		srv := grpc.NewServer(serverOpts...)

 		c.onShutdown(func() {
-			stopGRPC(ctx, "FrostFS Public API", srv, c.log)
+			stopGRPC("FrostFS Public API", srv, c.log)
 		})

 		c.cfgGRPC.append(sc.Endpoint(), lis, srv)
@@ -53,11 +52,11 @@ func initGRPC(ctx context.Context, c *cfg) {
 	c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg)

 	for _, endpoint := range endpointsToReconnect {
-		scheduleReconnect(ctx, endpoint, c)
+		scheduleReconnect(endpoint, c)
 	}
 }

-func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
+func scheduleReconnect(endpoint string, c *cfg) {
 	c.wg.Add(1)
 	go func() {
 		defer c.wg.Done()
@@ -66,7 +65,7 @@ func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
 		for {
 			select {
 			case <-t.C:
-				if tryReconnect(ctx, endpoint, c) {
+				if tryReconnect(endpoint, c) {
 					return
 				}
 			case <-c.done:
@@ -76,20 +75,20 @@ func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) {
 	}()
 }

-func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool {
-	c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))
+func tryReconnect(endpoint string, c *cfg) bool {
+	c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint))

-	serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c)
+	serverOpts, found := getGRPCEndpointOpts(endpoint, c)
 	if !found {
-		c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
+		c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint))
 		return true
 	}

 	lis, err := net.Listen("tcp", endpoint)
 	if err != nil {
 		c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint)
-		c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
-		c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
+		c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
+		c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout))
 		return false
 	}
 	c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint)
@@ -97,16 +96,16 @@ func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool {
 	srv := grpc.NewServer(serverOpts...)

 	c.onShutdown(func() {
-		stopGRPC(ctx, "FrostFS Public API", srv, c.log)
+		stopGRPC("FrostFS Public API", srv, c.log)
 	})

 	c.cfgGRPC.appendAndHandle(endpoint, lis, srv)

-	c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
+	c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint))
 	return true
 }

-func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
+func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
 	unlock := c.LockAppConfigShared()
 	defer unlock()
 	grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
@@ -117,7 +116,7 @@ func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
 		return
 		}
 		var ok bool
-		result, ok = getGrpcServerOpts(ctx, c, sc)
+		result, ok = getGrpcServerOpts(c, sc)
 		if !ok {
 			return
 		}
@@ -126,7 +125,7 @@ func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) {
 	return
 }

-func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
+func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
 	serverOpts := []grpc.ServerOption{
 		grpc.MaxRecvMsgSize(maxRecvMsgSize),
 		grpc.ChainUnaryInterceptor(
@@ -144,7 +143,7 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
 	if tlsCfg != nil {
 		cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
 		if err != nil {
-			c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
+			c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
 			return nil, false
 		}

@@ -175,38 +174,38 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) {
 	return serverOpts, true
 }

-func serveGRPC(ctx context.Context, c *cfg) {
+func serveGRPC(c *cfg) {
 	c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) {
 		c.wg.Add(1)

 		go func() {
 			defer func() {
-				c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint,
+				c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
 					zap.Stringer("endpoint", l.Addr()),
 				)

 				c.wg.Done()
 			}()

-			c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
+			c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
 				zap.String("service", "gRPC"),
 				zap.Stringer("endpoint", l.Addr()),
 			)

 			if err := s.Serve(l); err != nil {
 				c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e)
-				c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err))
+				c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err))
 				c.cfgGRPC.dropConnection(e)
-				scheduleReconnect(ctx, e, c)
+				scheduleReconnect(e, c)
 			}
 		}()
 	})
 }

-func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) {
-	l = l.With(zap.String("name", name))
+func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
+	l = &logger.Logger{Logger: l.With(zap.String("name", name))}

-	l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer)
+	l.Info(logs.FrostFSNodeStoppingGRPCServer)

 	// GracefulStop() may freeze forever, see #1270
 	done := make(chan struct{})
@@ -218,9 +217,9 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) {
 	select {
 	case <-done:
 	case <-time.After(1 * time.Minute):
-		l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
+		l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
 		s.Stop()
 	}

-	l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
+	l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
 }
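stopGRPC keeps the same shape on both sides: GracefulStop can block indefinitely if a client never drains (see #1270), so it runs in a goroutine and a one-minute timer falls back to the hard Stop. A condensed, runnable version of that shutdown pattern:

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc"
)

// stopWithTimeout drains srv gracefully, forcing a hard stop if draining
// takes longer than timeout (GracefulStop may otherwise block forever).
func stopWithTimeout(srv *grpc.Server, timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		srv.GracefulStop()
		close(done)
	}()

	select {
	case <-done:
		fmt.Println("gRPC server stopped gracefully")
	case <-time.After(timeout):
		fmt.Println("cannot shut down gracefully, forcing stop")
		srv.Stop()
	}
}

func main() {
	srv := grpc.NewServer()
	stopWithTimeout(srv, time.Minute)
}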
@@ -20,9 +20,9 @@ type httpComponent struct {
 	preReload func(c *cfg)
 }

-func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
+func (cmp *httpComponent) init(c *cfg) {
 	if !cmp.enabled {
-		c.log.Info(ctx, cmp.name+" is disabled")
+		c.log.Info(cmp.name + " is disabled")
 		return
 	}
 	// Init server with parameters
@@ -39,14 +39,14 @@ func (cmp *httpComponent) init(ctx context.Context, c *cfg) {
 	go func() {
 		defer c.wg.Done()

-		c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint,
+		c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
 			zap.String("service", cmp.name),
 			zap.String("endpoint", cmp.address))
 		fatalOnErr(srv.Serve())
 	}()
 	c.closers = append(c.closers, closer{
 		cmp.name,
-		func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) },
+		func() { stopAndLog(c, cmp.name, srv.Shutdown) },
 	})
 }
@@ -62,7 +62,7 @@ func (cmp *httpComponent) reload(ctx context.Context) error {
 	// Cleanup
 	delCloser(cmp.cfg, cmp.name)
 	// Init server with new parameters
-	cmp.init(ctx, cmp.cfg)
+	cmp.init(cmp.cfg)
 	// Start worker
 	if cmp.enabled {
 		startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name))
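Each HTTP component (metrics, pprof) registers itself twice: a goroutine that serves, and a named closer used for shutdown and for reload, which tears the component down and re-runs init with fresh parameters. A stripped-down model of that enable/serve/close lifecycle, with illustrative names:

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"
)

type closer struct {
	name string
	fn   func()
}

// initComponent mirrors httpComponent.init: skip when disabled, otherwise
// start serving and register a named closer for shutdown/reload.
func initComponent(name, addr string, enabled bool, closers *[]closer) {
	if !enabled {
		fmt.Println(name + " is disabled")
		return
	}
	srv := &http.Server{Addr: addr}
	go func() {
		if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
			fmt.Println(name, "serve error:", err)
		}
	}()
	*closers = append(*closers, closer{name, func() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = srv.Shutdown(ctx)
	}})
}

func main() {
	var closers []closer
	initComponent("pprof", "localhost:6060", true, &closers)
	for _, c := range closers {
		c.fn()
	}
}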
@@ -61,21 +61,21 @@ func main() {
 	var ctx context.Context
 	ctx, c.ctxCancel = context.WithCancel(context.Background())

-	c.setHealthStatus(ctx, control.HealthStatus_STARTING)
+	c.setHealthStatus(control.HealthStatus_STARTING)

 	initApp(ctx, c)

 	bootUp(ctx, c)

-	c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY)
+	c.compareAndSwapHealthStatus(control.HealthStatus_STARTING, control.HealthStatus_READY)

 	wait(c)
 }

-func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) {
-	c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name))
+func initAndLog(c *cfg, name string, initializer func(*cfg)) {
+	c.log.Info(fmt.Sprintf("initializing %s service...", name))
 	initializer(c)
-	c.log.Info(ctx, name+" service has been successfully initialized")
+	c.log.Info(name + " service has been successfully initialized")
 }

 func initApp(ctx context.Context, c *cfg) {
@@ -85,72 +85,72 @@ func initApp(ctx context.Context, c *cfg) {
 		c.wg.Done()
 	}()

-	setRuntimeParameters(ctx, c)
+	setRuntimeParameters(c)
 	metrics, _ := metricsComponent(c)
-	initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) })
-	initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) })
+	initAndLog(c, "profiler", initProfilerService)
+	initAndLog(c, metrics.name, metrics.init)

-	initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) })
+	initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) })

 	initLocalStorage(ctx, c)

-	initAndLog(ctx, c, "storage engine", func(c *cfg) {
+	initAndLog(c, "storage engine", func(c *cfg) {
 		fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx))
 		fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx))
 	})

-	initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
-	initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
+	initAndLog(c, "gRPC", initGRPC)
+	initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })

 	initAccessPolicyEngine(ctx, c)
-	initAndLog(ctx, c, "access policy engine", func(c *cfg) {
+	initAndLog(c, "access policy engine", func(c *cfg) {
 		fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx))
 		fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init())
 	})

-	initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
-	initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) })
-	initAndLog(ctx, c, "session", initSessionService)
-	initAndLog(ctx, c, "object", initObjectService)
-	initAndLog(ctx, c, "tree", initTreeService)
-	initAndLog(ctx, c, "apemanager", initAPEManagerService)
-	initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })
+	initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
+	initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
+	initAndLog(c, "session", initSessionService)
+	initAndLog(c, "object", initObjectService)
+	initAndLog(c, "tree", initTreeService)
+	initAndLog(c, "apemanager", initAPEManagerService)
+	initAndLog(c, "control", initControlService)

-	initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
+	initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
 }

 func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) {
-	c.log.Info(ctx, fmt.Sprintf("starting %s service...", name))
+	c.log.Info(fmt.Sprintf("starting %s service...", name))
 	starter(ctx, c)

 	if logSuccess {
-		c.log.Info(ctx, name+" service started successfully")
+		c.log.Info(name + " service started successfully")
 	}
 }

-func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) {
-	c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name))
+func stopAndLog(c *cfg, name string, stopper func() error) {
+	c.log.Debug(fmt.Sprintf("shutting down %s service", name))

-	err := stopper(ctx)
+	err := stopper()
 	if err != nil {
-		c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
-			zap.Error(err),
+		c.log.Debug(fmt.Sprintf("could not shutdown %s server", name),
+			zap.String("error", err.Error()),
 		)
 	}

-	c.log.Debug(ctx, name+" service has been stopped")
+	c.log.Debug(name + " service has been stopped")
 }

 func bootUp(ctx context.Context, c *cfg) {
-	runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) })
+	runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) })
 	runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit)

-	bootstrapNode(ctx, c)
+	bootstrapNode(c)
 	startWorkers(ctx, c)
 }

 func wait(c *cfg) {
-	c.log.Info(context.Background(), logs.CommonApplicationStarted,
+	c.log.Info(logs.CommonApplicationStarted,
 		zap.String("version", misc.Version))

 	<-c.done // graceful shutdown
@@ -160,12 +160,12 @@ func wait(c *cfg) {
 	go func() {
 		defer drain.Done()
 		for err := range c.internalErr {
-			c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError,
+			c.log.Warn(logs.FrostFSNodeInternalApplicationError,
 				zap.String("message", err.Error()))
 		}
 	}()

-	c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop)
+	c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)

 	c.wg.Wait()
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/block"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -35,16 +39,20 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
|
|||
|
||||
lookupScriptHashesInNNS(c) // smart contract auto negotiation
|
||||
|
||||
err := c.cfgMorph.client.EnableNotarySupport(
|
||||
client.WithProxyContract(
|
||||
c.cfgMorph.proxyScriptHash,
|
||||
),
|
||||
if c.cfgMorph.notaryEnabled {
|
||||
err := c.cfgMorph.client.EnableNotarySupport(
|
||||
client.WithProxyContract(
|
||||
c.cfgMorph.proxyScriptHash,
|
||||
),
|
||||
)
|
||||
fatalOnErr(err)
|
||||
}
|
||||
|
||||
c.log.Info(logs.FrostFSNodeNotarySupport,
|
||||
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
|
||||
)
|
||||
fatalOnErr(err)
|
||||
|
||||
c.log.Info(ctx, logs.FrostFSNodeNotarySupport)
|
||||
|
||||
wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0)
|
||||
wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
|
||||
fatalOnErr(err)
|
||||
|
||||
var netmapSource netmap.Source
|
||||
|
@ -56,7 +64,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) {
|
|||
msPerBlock, err := c.cfgMorph.client.MsPerBlock()
|
||||
fatalOnErr(err)
|
||||
c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
|
||||
c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
|
||||
c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
|
||||
}
|
||||
|
||||
if c.cfgMorph.cacheTTL < 0 {
|
||||
|
@ -94,35 +102,41 @@ func initMorphClient(ctx context.Context, c *cfg) {
|
|||
client.WithDialerSource(c.dialerSource),
|
||||
)
|
||||
if err != nil {
|
||||
c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
|
||||
c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
|
||||
zap.Any("endpoints", addresses),
|
||||
zap.Error(err),
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
|
||||
fatalOnErr(err)
|
||||
}
|
||||
|
||||
c.onShutdown(func() {
|
||||
c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents)
|
||||
c.log.Info(logs.FrostFSNodeClosingMorphComponents)
|
||||
cli.Close()
|
||||
})
|
||||
|
||||
if err := cli.SetGroupSignerScope(); err != nil {
|
||||
c.log.Info(ctx, logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
|
||||
c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
|
||||
}
|
||||
|
||||
c.cfgMorph.client = cli
|
||||
c.cfgMorph.notaryEnabled = cli.ProbeNotary()
|
||||
}
|
||||
|
||||
func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
|
||||
tx, vub, err := makeNotaryDeposit(ctx, c)
|
||||
// skip notary deposit in non-notary environments
|
||||
if !c.cfgMorph.notaryEnabled {
|
||||
return
|
||||
}
|
||||
|
||||
tx, vub, err := makeNotaryDeposit(c)
|
||||
fatalOnErr(err)
|
||||
|
||||
if tx.Equals(util.Uint256{}) {
|
||||
// non-error deposit with an empty TX hash means
|
||||
// that the deposit has already been made; no
|
||||
// need to wait it.
|
||||
c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
|
||||
c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -130,7 +144,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
|
|||
fatalOnErr(err)
|
||||
}
|
||||
|
||||
func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) {
|
||||
func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) {
|
||||
const (
|
||||
// gasMultiplier defines how many times more the notary
|
||||
// balance must be compared to the GAS balance of the node:
|
||||
|
@ -147,16 +161,51 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
|
|||
return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err)
|
||||
}
|
||||
|
||||
return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount)
|
||||
return c.cfgMorph.client.DepositEndlessNotary(depositAmount)
|
||||
}
|
||||
|
||||
var (
|
||||
errNotaryDepositFail = errors.New("notary deposit tx has faulted")
|
||||
errNotaryDepositTimeout = errors.New("notary deposit tx has not appeared in the network")
|
||||
)
|
||||
|
||||
type waiterClient struct {
|
||||
c *client.Client
|
||||
}
|
||||
|
||||
func (w *waiterClient) Context() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) {
|
||||
return w.c.GetApplicationLog(hash, trig)
|
||||
}
|
||||
|
||||
func (w *waiterClient) GetBlockCount() (uint32, error) {
|
||||
return w.c.BlockCount()
|
||||
}
|
||||
|
||||
func (w *waiterClient) GetVersion() (*result.Version, error) {
|
||||
return w.c.GetVersion()
|
||||
}
|
||||
|
||||
func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
|
||||
if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
|
||||
return err
|
||||
w, err := waiter.NewPollingBased(&waiterClient{c: c.cfgMorph.client})
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create notary deposit waiter: %w", err)
|
||||
}
|
||||
|
||||
c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
|
||||
return nil
|
||||
res, err := w.WaitAny(ctx, vub, tx)
|
||||
if err != nil {
|
||||
if errors.Is(err, waiter.ErrTxNotAccepted) {
|
||||
return errNotaryDepositTimeout
|
||||
}
|
||||
return fmt.Errorf("could not wait for notary deposit persists in chain: %w", err)
|
||||
}
|
||||
if res.Execution.VMState.HasFlag(vmstate.Halt) {
|
||||
c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted)
|
||||
return nil
|
||||
}
|
||||
return errNotaryDepositFail
|
||||
}
|
||||
|
||||
func listenMorphNotifications(ctx context.Context, c *cfg) {
|
||||
|
@ -168,7 +217,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
|
|||
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
|
||||
if err != nil {
|
||||
fromSideChainBlock = 0
|
||||
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
|
||||
c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
|
||||
}
|
||||
|
||||
subs, err = subscriber.New(ctx, &subscriber.Params{
|
||||
|
@ -197,7 +246,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
|
|||
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
|
||||
res, err := netmapEvent.ParseNewEpoch(src)
|
||||
if err == nil {
|
||||
c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain,
|
||||
c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
|
||||
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
|
||||
)
|
||||
}
|
||||
|
@ -207,12 +256,12 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
|
|||
registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers)
|
||||
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
|
||||
|
||||
registerBlockHandler(lis, func(ctx context.Context, block *block.Block) {
|
||||
c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
|
||||
registerBlockHandler(lis, func(block *block.Block) {
|
||||
c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
|
||||
|
||||
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
|
||||
if err != nil {
|
||||
c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState,
|
||||
c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
|
||||
zap.String("chain", "side"),
|
||||
zap.Uint32("block_index", block.Index))
|
||||
}
|
||||
|
@ -223,17 +272,27 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse
|
|||
subs map[event.Type][]event.Handler,
|
||||
) {
|
||||
for typ, handlers := range subs {
|
||||
pi := event.NotificationParserInfo{}
|
||||
pi.SetType(typ)
|
||||
pi.SetScriptHash(scHash)
|
||||
|
||||
p, ok := parsers[typ]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("missing parser for event %s", typ))
|
||||
}
|
||||
|
||||
lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
|
||||
Contract: scHash,
|
||||
Type: typ,
|
||||
Parser: p,
|
||||
Handlers: handlers,
|
||||
})
|
||||
pi.SetParser(p)
|
||||
|
||||
lis.SetNotificationParser(pi)
|
||||
|
||||
for _, h := range handlers {
|
||||
hi := event.NotificationHandlerInfo{}
|
||||
hi.SetType(typ)
|
||||
hi.SetScriptHash(scHash)
|
||||
hi.SetHandler(h)
|
||||
|
||||
lis.RegisterNotificationHandler(hi)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -262,6 +321,10 @@ func lookupScriptHashesInNNS(c *cfg) {
|
|||
)
|
||||
|
||||
for _, t := range targets {
|
||||
if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
|
||||
continue // ignore proxy contract if notary disabled
|
||||
}
|
||||
|
||||
if emptyHash.Equals(*t.h) {
|
||||
*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
|
||||
fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)
|
||||
|
|
|
@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
|
|||
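The branch side rebuilds deposit waiting on top of a polling waiter: a small adapter exposes application logs, block count and version from the morph client, the waiter polls until the transaction appears or its ValidUntilBlock passes, and the VM state of the execution decides success. Master collapses all of this into the client's own WaitTxHalt helper. A sketch of the polling control flow with a stand-in waiter (the real code uses neo-go's waiter package; the types below are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("tx has not appeared in the network")

// txWaiter is a stand-in for a polling waiter: look for the transaction
// until it is found or the chain passes validUntilBlock.
type txWaiter struct {
	findTx     func(hash string) (vmHalted bool, found bool)
	blockCount func() uint32
}

func (w *txWaiter) waitAny(ctx context.Context, vub uint32, hash string) (bool, error) {
	tick := time.NewTicker(100 * time.Millisecond)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return false, ctx.Err()
		case <-tick.C:
			if halted, found := w.findTx(hash); found {
				return halted, nil
			}
			if w.blockCount() > vub {
				return false, errTimeout // tx can no longer be accepted
			}
		}
	}
}

func main() {
	height := uint32(0)
	w := &txWaiter{
		findTx:     func(string) (bool, bool) { height++; return true, height > 3 },
		blockCount: func() uint32 { return height },
	}
	halted, err := w.waitAny(context.Background(), 100, "deadbeef")
	fmt.Println(halted, err) // true <nil>
}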
@@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
 		}
 	}

-	s.setControlNetmapStatus(ctrlNetSt)
+	s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
 }

 // sets the current node state to the given value. Subsequent cfg.bootstrap
@@ -145,7 +145,7 @@ func initNetmapService(ctx context.Context, c *cfg) {
 	c.initMorphComponents(ctx)

-	initNetmapState(ctx, c)
+	initNetmapState(c)

 	server := netmapTransportGRPC.New(
 		netmapService.NewSignService(
@@ -175,43 +175,45 @@ func initNetmapService(ctx context.Context, c *cfg) {
 }

 func addNewEpochNotificationHandlers(c *cfg) {
-	addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
+	addNewEpochNotificationHandler(c, func(ev event.Event) {
 		c.cfgNetmap.state.setCurrentEpoch(ev.(netmapEvent.NewEpoch).EpochNumber())
 	})

-	addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) {
+	addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
 		e := ev.(netmapEvent.NewEpoch).EpochNumber()

-		c.updateContractNodeInfo(ctx, e)
+		c.updateContractNodeInfo(e)

 		if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470
 			return
 		}

-		if err := c.bootstrap(ctx); err != nil {
-			c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
+		if err := c.bootstrap(); err != nil {
+			c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
 		}
 	})

-	addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
-		_, _, err := makeNotaryDeposit(ctx, c)
-		if err != nil {
-			c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
-				zap.Error(err),
-			)
-		}
-	})
+	if c.cfgMorph.notaryEnabled {
+		addNewEpochAsyncNotificationHandler(c, func(_ event.Event) {
+			_, _, err := makeNotaryDeposit(c)
+			if err != nil {
+				c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
+					zap.String("error", err.Error()),
+				)
+			}
+		})
+	}
 }

 // bootstrapNode adds current node to the Network map.
 // Must be called after initNetmapService.
-func bootstrapNode(ctx context.Context, c *cfg) {
+func bootstrapNode(c *cfg) {
 	if c.needBootstrap() {
 		if c.IsMaintenance() {
-			c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
+			c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap)
 			return
 		}
-		err := c.bootstrap(ctx)
+		err := c.bootstrap()
 		fatalOnErrDetails("bootstrap error", err)
 	}
 }
@@ -238,17 +240,17 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser) {
 // initNetmapState inits current Network map state.
 // Must be called after Morph components initialization.
-func initNetmapState(ctx context.Context, c *cfg) {
+func initNetmapState(c *cfg) {
 	epoch, err := c.cfgNetmap.wrapper.Epoch()
 	fatalOnErrDetails("could not initialize current epoch number", err)

 	var ni *netmapSDK.NodeInfo
-	ni, err = c.netmapInitLocalNodeState(ctx, epoch)
+	ni, err = c.netmapInitLocalNodeState(epoch)
 	fatalOnErrDetails("could not init network state", err)

 	stateWord := nodeState(ni)

-	c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState,
+	c.log.Info(logs.FrostFSNodeInitialNetworkState,
 		zap.Uint64("epoch", epoch),
 		zap.String("state", stateWord),
 	)
@@ -277,7 +279,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
 	return "undefined"
 }

-func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
+func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
 	nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
 	if err != nil {
 		return nil, err
@@ -305,7 +307,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
 	if nmState != candidateState {
 		// This happens when the node was switched to maintenance without epoch tick.
 		// We expect it to continue staying in maintenance.
-		c.log.Info(ctx, logs.CandidateStatusPriority,
+		c.log.Info(logs.CandidateStatusPriority,
 			zap.String("netmap", nmState),
 			zap.String("candidate", candidateState))
 	}
@@ -351,16 +353,16 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) {
 var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode")

-func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error {
+func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
 	switch st {
 	default:
 		return fmt.Errorf("unsupported status %v", st)
 	case control.NetmapStatus_MAINTENANCE:
-		return c.setMaintenanceStatus(ctx, false)
+		return c.setMaintenanceStatus(false)
 	case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE:
 	}

-	c.stopMaintenance(ctx)
+	c.stopMaintenance()

 	if !c.needBootstrap() {
 		return errRelayBootstrap
@@ -368,12 +370,12 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error {
 	if st == control.NetmapStatus_ONLINE {
 		c.cfgNetmap.reBoostrapTurnedOff.Store(false)
-		return bootstrapOnline(ctx, c)
+		return bootstrapOnline(c)
 	}

 	c.cfgNetmap.reBoostrapTurnedOff.Store(true)

-	return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
+	return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {})
 }

 func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
@@ -385,11 +387,11 @@ func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
 	return st, epoch, nil
 }

-func (c *cfg) ForceMaintenance(ctx context.Context) error {
-	return c.setMaintenanceStatus(ctx, true)
+func (c *cfg) ForceMaintenance() error {
+	return c.setMaintenanceStatus(true)
 }

-func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
+func (c *cfg) setMaintenanceStatus(force bool) error {
 	netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
 	if err != nil {
 		err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
@@ -398,10 +400,10 @@ func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
 	}

 	if err == nil || force {
-		c.startMaintenance(ctx)
+		c.startMaintenance()

 		if err == nil {
-			err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance)
+			err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance)
 		}

 		if err != nil {
@@ -414,16 +416,13 @@ func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
 // calls UpdatePeerState operation of Netmap contract's client for the local node.
 // State setter is used to specify node state to switch to.
-func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error {
+func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error {
 	var prm nmClient.UpdatePeerPrm
 	prm.SetKey(c.key.PublicKey().Bytes())
 	stateSetter(&prm)

-	res, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm)
-	if err != nil {
-		return err
-	}
-	return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
+	_, err := c.cfgNetmap.wrapper.UpdatePeerState(prm)
+	return err
 }

 type netInfo struct {
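The updateNetMapState change is a behavioral one, not just a signature change: master submits the state update and then blocks until the transaction is persisted with HALT state, while the branch fires and forgets, so callers may return before the chain reflects the new state. A sketch of the submit-then-wait shape on the master side, with stand-in types:

package main

import (
	"context"
	"fmt"
)

// txResult is a stand-in for the (hash, valid-until-block) pair that
// contract invocations return on master.
type txResult struct {
	hash string
	vub  uint32
}

type morphStub struct{}

func (morphStub) updatePeerState() (txResult, error) {
	return txResult{hash: "deadbeef", vub: 42}, nil
}

func (morphStub) waitTxHalt(ctx context.Context, r txResult) error {
	// Real code polls the chain until the tx executes with HALT state.
	fmt.Println("waiting for", r.hash, "until block", r.vub)
	return nil
}

// updateState shows the master-side shape: submit, then block until the
// transaction is persisted, so callers observe the new state.
func updateState(ctx context.Context, m morphStub) error {
	res, err := m.updatePeerState()
	if err != nil {
		return err
	}
	return m.waitTxHalt(ctx, res)
}

func main() {
	fmt.Println(updateState(context.Background(), morphStub{}))
}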
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
|
||||
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
|
||||
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
|
||||
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
|
||||
|
@ -57,19 +58,19 @@ type objectSvc struct {
|
|||
func (c *cfg) MaxObjectSize() uint64 {
|
||||
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
|
||||
if err != nil {
|
||||
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
|
||||
zap.Error(err),
|
||||
c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
|
||||
return sz
|
||||
}
|
||||
|
||||
func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) {
|
||||
func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
|
||||
return s.put.Put()
|
||||
}
|
||||
|
||||
func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) {
|
||||
func (s *objectSvc) Patch() (objectService.PatchObjectStream, error) {
|
||||
return s.patch.Patch()
|
||||
}
|
||||
|
||||
|
@ -136,6 +137,24 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
|
|||
return result, nil
|
||||
}
|
||||
|
||||
type innerRingFetcherWithoutNotary struct {
|
||||
nm *nmClient.Client
|
||||
}
|
||||
|
||||
func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
|
||||
keys, err := f.nm.GetInnerRingList()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
|
||||
}
|
||||
|
||||
result := make([][]byte, 0, len(keys))
|
||||
for i := range keys {
|
||||
result = append(result, keys[i].Bytes())
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func initObjectService(c *cfg) {
|
||||
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
|
||||
|
||||
|
@ -204,7 +223,7 @@ func initObjectService(c *cfg) {
|
|||
|
||||
func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
|
||||
if policerconfig.UnsafeDisable(c.appCfg) {
|
||||
c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled)
|
||||
c.log.Warn(logs.FrostFSNodePolicerIsDisabled)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -215,7 +234,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
|
|||
prm.MarkAsGarbage(addr)
|
||||
prm.WithForceRemoval()
|
||||
|
||||
return ls.Inhume(ctx, prm)
|
||||
_, err := ls.Inhume(ctx, prm)
|
||||
return err
|
||||
}
|
||||
|
||||
remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)
|
||||
|
@ -265,9 +285,10 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
|
|||
var inhumePrm engine.InhumePrm
|
||||
inhumePrm.MarkAsGarbage(addr)
|
||||
|
||||
if err := ls.Inhume(ctx, inhumePrm); err != nil {
|
||||
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
|
||||
zap.Error(err),
|
||||
_, err := ls.Inhume(ctx, inhumePrm)
|
||||
if err != nil {
|
||||
c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
|
||||
zap.String("error", err.Error()),
|
||||
)
|
||||
}
|
||||
}),
|
||||
|
@ -284,8 +305,13 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
|
|||
}
|
||||
|
||||
func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
|
||||
return &innerRingFetcherWithNotary{
|
||||
sidechain: c.cfgMorph.client,
|
||||
if c.cfgMorph.client.ProbeNotary() {
|
||||
return &innerRingFetcherWithNotary{
|
||||
sidechain: c.cfgMorph.client,
|
||||
}
|
||||
}
|
||||
return &innerRingFetcherWithoutNotary{
|
||||
nm: c.cfgNetmap.wrapper,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -474,7 +500,8 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
|
|||
|
||||
prm.WithTarget(tombstone, addrs...)
|
||||
|
||||
return e.engine.Inhume(ctx, prm)
|
||||
_, err := e.engine.Inhume(ctx, prm)
|
||||
return err
|
||||
}
|
||||
|
||||
func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
|
||||
|
|
|
@ -1,18 +1,17 @@
|
|||
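createInnerRingFetcher on the branch picks an implementation at runtime: when the chain supports notary requests, inner ring keys come from the notary-aware fetcher; otherwise it falls back to the netmap contract's list. A compact model of that capability probe, with illustrative types:

package main

import "fmt"

// irFetcher abstracts the two sources of inner ring keys.
type irFetcher interface {
	InnerRingKeys() ([][]byte, error)
}

type notaryFetcher struct{}

func (notaryFetcher) InnerRingKeys() ([][]byte, error) {
	return [][]byte{[]byte("role-management-key")}, nil
}

type netmapFetcher struct{}

func (netmapFetcher) InnerRingKeys() ([][]byte, error) {
	return [][]byte{[]byte("netmap-contract-key")}, nil
}

// newFetcher mirrors createInnerRingFetcher: probe once, pick a strategy.
func newFetcher(notaryEnabled bool) irFetcher {
	if notaryEnabled {
		return notaryFetcher{}
	}
	return netmapFetcher{}
}

func main() {
	keys, _ := newFetcher(false).InnerRingKeys()
	fmt.Printf("%s\n", keys[0]) // netmap-contract-key
}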
@@ -1,18 +1,17 @@
 package main

 import (
-	"context"
 	"runtime"

 	profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler"
 	httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
 )

-func initProfilerService(ctx context.Context, c *cfg) {
+func initProfilerService(c *cfg) {
 	tuneProfilers(c)

 	pprof, _ := pprofComponent(c)
-	pprof.init(ctx, c)
+	pprof.init(c)
 }

 func pprofComponent(c *cfg) (*httpComponent, bool) {
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
|
||||
|
@@ -10,17 +9,17 @@ import (
    "go.uber.org/zap"
)

func setRuntimeParameters(ctx context.Context, c *cfg) {
func setRuntimeParameters(c *cfg) {
    if len(os.Getenv("GOMEMLIMIT")) != 0 {
        // default limit < yaml limit < app env limit < GOMEMLIMIT
        c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
        c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
        return
    }

    memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg)
    previous := debug.SetMemoryLimit(memLimitBytes)
    if memLimitBytes != previous {
        c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated,
        c.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
            zap.Int64("new_value", memLimitBytes),
            zap.Int64("old_value", previous))
    }
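The precedence comment in setRuntimeParameters ("default limit < yaml limit < app env limit < GOMEMLIMIT") works because the Go runtime applies GOMEMLIMIT itself at startup, so the node only has to refrain from overriding it. A standalone illustration using just the standard library:

    package main

    import (
        "fmt"
        "os"
        "runtime/debug"
    )

    func main() {
        if os.Getenv("GOMEMLIMIT") != "" {
            // The runtime already honored GOMEMLIMIT; a negative argument
            // queries the current limit without changing it.
            fmt.Println("GOMEMLIMIT wins:", debug.SetMemoryLimit(-1))
            return
        }
        // Otherwise apply the configured soft limit; SetMemoryLimit
        // returns the previously effective value, which the node logs
        // when it changes.
        previous := debug.SetMemoryLimit(1 << 30) // e.g. 1 GiB from config
        fmt.Println("previous limit:", previous)
    }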
@@ -48,7 +48,7 @@ func initSessionService(c *cfg) {
        _ = c.privateTokenStore.Close()
    })

    addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) {
    addNewEpochNotificationHandler(c, func(ev event.Event) {
        c.privateTokenStore.RemoveOld(ev.(netmap.NewEpoch).EpochNumber())
    })
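The simplified handler signature relies on the dispatcher only ever delivering netmap.NewEpoch here, which is what makes the unchecked type assertion safe. A toy version of that pattern (the event interface and type names are illustrative, not the real event package):

    package main

    import "fmt"

    type event interface{ isEvent() }

    type newEpoch struct{ number uint64 }

    func (newEpoch) isEvent() {}

    func main() {
        // Registered for new-epoch notifications only, so the assertion
        // below cannot fail in practice.
        handler := func(ev event) {
            fmt.Println("drop session tokens older than epoch", ev.(newEpoch).number)
        }
        handler(newEpoch{number: 42})
    }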
@@ -13,12 +13,12 @@ import (
func initTracing(ctx context.Context, c *cfg) {
    conf, err := tracingconfig.ToTracingConfig(c.appCfg)
    if err != nil {
        c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
        c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
        return
    }
    _, err = tracing.Setup(ctx, *conf)
    if err != nil {
        c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err))
        c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
        return
    }
@@ -29,7 +29,7 @@ func initTracing(ctx context.Context, c *cfg) {
            defer cancel()
            err := tracing.Shutdown(ctx) // cfg context cancels before close
            if err != nil {
                c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
                c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
            }
        },
    })
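The "cfg context cancels before close" comment is the subtle part of this closer: by the time it runs, the application context is already canceled, so flushing spans needs a detached context with its own deadline. A small demonstration of why (tracing.Shutdown is stubbed here):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // Stand-in for tracing.Shutdown: a flush that respects ctx cancellation.
    func shutdownTracing(ctx context.Context) error {
        select {
        case <-time.After(10 * time.Millisecond): // pretend to flush spans
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    }

    func main() {
        appCtx, appCancel := context.WithCancel(context.Background())
        appCancel() // during shutdown the app context is already gone

        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        fmt.Println("with app ctx:  ", shutdownTracing(appCtx)) // context.Canceled
        fmt.Println("with fresh ctx:", shutdownTracing(ctx))    // nil
    }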
@@ -44,7 +44,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
func initTreeService(c *cfg) {
    treeConfig := treeconfig.Tree(c.appCfg)
    if !treeConfig.Enabled() {
        c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
        c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
        return
    }
@@ -80,10 +80,10 @@ func initTreeService(c *cfg) {
    }))

    if d := treeConfig.SyncInterval(); d == 0 {
        addNewEpochNotificationHandler(c, func(ctx context.Context, _ event.Event) {
        addNewEpochNotificationHandler(c, func(_ event.Event) {
            err := c.treeService.SynchronizeAll()
            if err != nil {
                c.log.Error(ctx, logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
                c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
            }
        })
    } else {
@@ -94,7 +94,7 @@ func initTreeService(c *cfg) {
            for range tick.C {
                err := c.treeService.SynchronizeAll()
                if err != nil {
                    c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
                    c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
                    if errors.Is(err, tree.ErrShuttingDown) {
                        return
                    }
@@ -103,17 +103,17 @@ func initTreeService(c *cfg) {
        }()
    }

    subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) {
    subscribeToContainerRemoval(c, func(e event.Event) {
        ev := e.(containerEvent.DeleteSuccess)

        // This is executed asynchronously, so we don't care about the operation taking some time.
        c.log.Debug(ctx, logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
        err := c.treeService.DropTree(ctx, ev.ID, "")
        c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
        err := c.treeService.DropTree(context.Background(), ev.ID, "")
        if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
            // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
            c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
            c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
                zap.Stringer("cid", ev.ID),
                zap.Error(err))
                zap.String("error", err.Error()))
        }
    })
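Switching DropTree to context.Background() in this hunk matches the "executed asynchronously" comment: the callback can outlive whatever context delivered the event, and tree removal presumably should not be cut short by it. A compact illustration of the difference (dropTree is a stub):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // Stand-in for treeService.DropTree: slow work that honors ctx.
    func dropTree(ctx context.Context) error {
        select {
        case <-time.After(20 * time.Millisecond):
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    }

    func main() {
        reqCtx, cancel := context.WithCancel(context.Background())
        cancel() // the delivering context may already be canceled

        fmt.Println("request ctx:   ", dropTree(reqCtx))               // context.Canceled
        fmt.Println("background ctx:", dropTree(context.Background())) // nil
    }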
@@ -2,6 +2,7 @@ package ape

const (
    RuleFlag = "rule"
    RuleFlagDesc = "Rule statement"
    PathFlag = "path"
    PathFlagDesc = "Path to encoded chain in JSON or binary format"
    TargetNameFlag = "target-name"
@@ -16,64 +17,3 @@ const (
    ChainNameFlagDesc = "Chain name(ingress|s3)"
    AllFlag = "all"
)

const RuleFlagDesc = `Defines an Access Policy Engine (APE) rule in the format:
  <status>[:status_detail] <action>... <condition>... <resource>...

Status:
  - allow  Permits specified actions
  - deny   Prohibits specified actions
  - deny:QuotaLimitReached  Denies access due to quota limits

Actions:
  Object operations:
    - Object.Put, Object.Get, etc.
    - Object.* (all object operations)
  Container operations:
    - Container.Put, Container.Get, etc.
    - Container.* (all container operations)

Conditions:
  ResourceCondition:
    Format: ResourceCondition:"key"=value, "key"!=value
    Reserved properties (use '\' before '$'):
      - $Object:version
      - $Object:objectID
      - $Object:containerID
      - $Object:ownerID
      - $Object:creationEpoch
      - $Object:payloadLength
      - $Object:payloadHash
      - $Object:objectType
      - $Object:homomorphicHash

  RequestCondition:
    Format: RequestCondition:"key"=value, "key"!=value
    Reserved properties (use '\' before '$'):
      - $Actor:publicKey
      - $Actor:role

  Example:
    ResourceCondition:"check_key"!="check_value" RequestCondition:"$Actor:role"=others

Resources:
  For objects:
    - namespace/cid/oid (specific object)
    - namespace/cid/* (all objects in container)
    - namespace/* (all objects in namespace)
    - * (all objects)
    - /* (all objects in root namespace)
    - /cid/* (all objects in root container)
    - /cid/oid (specific object in root container)

  For containers:
    - namespace/cid (specific container)
    - namespace/* (all containers in namespace)
    - * (all containers)
    - /cid (root container)
    - /* (all root containers)

Notes:
  - Cannot mix object and container operations in one rule
  - Default behavior is Any=false unless 'any' is specified
  - Use 'all' keyword to explicitly set Any=false`
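Since this branch drops the long-form description, a few rules assembled from that grammar may still be useful as a reference; these are illustrative examples composed from the documented format, not verified against the parser:

    allow Object.Get Object.Head RequestCondition:"$Actor:role"=others namespace/cid/*
    deny:QuotaLimitReached Object.Put namespace/*
    deny Container.Delete ResourceCondition:"check_key"!="check_value" /cid

Each line follows <status> <action>... <condition>... <resource>..., and per the notes above a single rule may target object operations or container operations, never both.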
@@ -26,15 +26,13 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) {
        _ = iota
        internal
        aclDenied
        apemanagerDenied
    )

    var (
        code int

        internalErr   = new(sdkstatus.ServerInternal)
        accessErr     = new(sdkstatus.ObjectAccessDenied)
        apemanagerErr = new(sdkstatus.APEManagerAccessDenied)
        internalErr = new(sdkstatus.ServerInternal)
        accessErr = new(sdkstatus.ObjectAccessDenied)
    )

    switch {
@@ -43,9 +41,6 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) {
    case errors.As(err, &accessErr):
        code = aclDenied
        err = fmt.Errorf("%w: %s", err, accessErr.Reason())
    case errors.As(err, &apemanagerErr):
        code = apemanagerDenied
        err = fmt.Errorf("%w: %s", err, apemanagerErr.Reason())
    default:
        code = internal
    }
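For reference, the iota block above pins the process exit codes by declaration order; the mapping below is implied by that block (apemanagerDenied exists only on the master side of this diff):

    const (
        _ = iota         // 0 is implicit success
        internal         // 1 - sdkstatus.ServerInternal
        aclDenied        // 2 - sdkstatus.ObjectAccessDenied
        apemanagerDenied // 3 - sdkstatus.APEManagerAccessDenied (master only)
    )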
@@ -83,9 +83,6 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
FROSTFS_REPLICATOR_POOL_SIZE=10

# Container service section
FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500

# Object service section
FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
@@ -206,10 +203,6 @@ FROSTFS_TRACING_ENABLED=true
FROSTFS_TRACING_ENDPOINT="localhost"
FROSTFS_TRACING_EXPORTER="otlp_grpc"
FROSTFS_TRACING_TRUSTED_CA=""
FROSTFS_TRACING_ATTRIBUTES_0_KEY=key0
FROSTFS_TRACING_ATTRIBUTES_0_VALUE=value
FROSTFS_TRACING_ATTRIBUTES_1_KEY=key1
FROSTFS_TRACING_ATTRIBUTES_1_VALUE=value

FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
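The indexed *_ATTRIBUTES_* variables removed above correspond to a list in the YAML configuration; assuming the repo's usual env-to-YAML mapping convention, the master-side equivalent would look roughly like:

    tracing:
      enabled: true
      exporter: "otlp_grpc"
      endpoint: "localhost"
      trusted_ca: ""
      attributes:
        - key: key0
          value: value
        - key: key1
          value: value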
Some files were not shown because too many files have changed in this diff.