forked from TrueCloudLab/frostfs-node
Compare commits: fix/ape_lo ... master (186 commits)
409 changed files with 6303 additions and 4064 deletions
.forgejo/workflows/oci-image.yml (new file, 28 lines)

@@ -0,0 +1,28 @@
name: OCI image

on:
  push:
  workflow_dispatch:

jobs:
  image:
    name: Build container images
    runs-on: docker
    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
    steps:
      - name: Clone git repo
        uses: actions/checkout@v3

      - name: Build OCI image
        run: make images

      - name: Push image to OCI registry
        run: |
          echo "$REGISTRY_PASSWORD" \
            | docker login --username truecloudlab --password-stdin git.frostfs.info
          make push-images
        if: >-
          startsWith(github.ref, 'refs/tags/v') &&
          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
        env:
          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}
@@ -19,6 +19,7 @@ jobs:
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'
          check-latest: true

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest
@@ -89,5 +89,7 @@ linters:
    - protogetter
    - intrange
    - tenv
    - unconvert
    - unparam
  disable-all: true
  fast: false
CODEOWNERS (new file, 3 lines)

@@ -0,0 +1,3 @@
.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers
.forgejo/.* @potyarkin
Makefile @potyarkin
Makefile (21 changed lines)

@@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

GO_VERSION ?= 1.22
LINT_VERSION ?= 1.62.0
LINT_VERSION ?= 1.62.2
TRUECLOUDLAB_LINT_VERSION ?= 0.0.8
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go)

@@ -139,6 +139,15 @@ images: image-storage image-ir image-cli image-adm
# Build dirty local Docker images
dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm

# Push FrostFS components' docker image to the registry
push-image-%:
    @echo "⇒ Publish FrostFS $* docker image "
    @docker push $(HUB_IMAGE)-$*:$(HUB_TAG)

# Push all Docker images to the registry
.PHONY: push-images
push-images: push-image-storage push-image-ir push-image-cli push-image-adm

# Run `make %` in Golang container
docker/%:
    docker run --rm -t \

@@ -270,10 +279,12 @@ env-up: all
        echo "Frostfs contracts not found"; exit 1; \
    fi
    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
    ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
        --storage-wallet ./dev/storage/wallet01.json \
        --storage-wallet ./dev/storage/wallet02.json \
        --storage-wallet ./dev/storage/wallet03.json \
        --storage-wallet ./dev/storage/wallet04.json

    @if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
        make locode-download; \
    fi
@@ -16,9 +16,16 @@ const (
    EndpointFlagDesc = "N3 RPC node endpoint"
    EndpointFlagShort = "r"

    WalletPath = "wallet"
    WalletPathShorthand = "w"
    WalletPathUsage = "Path to the wallet"

    AlphabetWalletsFlag = "alphabet-wallets"
    AlphabetWalletsFlagDesc = "Path to alphabet wallets dir"

    AdminWalletPath = "wallet-admin"
    AdminWalletUsage = "Path to the admin wallet"

    LocalDumpFlag = "local-dump"
    ProtoConfigPath = "protocol"
    ContractsInitFlag = "contracts"
@@ -28,6 +28,7 @@ const (
var (
    errNoPathsFound = errors.New("no metabase paths found")
    errNoMorphEndpointsFound = errors.New("no morph endpoints found")
    errUpgradeFailed = errors.New("upgrade failed")
)

var UpgradeCmd = &cobra.Command{

@@ -91,14 +92,19 @@ func upgrade(cmd *cobra.Command, _ []string) error {
    if err := eg.Wait(); err != nil {
        return err
    }
    allSuccess := true
    for mb, ok := range result {
        if ok {
            cmd.Println(mb, ": success")
        } else {
            cmd.Println(mb, ": failed")
            allSuccess = false
        }
    }
    return nil
    if allSuccess {
        return nil
    }
    return errUpgradeFailed
}

func getMetabasePaths(appCfg *config.Config) ([]string, error) {

@@ -135,7 +141,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er
    if err != nil {
        return nil, fmt.Errorf("resolve container contract hash: %w", err)
    }
    cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
    cc, err := morphcontainer.NewFromMorph(cli, sh, 0)
    if err != nil {
        return nil, fmt.Errorf("create morph container client: %w", err)
    }
@@ -3,6 +3,8 @@ package ape
import (
    "errors"

    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
    commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"

@@ -53,16 +55,15 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke {
}

func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) {
    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)

    inv := invoker.New(c, nil)
    var ch util.Uint160
    r := management.NewReader(inv)
    nnsCs, err := helper.GetContractByID(r, 1)
    commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err)

    ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
    ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract))
    commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err)

    invokerAdapter := &invokerAdapter{

@@ -74,10 +75,11 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag
}

func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) {
    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)

    ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName)
    walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
    ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName})
    commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)

    var ch util.Uint160
@@ -51,7 +51,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
        nmHash util.Uint160
    )

    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    if err != nil {
        return err
    }
@@ -26,7 +26,7 @@ import (
const forceConfigSet = "force"

func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    if err != nil {
        return fmt.Errorf("can't create N3 client: %w", err)
    }
@@ -76,7 +76,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
        return fmt.Errorf("invalid filename: %w", err)
    }

    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    if err != nil {
        return fmt.Errorf("can't create N3 client: %w", err)
    }

@@ -157,7 +157,7 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
}

func listContainers(cmd *cobra.Command, _ []string) error {
    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    if err != nil {
        return fmt.Errorf("can't create N3 client: %w", err)
    }
@@ -36,7 +36,7 @@ type contractDumpInfo struct {
}

func dumpContractHashes(cmd *cobra.Command, _ []string) error {
    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    if err != nil {
        return fmt.Errorf("can't create N3 client: %w", err)
    }
@@ -39,6 +39,11 @@ const (
    groupIDFlag = "group-id"

    rootNamespacePlaceholder = "<root>"

    keyFlag = "key"
    keyDescFlag = "Key for storing a value in the subject's KV storage"
    valueFlag = "value"
    valueDescFlag = "Value to be stored in the subject's KV storage"
)

var (

@@ -152,6 +157,23 @@ var (
        },
        Run: frostfsidListGroupSubjects,
    }

    frostfsidSetKVCmd = &cobra.Command{
        Use: "set-kv",
        Short: "Store a key-value pair in the subject's KV storage",
        PreRun: func(cmd *cobra.Command, _ []string) {
            _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
        },
        Run: frostfsidSetKV,
    }
    frostfsidDeleteKVCmd = &cobra.Command{
        Use: "delete-kv",
        Short: "Delete a value from the subject's KV storage",
        PreRun: func(cmd *cobra.Command, _ []string) {
            _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
        },
        Run: frostfsidDeleteKV,
    }
)

func initFrostfsIDCreateNamespaceCmd() {

@@ -237,6 +259,21 @@ func initFrostfsIDListGroupSubjectsCmd() {
    frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)")
}

func initFrostfsIDSetKVCmd() {
    Cmd.AddCommand(frostfsidSetKVCmd)
    frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
    frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
    frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag)
    frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag)
}

func initFrostfsIDDeleteKVCmd() {
    Cmd.AddCommand(frostfsidDeleteKVCmd)
    frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
    frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address")
    frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag)
}

func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) {
    ns := getFrostfsIDNamespace(cmd)

@@ -254,7 +291,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) {
    reader := frostfsidrpclient.NewReader(inv, hash)
    sessionID, it, err := reader.ListNamespaces()
    commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)
    items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
    items, err := readIterator(inv, &it, sessionID)
    commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)

    namespaces, err := frostfsidclient.ParseNamespaces(items)

@@ -306,7 +343,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
    sessionID, it, err := reader.ListNamespaceSubjects(ns)
    commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)

    subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID))
    subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID))
    commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err)

    sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) })

@@ -320,7 +357,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) {
    sessionID, it, err := reader.ListSubjects()
    commonCmd.ExitOnErr(cmd, "can't get subject: %w", err)

    items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
    items, err := readIterator(inv, &it, sessionID)
    commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)

    subj, err := frostfsidclient.ParseSubject(items)

@@ -366,7 +403,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) {
    sessionID, it, err := reader.ListGroups(ns)
    commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err)

    items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
    items, err := readIterator(inv, &it, sessionID)
    commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)
    groups, err := frostfsidclient.ParseGroups(items)
    commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err)

@@ -404,6 +441,45 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) {
    commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err)
}

func frostfsidSetKV(cmd *cobra.Command, _ []string) {
    subjectAddress := getFrostfsIDSubjectAddress(cmd)
    key, _ := cmd.Flags().GetString(keyFlag)
    value, _ := cmd.Flags().GetString(valueFlag)

    if key == "" {
        commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
    }

    ffsid, err := newFrostfsIDClient(cmd)
    commonCmd.ExitOnErr(cmd, "init contract client: %w", err)

    method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value)

    ffsid.addCall(method, args)

    err = ffsid.sendWait()
    commonCmd.ExitOnErr(cmd, "set KV: %w", err)
}

func frostfsidDeleteKV(cmd *cobra.Command, _ []string) {
    subjectAddress := getFrostfsIDSubjectAddress(cmd)
    key, _ := cmd.Flags().GetString(keyFlag)

    if key == "" {
        commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty"))
    }

    ffsid, err := newFrostfsIDClient(cmd)
    commonCmd.ExitOnErr(cmd, "init contract client: %w", err)

    method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key)

    ffsid.addCall(method, args)

    err = ffsid.sendWait()
    commonCmd.ExitOnErr(cmd, "delete KV: %w", err)
}

func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
    ns := getFrostfsIDNamespace(cmd)
    groupID := getFrostfsIDGroupID(cmd)
@@ -416,7 +492,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) {
    sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID))
    commonCmd.ExitOnErr(cmd, "can't list groups: %w", err)

    items, err := readIterator(inv, &it, iteratorBatchSize, sessionID)
    items, err := readIterator(inv, &it, sessionID)
    commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err)

    subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err)

@@ -489,32 +565,28 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) {
    }
    f.bw.Reset()

    if len(f.wCtx.SentTxs) == 0 {
        return nil, errors.New("no transactions to wait")
    }

    f.wCtx.Command.Println("Waiting for transactions to persist...")
    return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil)
}

func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) {
func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) {
    var shouldStop bool
    res := make([]stackitem.Item, 0)
    for !shouldStop {
        items, err := inv.TraverseIterator(sessionID, iter, batchSize)
        items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize)
        if err != nil {
            return nil, err
        }

        res = append(res, items...)
        shouldStop = len(items) < batchSize
        shouldStop = len(items) < iteratorBatchSize
    }

    return res, nil
}

func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) {
    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)

    inv := invoker.New(c, nil)
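The readIterator change above drops the explicit batch-size parameter and pages with the package-level iteratorBatchSize constant instead. The underlying pattern — repeatedly traversing a NEO iterator session until a short page signals the end — can be stated on its own; the following is a minimal illustrative sketch (drainIterator is a hypothetical name, and the batch size is kept as a parameter only for clarity):

package example

import (
    "github.com/google/uuid"
    "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
    "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
)

// drainIterator pulls pages of at most batchSize items from a server-side
// iterator session; a page shorter than batchSize means the iterator is done.
func drainIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID, batchSize int) ([]stackitem.Item, error) {
    res := make([]stackitem.Item, 0)
    for {
        items, err := inv.TraverseIterator(sessionID, iter, batchSize)
        if err != nil {
            return nil, err
        }
        res = append(res, items...)
        if len(items) < batchSize {
            return res, nil
        }
    }
}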
@@ -1,59 +1,12 @@
package frostfsid

import (
    "encoding/hex"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/nspcc-dev/neo-go/pkg/encoding/address"
    "github.com/spf13/viper"
    "github.com/stretchr/testify/require"
)

func TestFrostfsIDConfig(t *testing.T) {
    pks := make([]*keys.PrivateKey, 4)
    for i := range pks {
        pk, err := keys.NewPrivateKey()
        require.NoError(t, err)
        pks[i] = pk
    }

    fmts := []string{
        pks[0].GetScriptHash().StringLE(),
        address.Uint160ToString(pks[1].GetScriptHash()),
        hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
        hex.EncodeToString(pks[3].PublicKey().Bytes()),
    }

    for i := range fmts {
        v := viper.New()
        v.Set("frostfsid.admin", fmts[i])

        actual, found, err := helper.GetFrostfsIDAdmin(v)
        require.NoError(t, err)
        require.True(t, found)
        require.Equal(t, pks[i].GetScriptHash(), actual)
    }

    t.Run("bad key", func(t *testing.T) {
        v := viper.New()
        v.Set("frostfsid.admin", "abc")

        _, found, err := helper.GetFrostfsIDAdmin(v)
        require.Error(t, err)
        require.True(t, found)
    })
    t.Run("missing key", func(t *testing.T) {
        v := viper.New()

        _, found, err := helper.GetFrostfsIDAdmin(v)
        require.NoError(t, err)
        require.False(t, found)
    })
}

func TestNamespaceRegexp(t *testing.T) {
    for _, tc := range []struct {
        name string
@@ -12,6 +12,8 @@ func init() {
    initFrostfsIDAddSubjectToGroupCmd()
    initFrostfsIDRemoveSubjectFromGroupCmd()
    initFrostfsIDListGroupSubjectsCmd()
    initFrostfsIDSetKVCmd()
    initFrostfsIDDeleteKVCmd()
    initFrostfsIDAddSubjectKeyCmd()
    initFrostfsIDRemoveSubjectKeyCmd()
}
@@ -12,7 +12,6 @@ import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/nspcc-dev/neo-go/pkg/encoding/address"
    "github.com/nspcc-dev/neo-go/pkg/io"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
    "github.com/nspcc-dev/neo-go/pkg/smartcontract"

@@ -141,60 +140,29 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
}

func generateStorageCreds(cmd *cobra.Command, _ []string) error {
    return refillGas(cmd, storageGasConfigFlag, true)
}

func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
    // storage wallet path is not part of the config
    storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
    // wallet address is not part of the config
    walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)

    var gasReceiver util.Uint160

    if len(walletAddress) != 0 {
        gasReceiver, err = address.StringToUint160(walletAddress)
        if err != nil {
            return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
        }
    } else {
        if storageWalletPath == "" {
            return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
        }

        var w *wallet.Wallet

        if createWallet {
            w, err = wallet.NewWallet(storageWalletPath)
        } else {
            w, err = wallet.NewWalletFromFile(storageWalletPath)
        }

        if err != nil {
            return fmt.Errorf("can't create wallet: %w", err)
        }

        if createWallet {
            var password string

            label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
            password, err := config.GetStoragePassword(viper.GetViper(), label)
            if err != nil {
                return fmt.Errorf("can't fetch password: %w", err)
            }

            if label == "" {
                label = constants.SingleAccountName
            }

            if err := w.CreateAccount(label, password); err != nil {
                return fmt.Errorf("can't create account: %w", err)
            }
        }

        gasReceiver = w.Accounts[0].Contract.ScriptHash()
    walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
    w, err := wallet.NewWallet(walletPath)
    if err != nil {
        return fmt.Errorf("create wallet: %w", err)
    }

    label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
    password, err := config.GetStoragePassword(viper.GetViper(), label)
    if err != nil {
        return fmt.Errorf("can't fetch password: %w", err)
    }

    if label == "" {
        label = constants.SingleAccountName
    }

    if err := w.CreateAccount(label, password); err != nil {
        return fmt.Errorf("can't create account: %w", err)
    }
    return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
}

func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
    gasStr := viper.GetString(gasFlag)

    gasAmount, err := helper.ParseGASAmount(gasStr)

@@ -208,9 +176,11 @@ func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error
    }

    bw := io.NewBufBinWriter()
    emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
        wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
    emit.Opcodes(bw.BinWriter, opcode.ASSERT)
    for _, gasReceiver := range gasReceivers {
        emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
            wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
        emit.Opcodes(bw.BinWriter, opcode.ASSERT)
    }
    if bw.Err != nil {
        return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
    }
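refillGas now takes a variadic list of receivers and emits one GAS transfer plus an ASSERT opcode per receiver into a single script, so every wallet is refilled by one transaction. A self-contained sketch of that script-assembly step, using only the neo-go packages already shown above (buildTransferScript and its parameters are illustrative, not part of the change):

package example

import (
    "fmt"

    "github.com/nspcc-dev/neo-go/pkg/io"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
    "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
    "github.com/nspcc-dev/neo-go/pkg/util"
    "github.com/nspcc-dev/neo-go/pkg/vm/emit"
    "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
)

// buildTransferScript emits a GAS "transfer" call followed by ASSERT for each
// receiver, mirroring the loop added to refillGas above.
func buildTransferScript(from util.Uint160, receivers []util.Uint160, amount int64) ([]byte, error) {
    bw := io.NewBufBinWriter()
    for _, to := range receivers {
        emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, from, to, amount, nil)
        emit.Opcodes(bw.BinWriter, opcode.ASSERT)
    }
    if bw.Err != nil {
        return nil, fmt.Errorf("invalid transfer arguments: %w", bw.Err)
    }
    return bw.Bytes(), nil
}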
@@ -1,7 +1,12 @@
package generate

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
    "github.com/nspcc-dev/neo-go/pkg/encoding/address"
    "github.com/nspcc-dev/neo-go/pkg/util"
    "github.com/nspcc-dev/neo-go/pkg/wallet"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
)

@@ -33,7 +38,27 @@ var (
            _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
        },
        RunE: func(cmd *cobra.Command, _ []string) error {
            return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
            storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
            walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)

            var gasReceivers []util.Uint160
            for _, walletAddress := range walletAddresses {
                addr, err := address.StringToUint160(walletAddress)
                if err != nil {
                    return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
                }

                gasReceivers = append(gasReceivers, addr)
            }
            for _, storageWalletPath := range storageWalletPaths {
                w, err := wallet.NewWalletFromFile(storageWalletPath)
                if err != nil {
                    return fmt.Errorf("can't create wallet: %w", err)
                }

                gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
            }
            return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
        },
    }
    GenerateAlphabetCmd = &cobra.Command{

@@ -50,10 +75,10 @@ var (
func initRefillGasCmd() {
    RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
    RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
    RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
    RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
    RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
    RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
    RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
    RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
    RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
}

func initGenerateStorageCmd() {
@@ -3,9 +3,6 @@ package helper
import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
    commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
    "github.com/google/uuid"
    "github.com/nspcc-dev/neo-go/pkg/core/state"
    "github.com/nspcc-dev/neo-go/pkg/core/transaction"

@@ -16,7 +13,6 @@ import (
    "github.com/nspcc-dev/neo-go/pkg/util"
    "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
    "github.com/nspcc-dev/neo-go/pkg/wallet"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
)

@@ -28,32 +24,86 @@ type LocalActor struct {
    rpcInvoker invoker.RPCInvoke
}

type AlphabetWallets struct {
    Label string
    Path string
}

func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) {
    w, err := GetAlphabetWallets(v, a.Path)
    if err != nil {
        return nil, err
    }

    var accounts []*wallet.Account
    for _, wall := range w {
        acc, err := GetWalletAccount(wall, a.Label)
        if err != nil {
            return nil, err
        }
        accounts = append(accounts, acc)
    }
    return accounts, nil
}

type RegularWallets struct{ Path string }

func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) {
    w, err := getRegularWallet(r.Path)
    if err != nil {
        return nil, err
    }

    return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil
}

// NewLocalActor create LocalActor with accounts form provided wallets.
// In case of empty wallets provided created actor with dummy account only for read operation.
//
// If wallets are provided, the contract client will use accounts with accName name from these wallets.
// To determine which account name should be used in a contract client, refer to how the contract
// verifies the transaction signature.
func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) {
    walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag))
func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) {
    var act *actor.Actor
    var accounts []*wallet.Account
    var signers []actor.SignerAccount

    wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir)
    commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err)
    if alphabet != nil {
        account, err := alphabet.GetAccount(viper.GetViper())
        if err != nil {
            return nil, err
        }

    for _, w := range wallets {
        acc, err := GetWalletAccount(w, accName)
        commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err)
        accounts = append(accounts, acc)
        accounts = append(accounts, account...)
        signers = append(signers, actor.SignerAccount{
            Signer: transaction.Signer{
                Account: account[0].Contract.ScriptHash(),
                Scopes: transaction.Global,
            },
            Account: account[0],
        })
    }
    act, err = actor.New(c, []actor.SignerAccount{{
        Signer: transaction.Signer{
            Account: accounts[0].Contract.ScriptHash(),
            Scopes: transaction.Global,
        },
        Account: accounts[0],
    }})

    for _, w := range regularWallets {
        if w == nil {
            continue
        }
        account, err := w.GetAccount()
        if err != nil {
            return nil, err
        }

        accounts = append(accounts, account...)
        signers = append(signers, actor.SignerAccount{
            Signer: transaction.Signer{
                Account: account[0].Contract.ScriptHash(),
                Scopes: transaction.Global,
            },
            Account: account[0],
        })
    }

    act, err := actor.New(c, signers)
    if err != nil {
        return nil, err
    }
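With the new signature, callers of NewLocalActor describe the signing wallets explicitly instead of passing a cobra command and an account name: an optional alphabet wallet set (carrying the account label the target contract expects) plus any number of regular wallets, each contributing one signer. A minimal sketch of the new call shape, mirroring the nnsWriter change later in this comparison and assuming it lives inside the frostfs-adm module (newCommitteeActor and the path arguments are illustrative):

package example

import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
)

// newCommitteeActor builds a LocalActor from an alphabet wallets directory and
// one extra regular wallet; each wallet set adds a Global-scope signer.
func newCommitteeActor(c actor.RPCActor, alphabetDir, extraWalletPath string) (*helper.LocalActor, error) {
    alphabet := &helper.AlphabetWallets{Path: alphabetDir, Label: constants.ConsensusAccountName}
    return helper.NewLocalActor(c, alphabet, &helper.RegularWallets{Path: extraWalletPath})
}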
@@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any
        h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker)
    }
    if method != constants.UpdateMethodName || err == nil && !found {
        h, found, err = GetFrostfsIDAdmin(viper.GetViper())
        h, found, err = getFrostfsIDAdmin(viper.GetViper())
    }
    if err != nil {
        return nil, err
@@ -11,7 +11,7 @@ import (

const frostfsIDAdminConfigKey = "frostfsid.admin"

func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) {
    admin := v.GetString(frostfsIDAdminConfigKey)
    if admin == "" {
        return util.Uint160{}, false, nil
@@ -0,0 +1,53 @@
package helper

import (
    "encoding/hex"
    "testing"

    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/nspcc-dev/neo-go/pkg/encoding/address"
    "github.com/spf13/viper"
    "github.com/stretchr/testify/require"
)

func TestFrostfsIDConfig(t *testing.T) {
    pks := make([]*keys.PrivateKey, 4)
    for i := range pks {
        pk, err := keys.NewPrivateKey()
        require.NoError(t, err)
        pks[i] = pk
    }

    fmts := []string{
        pks[0].GetScriptHash().StringLE(),
        address.Uint160ToString(pks[1].GetScriptHash()),
        hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
        hex.EncodeToString(pks[3].PublicKey().Bytes()),
    }

    for i := range fmts {
        v := viper.New()
        v.Set("frostfsid.admin", fmts[i])

        actual, found, err := getFrostfsIDAdmin(v)
        require.NoError(t, err)
        require.True(t, found)
        require.Equal(t, pks[i].GetScriptHash(), actual)
    }

    t.Run("bad key", func(t *testing.T) {
        v := viper.New()
        v.Set("frostfsid.admin", "abc")

        _, found, err := getFrostfsIDAdmin(v)
        require.Error(t, err)
        require.True(t, found)
    })
    t.Run("missing key", func(t *testing.T) {
        v := viper.New()

        _, found, err := getFrostfsIDAdmin(v)
        require.NoError(t, err)
        require.False(t, found)
    })
}
@@ -134,12 +134,12 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex
        return nil, err
    }

    accounts, err := createWalletAccounts(wallets)
    accounts, err := getSingleAccounts(wallets)
    if err != nil {
        return nil, err
    }

    cliCtx, err := DefaultClientContext(c, committeeAcc)
    cliCtx, err := defaultClientContext(c, committeeAcc)
    if err != nil {
        return nil, fmt.Errorf("client context: %w", err)
    }

@@ -191,7 +191,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet)
    }
    c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String())
    } else {
        c, err = GetN3Client(v)
        c, err = NewRemoteClient(v)
    }
    if err != nil {
        return nil, fmt.Errorf("can't create N3 client: %w", err)

@@ -211,7 +211,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) {
    return ctrPath, nil
}

func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) {
    accounts := make([]*wallet.Account, len(wallets))
    for i, w := range wallets {
        acc, err := GetWalletAccount(w, constants.SingleAccountName)
@@ -58,17 +58,59 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
        return nil, err
    }

    m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount))
    accounts := make([]*wallet.Account, len(wallets))
    for i := range accounts {
        accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName)
        if err != nil {
            return nil, err
    go bc.Run()

    accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets)
    if err != nil {
        return nil, err
    }

    if cmd.Name() != "init" {
        if err := restoreDump(bc, dumpPath); err != nil {
            return nil, fmt.Errorf("restore dump: %w", err)
        }
    }

    return &LocalClient{
        bc: bc,
        dumpPath: dumpPath,
        accounts: accounts,
    }, nil
}

func restoreDump(bc *core.Blockchain, dumpPath string) error {
    f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
    if err != nil {
        return fmt.Errorf("can't open local dump: %w", err)
    }
    defer f.Close()

    r := io.NewBinReaderFromIO(f)

    var skip uint32
    if bc.BlockHeight() != 0 {
        skip = bc.BlockHeight() + 1
    }

    count := r.ReadU32LE() - skip
    if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
        return err
    }
    return nil
}

func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) {
    accounts := make([]*wallet.Account, len(wallets))
    for i := range accounts {
        acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName)
        if err != nil {
            return nil, err
        }
        accounts[i] = acc
    }

    indexMap := make(map[string]int)
    for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee {
    for i, pub := range cfg.StandbyCommittee {
        indexMap[pub] = i
    }

@@ -77,37 +119,12 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
        pj := accounts[j].PrivateKey().PublicKey().Bytes()
        return indexMap[string(pi)] < indexMap[string(pj)]
    })
    sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool {
    sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool {
        return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1
    })

    go bc.Run()

    if cmd.Name() != "init" {
        f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
        if err != nil {
            return nil, fmt.Errorf("can't open local dump: %w", err)
        }
        defer f.Close()

        r := io.NewBinReaderFromIO(f)

        var skip uint32
        if bc.BlockHeight() != 0 {
            skip = bc.BlockHeight() + 1
        }

        count := r.ReadU32LE() - skip
        if err := chaindump.Restore(bc, r, skip, count, nil); err != nil {
            return nil, fmt.Errorf("can't restore local dump: %w", err)
        }
    }

    return &LocalClient{
        bc: bc,
        dumpPath: dumpPath,
        accounts: accounts[:m],
    }, nil
    m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount))
    return accounts[:m], nil
}

func (l *LocalClient) GetBlockCount() (uint32, error) {

@@ -128,11 +145,6 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul
    return &a, nil
}

func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) {
    // not used by `morph init` command
    panic("unexpected call")
}

// InvokeFunction is implemented via `InvokeScript`.
func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) {
    var err error

@@ -296,13 +308,7 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer)
}

func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) {
    // We need to test that transaction was formed correctly to catch as many errors as we can.
    bs := tx.Bytes()
    _, err := transaction.NewTransactionFromBytes(bs)
    if err != nil {
        return tx.Hash(), fmt.Errorf("invalid transaction: %w", err)
    }

    tx = tx.Copy()
    l.transactions = append(l.transactions, tx)
    return tx.Hash(), nil
}
@@ -10,7 +10,6 @@ import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
    "github.com/nspcc-dev/neo-go/pkg/core/state"
    "github.com/nspcc-dev/neo-go/pkg/core/transaction"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"

@@ -25,15 +24,10 @@ import (
// Client represents N3 client interface capable of test-invoking scripts
// and sending signed transactions to chain.
type Client interface {
    invoker.RPCInvoke
    actor.RPCActor

    GetBlockCount() (uint32, error)
    GetNativeContracts() ([]state.Contract, error)
    GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
    GetVersion() (*result.Version, error)
    SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
    GetCommittee() (keys.PublicKeys, error)
    CalculateNetworkFee(tx *transaction.Transaction) (int64, error)
}

type HashVUBPair struct {

@@ -48,7 +42,7 @@ type ClientContext struct {
    SentTxs []HashVUBPair
}

func GetN3Client(v *viper.Viper) (Client, error) {
func NewRemoteClient(v *viper.Viper) (Client, error) {
    // number of opened connections
    // by neo-go client per one host
    const (

@@ -88,8 +82,14 @@ func GetN3Client(v *viper.Viper) (Client, error) {
    return c, nil
}

func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
    commAct, err := NewActor(c, committeeAcc)
func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) {
    commAct, err := actor.New(c, []actor.SignerAccount{{
        Signer: transaction.Signer{
            Account: committeeAcc.Contract.ScriptHash(),
            Scopes: transaction.Global,
        },
        Account: committeeAcc,
    }})
    if err != nil {
        return nil, err
    }
@@ -14,16 +14,36 @@ import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
    "github.com/nspcc-dev/neo-go/cli/input"
    "github.com/nspcc-dev/neo-go/pkg/core/state"
    "github.com/nspcc-dev/neo-go/pkg/core/transaction"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
    "github.com/nspcc-dev/neo-go/pkg/wallet"
    "github.com/spf13/viper"
)

func getRegularWallet(walletPath string) (*wallet.Wallet, error) {
    w, err := wallet.NewWalletFromFile(walletPath)
    if err != nil {
        return nil, err
    }

    password, err := input.ReadPassword("Enter password for wallet:")
    if err != nil {
        return nil, fmt.Errorf("can't fetch password: %w", err)
    }

    for i := range w.Accounts {
        if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil {
            err = fmt.Errorf("can't unlock wallet: %w", err)
            break
        }
    }

    return w, err
}

func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) {
    wallets, err := openAlphabetWallets(v, walletDir)
    if err != nil {

@@ -53,7 +73,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
    if errors.Is(err, os.ErrNotExist) {
        err = nil
    } else {
        err = fmt.Errorf("can't open wallet: %w", err)
        err = fmt.Errorf("can't open alphabet wallet: %w", err)
    }
    break
    }

@@ -87,16 +107,6 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
    return wallets, nil
}

func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) {
    return actor.New(c, []actor.SignerAccount{{
        Signer: transaction.Signer{
            Account: committeeAcc.Contract.ScriptHash(),
            Scopes: transaction.Global,
        },
        Account: committeeAcc,
    }})
}

func ReadContract(ctrPath, ctrName string) (*ContractState, error) {
    rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef"))
    if err != nil {
@@ -22,15 +22,14 @@ import (
)

const (
    gasInitialTotalSupply = 30000000 * native.GASFactor
    // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node.
    initialAlphabetGASAmount = 10_000 * native.GASFactor
    // initialProxyGASAmount represents the amount of GAS given to a proxy contract.
    initialProxyGASAmount = 50_000 * native.GASFactor
)

func initialCommitteeGASAmount(c *helper.InitializeContext) int64 {
    return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 {
    return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
}

func transferFunds(c *helper.InitializeContext) error {

@@ -42,6 +41,11 @@ func transferFunds(c *helper.InitializeContext) error {
        return err
    }

    version, err := c.Client.GetVersion()
    if err != nil {
        return err
    }

    var transfers []transferTarget
    for _, acc := range c.Accounts {
        to := acc.Contract.ScriptHash()

@@ -59,7 +63,7 @@ func transferFunds(c *helper.InitializeContext) error {
        transferTarget{
            Token: gas.Hash,
            Address: c.CommitteeAcc.Contract.ScriptHash(),
            Amount: initialCommitteeGASAmount(c),
            Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)),
        },
        transferTarget{
            Token: neo.Hash,

@@ -83,16 +87,23 @@ func transferFunds(c *helper.InitializeContext) error {
// transferFundsFinished checks balances of accounts we transfer GAS to.
// The stage is considered finished if the balance is greater than the half of what we need to transfer.
func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
    acc := c.Accounts[0]

    r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
    res, err := r.BalanceOf(acc.Contract.ScriptHash())
    if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 {
    res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash())
    if err != nil {
        return false, err
    }

    version, err := c.Client.GetVersion()
    if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 {
        return false, err
    }

    res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
    return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err
    if err != nil {
        return false, err
    }

    return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err
}

func transferGASToProxy(c *helper.InitializeContext) error {
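transferFundsFinished now reads the consensus and committee GAS balances through a NEP-17 reader and compares them against thresholds derived from the node's reported InitialGasDistribution. The balance check itself reduces to a small reusable step, sketched below (gasBalanceAtLeast is an illustrative helper, not part of the diff):

package example

import (
    "math/big"

    "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
    "github.com/nspcc-dev/neo-go/pkg/util"
)

// gasBalanceAtLeast reads a GAS (NEP-17) balance via a read-only invoker and
// reports whether it reaches the given threshold.
func gasBalanceAtLeast(inv *invoker.Invoker, acc util.Uint160, threshold *big.Int) (bool, error) {
    r := nep17.NewReader(inv, gas.Hash)
    bal, err := r.BalanceOf(acc)
    if err != nil {
        return false, err
    }
    return bal.Cmp(threshold) >= 0, nil
}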
@@ -13,7 +13,7 @@ import (
)

func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)

    inv := invoker.New(c, nil)
@@ -6,7 +6,9 @@ import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
    commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
    "github.com/nspcc-dev/neo-go/pkg/wallet"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
)

func initRegisterCmd() {

@@ -19,6 +21,7 @@ func initRegisterCmd() {
    registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter")
    registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter")
    registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter")
    registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)

    _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag)
}

@@ -48,6 +51,7 @@ func initDeleteCmd() {
    deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
    deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
    deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
    deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)

    _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
}

@@ -62,3 +66,28 @@ func deleteDomain(cmd *cobra.Command, _ []string) {
    commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
    cmd.Println("Domain deleted successfully")
}

func initSetAdminCmd() {
    Cmd.AddCommand(setAdminCmd)
    setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
    setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
    setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
    setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)
    setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage)
    _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath)

    _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag)
}

func setAdmin(cmd *cobra.Command, _ []string) {
    c, actor := nnsWriter(cmd)

    name, _ := cmd.Flags().GetString(nnsNameFlag)
    w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath))
    commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err)
    h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash())

    _, err = actor.Wait(h, vub, err)
    commonCmd.ExitOnErr(cmd, "Set admin error: %w", err)
    cmd.Println("Set admin successfully")
}
@@ -1,7 +1,11 @@
package nns

import (
    "errors"

    client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
    commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"

@@ -13,10 +17,35 @@ import (

func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
    v := viper.GetViper()
    c, err := helper.GetN3Client(v)
    c, err := helper.NewRemoteClient(v)
    commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)

    ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName)
    alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag))
    walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath))
    adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath))

    var (
        alphabet *helper.AlphabetWallets
        regularWallets []*helper.RegularWallets
    )

    if alphabetWalletPath != "" {
        alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName}
    }

    if walletPath != "" {
        regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath})
    }

    if adminWalletPath != "" {
        regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath})
    }

    if alphabet == nil && regularWallets == nil {
        commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided"))
    }

    ac, err := helper.NewLocalActor(c, alphabet, regularWallets...)
    commonCmd.ExitOnErr(cmd, "can't create actor: %w", err)

    r := management.NewReader(ac.Invoker)

@@ -26,7 +55,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) {
}

func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) {
    c, err := helper.GetN3Client(viper.GetViper())
    c, err := helper.NewRemoteClient(viper.GetViper())
    commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err)

    inv := invoker.New(c, nil)
@@ -19,6 +19,7 @@ func initAddRecordCmd() {
    addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
    addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
    addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
    addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)

    _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag)
    _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag)

@@ -40,6 +41,7 @@ func initDelRecordsCmd() {
    delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
    delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
    delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
    delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)

    _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag)
    _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)

@@ -52,6 +54,7 @@ func initDelRecordCmd() {
    delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
    delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
    delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
    delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage)

    _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
    _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)
@ -39,6 +39,7 @@ var (
|
|||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
|
||||
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
|
||||
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
|
||||
},
|
||||
Run: registerDomain,
|
||||
}
|
||||
|
@ -48,6 +49,7 @@ var (
|
|||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
|
||||
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
|
||||
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
|
||||
},
|
||||
Run: deleteDomain,
|
||||
}
|
||||
|
@ -75,6 +77,7 @@ var (
|
|||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
|
||||
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
|
||||
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
|
||||
},
|
||||
Run: addRecord,
|
||||
}
|
||||
|
@ -92,6 +95,7 @@ var (
|
|||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
|
||||
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
|
||||
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
|
||||
},
|
||||
Run: delRecords,
|
||||
}
|
||||
|
@ -101,9 +105,21 @@ var (
|
|||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
|
||||
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
|
||||
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
|
||||
},
|
||||
Run: delRecord,
|
||||
}
|
||||
setAdminCmd = &cobra.Command{
|
||||
Use: "set-admin",
|
||||
Short: "Sets admin for domain",
|
||||
PreRun: func(cmd *cobra.Command, _ []string) {
|
||||
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
|
||||
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
|
||||
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
|
||||
_ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath))
|
||||
},
|
||||
Run: setAdmin,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -116,4 +132,5 @@ func init() {
|
|||
initGetRecordsCmd()
|
||||
initDelRecordsCmd()
|
||||
initDelRecordCmd()
|
||||
initSetAdminCmd()
|
||||
}
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
|
||||
|
@ -41,7 +40,8 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
|
|||
}
|
||||
|
||||
accHash := w.GetChangeAddress()
|
||||
if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
|
||||
addr, _ := cmd.Flags().GetString(walletAccountFlag)
|
||||
if addr != "" {
|
||||
accHash, err = address.StringToUint160(addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid address: %s", addr)
|
||||
|
@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
|
|||
return fmt.Errorf("can't find account for %s", accHash)
|
||||
}
|
||||
|
||||
prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
|
||||
prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
|
||||
pass, err := input.ReadPassword(prompt)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get password: %v", err)
|
||||
|
@ -73,23 +73,16 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
till := int64(defaultNotaryDepositLifetime)
|
||||
tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if tillStr != "" {
|
||||
till, err = strconv.ParseInt(tillStr, 10, 64)
|
||||
if err != nil || till <= 0 {
|
||||
return errInvalidNotaryDepositLifetime
|
||||
}
|
||||
till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
|
||||
if till <= 0 {
|
||||
return errInvalidNotaryDepositLifetime
|
||||
}
|
||||
|
||||
return transferGas(cmd, acc, accHash, gasAmount, till)
|
||||
}
|
||||
|
||||
func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
|
|||
DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
|
||||
DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
|
||||
DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
|
||||
DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
|
||||
DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error {
|
|||
}
|
||||
|
||||
func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
|
||||
c, err := helper.GetN3Client(viper.GetViper())
|
||||
c, err := helper.NewRemoteClient(viper.GetViper())
|
||||
commonCmd.ExitOnErr(cmd, "can't create N3 client:", err)
|
||||
|
||||
inv := invoker.New(c, nil)
|
||||
|
|
|
@ -20,23 +20,32 @@ const (
|
|||
accountAddressFlag = "account"
|
||||
)
|
||||
|
||||
func parseAddresses(cmd *cobra.Command) []util.Uint160 {
|
||||
var addrs []util.Uint160
|
||||
|
||||
accs, _ := cmd.Flags().GetStringArray(accountAddressFlag)
|
||||
for _, acc := range accs {
|
||||
addr, err := address.StringToUint160(acc)
|
||||
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
|
||||
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
return addrs
|
||||
}
|
||||
|
||||
func addProxyAccount(cmd *cobra.Command, _ []string) {
|
||||
acc, _ := cmd.Flags().GetString(accountAddressFlag)
|
||||
addr, err := address.StringToUint160(acc)
|
||||
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
|
||||
err = processAccount(cmd, addr, "addAccount")
|
||||
addrs := parseAddresses(cmd)
|
||||
err := processAccount(cmd, addrs, "addAccount")
|
||||
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
|
||||
}
|
||||
|
||||
func removeProxyAccount(cmd *cobra.Command, _ []string) {
|
||||
acc, _ := cmd.Flags().GetString(accountAddressFlag)
|
||||
addr, err := address.StringToUint160(acc)
|
||||
commonCmd.ExitOnErr(cmd, "invalid account: %w", err)
|
||||
err = processAccount(cmd, addr, "removeAccount")
|
||||
addrs := parseAddresses(cmd)
|
||||
err := processAccount(cmd, addrs, "removeAccount")
|
||||
commonCmd.ExitOnErr(cmd, "processing error: %w", err)
|
||||
}
|
||||
|
||||
func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error {
|
||||
func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error {
|
||||
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't initialize context: %w", err)
|
||||
|
@ -54,7 +63,9 @@ func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error
|
|||
}
|
||||
|
||||
bw := io.NewBufBinWriter()
|
||||
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
|
||||
for _, addr := range addrs {
|
||||
emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr)
|
||||
}
|
||||
|
||||
if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil {
|
||||
return err
|
||||
|
|
|
@ -29,13 +29,15 @@ var (
|
|||
|
||||
func initProxyAddAccount() {
|
||||
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
|
||||
AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
|
||||
AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
|
||||
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
|
||||
AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
|
||||
}
|
||||
|
||||
func initProxyRemoveAccount() {
|
||||
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
|
||||
RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
|
||||
RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string")
|
||||
_ = AddAccountCmd.MarkFlagRequired(accountAddressFlag)
|
||||
RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
|
||||
}
|
||||
|
||||
|
|
|
@ -11,6 +11,7 @@ import (
|
|||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
@ -105,7 +106,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
|
|||
fatalOnErr(errors.New("can't find account in wallet"))
|
||||
}
|
||||
|
||||
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
|
||||
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
|
||||
fatalOnErr(err)
|
||||
|
||||
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
|
||||
|
@ -410,8 +411,7 @@ func initClient(rpc []string) *rpcclient.Client {
|
|||
var c *rpcclient.Client
|
||||
var err error
|
||||
|
||||
shuffled := make([]string, len(rpc))
|
||||
copy(shuffled, rpc)
|
||||
shuffled := slices.Clone(rpc)
|
||||
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
|
||||
|
||||
for _, endpoint := range shuffled {
|
||||
|
|
|
@ -9,8 +9,6 @@ import (
|
|||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
|
||||
|
@ -78,13 +76,29 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
|
|||
// SortedIDList returns sorted list of identifiers of user's containers.
|
||||
func (x ListContainersRes) SortedIDList() []cid.ID {
|
||||
list := x.cliRes.Containers()
|
||||
sort.Slice(list, func(i, j int) bool {
|
||||
lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
|
||||
return strings.Compare(lhs, rhs) < 0
|
||||
})
|
||||
slices.SortFunc(list, cid.ID.Cmp)
|
||||
return list
|
||||
}
|
||||
|
||||
func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
|
||||
cliPrm := &client.PrmContainerListStream{
|
||||
XHeaders: prm.XHeaders,
|
||||
OwnerID: prm.OwnerID,
|
||||
Session: prm.Session,
|
||||
}
|
||||
rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init container list: %w", err)
|
||||
}
|
||||
|
||||
err = rdr.Iterate(processCnr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read container list: %w", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// PutContainerPrm groups parameters of PutContainer operation.
|
||||
type PutContainerPrm struct {
|
||||
Client *client.Client
|
||||
|
@ -670,9 +684,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
|
|||
return nil, fmt.Errorf("read object list: %w", err)
|
||||
}
|
||||
|
||||
slices.SortFunc(list, func(a, b oid.ID) int {
|
||||
return strings.Compare(a.EncodeToString(), b.EncodeToString())
|
||||
})
|
||||
slices.SortFunc(list, oid.ID.Cmp)
|
||||
|
||||
return &SearchObjectsRes{
|
||||
ids: list,
|
||||
|
|
|
@ -28,7 +28,7 @@ const (
|
|||
RPC = "rpc-endpoint"
|
||||
RPCShorthand = "r"
|
||||
RPCDefault = ""
|
||||
RPCUsage = "Remote node address (as 'multiaddr' or '<host>:<port>')"
|
||||
RPCUsage = "Remote node address ('<host>:<port>' or 'grpcs://<host>:<port>')"
|
||||
|
||||
Timeout = "timeout"
|
||||
TimeoutShorthand = "t"
|
||||
|
|
|
@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
|
|||
|
||||
outputPath, _ := cmd.Flags().GetString(outputFlag)
|
||||
if outputPath != "" {
|
||||
err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
|
||||
err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
|
||||
commonCmd.ExitOnErr(cmd, "dump error: %w", err)
|
||||
} else {
|
||||
fmt.Print("\n")
|
||||
|
|
|
@ -6,8 +6,11 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// flags of list command.
|
||||
|
@ -51,44 +54,60 @@ var listContainersCmd = &cobra.Command{
|
|||
|
||||
var prm internalclient.ListContainersPrm
|
||||
prm.SetClient(cli)
|
||||
prm.Account = idUser
|
||||
|
||||
res, err := internalclient.ListContainers(cmd.Context(), prm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
prm.OwnerID = idUser
|
||||
prmGet := internalclient.GetContainerPrm{
|
||||
Client: cli,
|
||||
}
|
||||
var containerIDs []cid.ID
|
||||
|
||||
err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
|
||||
printContainer(cmd, prmGet, id)
|
||||
return false
|
||||
})
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
|
||||
res, err := internalclient.ListContainers(cmd.Context(), prm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
containerIDs = res.SortedIDList()
|
||||
} else {
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
}
|
||||
|
||||
containerIDs := res.SortedIDList()
|
||||
for _, cnrID := range containerIDs {
|
||||
if flagVarListName == "" && !flagVarListPrintAttr {
|
||||
cmd.Println(cnrID.String())
|
||||
continue
|
||||
}
|
||||
|
||||
prmGet.ClientParams.ContainerID = &cnrID
|
||||
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
|
||||
if err != nil {
|
||||
cmd.Printf(" failed to read attributes: %v\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
cnr := res.Container()
|
||||
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
|
||||
continue
|
||||
}
|
||||
cmd.Println(cnrID.String())
|
||||
|
||||
if flagVarListPrintAttr {
|
||||
cnr.IterateUserAttributes(func(key, val string) {
|
||||
cmd.Printf(" %s: %s\n", key, val)
|
||||
})
|
||||
}
|
||||
printContainer(cmd, prmGet, cnrID)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
|
||||
if flagVarListName == "" && !flagVarListPrintAttr {
|
||||
cmd.Println(id.String())
|
||||
return
|
||||
}
|
||||
|
||||
prmGet.ClientParams.ContainerID = &id
|
||||
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
|
||||
if err != nil {
|
||||
cmd.Printf(" failed to read attributes: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
cnr := res.Container()
|
||||
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
|
||||
return
|
||||
}
|
||||
cmd.Println(id.String())
|
||||
|
||||
if flagVarListPrintAttr {
|
||||
cnr.IterateUserAttributes(func(key, val string) {
|
||||
cmd.Printf(" %s: %s\n", key, val)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func initContainerListContainersCmd() {
|
||||
commonflags.Init(listContainersCmd)
|
||||
|
||||
|
|
|
@ -23,11 +23,11 @@ type policyPlaygroundREPL struct {
|
|||
nodes map[string]netmap.NodeInfo
|
||||
}
|
||||
|
||||
func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) {
|
||||
func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL {
|
||||
return &policyPlaygroundREPL{
|
||||
cmd: cmd,
|
||||
nodes: map[string]netmap.NodeInfo{},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (repl *policyPlaygroundREPL) handleLs(args []string) error {
|
||||
|
@ -246,8 +246,7 @@ var policyPlaygroundCmd = &cobra.Command{
|
|||
Long: `A REPL for testing placement policies.
|
||||
If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. Otherwise, an empty playground is created.`,
|
||||
Run: func(cmd *cobra.Command, _ []string) {
|
||||
repl, err := newPolicyPlaygroundREPL(cmd)
|
||||
commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err)
|
||||
repl := newPolicyPlaygroundREPL(cmd)
|
||||
commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run())
|
||||
},
|
||||
}
|
||||
|
|
|
@ -1,56 +0,0 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const ignoreErrorsFlag = "no-errors"
|
||||
|
||||
var evacuateShardCmd = &cobra.Command{
|
||||
Use: "evacuate",
|
||||
Short: "Evacuate objects from shard",
|
||||
Long: "Evacuate objects from shard to other shards",
|
||||
Run: evacuateShard,
|
||||
Deprecated: "use frostfs-cli control shards evacuation start",
|
||||
}
|
||||
|
||||
func evacuateShard(cmd *cobra.Command, _ []string) {
|
||||
pk := key.Get(cmd)
|
||||
|
||||
req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
|
||||
req.Body.Shard_ID = getShardIDList(cmd)
|
||||
req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
|
||||
|
||||
signRequest(cmd, pk, req)
|
||||
|
||||
cli := getClient(cmd, pk)
|
||||
|
||||
var resp *control.EvacuateShardResponse
|
||||
var err error
|
||||
err = cli.ExecRaw(func(client *client.Client) error {
|
||||
resp, err = control.EvacuateShard(client, req)
|
||||
return err
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount())
|
||||
|
||||
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
|
||||
|
||||
cmd.Println("Shard has successfully been evacuated.")
|
||||
}
|
||||
|
||||
func initControlEvacuateShardCmd() {
|
||||
initControlFlags(evacuateShardCmd)
|
||||
|
||||
flags := evacuateShardCmd.Flags()
|
||||
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
|
||||
flags.Bool(shardAllFlag, false, "Process all shards")
|
||||
flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
|
||||
|
||||
evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
|
||||
}
|
|
@ -17,10 +17,11 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
awaitFlag = "await"
|
||||
noProgressFlag = "no-progress"
|
||||
scopeFlag = "scope"
|
||||
repOneOnlyFlag = "rep-one-only"
|
||||
awaitFlag = "await"
|
||||
noProgressFlag = "no-progress"
|
||||
scopeFlag = "scope"
|
||||
repOneOnlyFlag = "rep-one-only"
|
||||
ignoreErrorsFlag = "no-errors"
|
||||
|
||||
containerWorkerCountFlag = "container-worker-count"
|
||||
objectWorkerCountFlag = "object-worker-count"
|
||||
|
|
|
@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client.
|
|||
var resp *control.GetNetmapStatusResponse
|
||||
var err error
|
||||
err = cli.ExecRaw(func(client *rawclient.Client) error {
|
||||
resp, err = control.GetNetmapStatus(client, req)
|
||||
resp, err = control.GetNetmapStatus(cmd.Context(), client, req)
|
||||
return err
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err)
|
||||
|
|
|
@ -13,7 +13,6 @@ var shardsCmd = &cobra.Command{
|
|||
func initControlShardsCmd() {
|
||||
shardsCmd.AddCommand(listShardsCmd)
|
||||
shardsCmd.AddCommand(setShardModeCmd)
|
||||
shardsCmd.AddCommand(evacuateShardCmd)
|
||||
shardsCmd.AddCommand(evacuationShardCmd)
|
||||
shardsCmd.AddCommand(flushCacheCmd)
|
||||
shardsCmd.AddCommand(doctorCmd)
|
||||
|
@ -23,7 +22,6 @@ func initControlShardsCmd() {
|
|||
|
||||
initControlShardsListCmd()
|
||||
initControlSetShardModeCmd()
|
||||
initControlEvacuateShardCmd()
|
||||
initControlEvacuationShardCmd()
|
||||
initControlFlushCacheCmd()
|
||||
initControlDoctorCmd()
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/spf13/cobra"
|
||||
|
@ -42,7 +41,9 @@ func initObjectHashCmd() {
|
|||
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
|
||||
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
|
||||
|
||||
flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
|
||||
flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...")
|
||||
_ = objectHashCmd.MarkFlagRequired("range")
|
||||
|
||||
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
|
||||
flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
|
||||
}
|
||||
|
@ -66,36 +67,6 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
|
|||
pk := key.GetOrGenerate(cmd)
|
||||
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
|
||||
|
||||
tz := typ == hashTz
|
||||
fullHash := len(ranges) == 0
|
||||
if fullHash {
|
||||
var headPrm internalclient.HeadObjectPrm
|
||||
headPrm.SetClient(cli)
|
||||
Prepare(cmd, &headPrm)
|
||||
headPrm.SetAddress(objAddr)
|
||||
|
||||
// get hash of full payload through HEAD (may be user can do it through dedicated command?)
|
||||
res, err := internalclient.HeadObject(cmd.Context(), headPrm)
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
var cs checksum.Checksum
|
||||
var csSet bool
|
||||
|
||||
if tz {
|
||||
cs, csSet = res.Header().PayloadHomomorphicHash()
|
||||
} else {
|
||||
cs, csSet = res.Header().PayloadChecksum()
|
||||
}
|
||||
|
||||
if csSet {
|
||||
cmd.Println(hex.EncodeToString(cs.Value()))
|
||||
} else {
|
||||
cmd.Println("Missing checksum in object header.")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var hashPrm internalclient.HashPayloadRangesPrm
|
||||
hashPrm.SetClient(cli)
|
||||
Prepare(cmd, &hashPrm)
|
||||
|
@ -104,7 +75,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
|
|||
hashPrm.SetSalt(salt)
|
||||
hashPrm.SetRanges(ranges)
|
||||
|
||||
if tz {
|
||||
if typ == hashTz {
|
||||
hashPrm.TZ()
|
||||
}
|
||||
|
||||
|
|
|
@ -320,7 +320,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem
|
|||
}
|
||||
placementBuilder := placement.NewNetworkMapBuilder(netmap)
|
||||
for _, object := range objects {
|
||||
placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy)
|
||||
placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy)
|
||||
commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err)
|
||||
for repIdx, rep := range placement {
|
||||
numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects()
|
||||
|
@ -358,7 +358,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem
|
|||
placementObjectID = object.ecHeader.parent
|
||||
}
|
||||
placementBuilder := placement.NewNetworkMapBuilder(netmap)
|
||||
placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy)
|
||||
placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy)
|
||||
commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err)
|
||||
|
||||
for _, vector := range placement {
|
||||
|
|
|
@ -46,7 +46,7 @@ func initObjectPatchCmd() {
|
|||
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
|
||||
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
|
||||
|
||||
flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2")
|
||||
flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2")
|
||||
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
|
||||
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
|
||||
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
|
||||
|
@ -99,11 +99,9 @@ func patch(cmd *cobra.Command, _ []string) {
|
|||
}
|
||||
|
||||
func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
|
||||
var rawAttrs []string
|
||||
|
||||
raw := cmd.Flag(newAttrsFlagName).Value.String()
|
||||
if len(raw) != 0 {
|
||||
rawAttrs = strings.Split(raw, ",")
|
||||
rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
|
||||
|
|
|
@ -50,7 +50,7 @@ func initObjectPutCmd() {
|
|||
|
||||
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
|
||||
flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2")
|
||||
flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2")
|
||||
flags.Bool("disable-filename", false, "Do not set well-known filename attribute")
|
||||
flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute")
|
||||
flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object")
|
||||
|
@ -214,11 +214,9 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute {
|
|||
}
|
||||
|
||||
func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) {
|
||||
var rawAttrs []string
|
||||
|
||||
raw := cmd.Flag("attributes").Value.String()
|
||||
if len(raw) != 0 {
|
||||
rawAttrs = strings.Split(raw, ",")
|
||||
rawAttrs, err := cmd.Flags().GetStringSlice("attributes")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes
|
||||
|
|
|
@ -38,7 +38,7 @@ func initObjectRangeCmd() {
|
|||
flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage)
|
||||
_ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag)
|
||||
|
||||
flags.String("range", "", "Range to take data from in the form offset:length")
|
||||
flags.StringSlice("range", nil, "Range to take data from in the form offset:length")
|
||||
flags.String(fileFlag, "", "File to write object payload to. Default: stdout.")
|
||||
flags.Bool(rawFlag, false, rawFlagDesc)
|
||||
}
|
||||
|
@ -195,11 +195,10 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) {
|
|||
}
|
||||
|
||||
func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) {
|
||||
v := cmd.Flag("range").Value.String()
|
||||
if len(v) == 0 {
|
||||
return nil, nil
|
||||
vs, err := cmd.Flags().GetStringSlice("range")
|
||||
if len(vs) == 0 || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vs := strings.Split(v, ",")
|
||||
rs := make([]objectSDK.Range, len(vs))
|
||||
for i := range vs {
|
||||
before, after, found := strings.Cut(vs[i], rangeSep)
|
||||
|
|
|
@ -9,7 +9,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
|
||||
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
|
||||
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
@ -34,11 +33,9 @@ func _client() (tree.TreeServiceClient, error) {
|
|||
|
||||
opts := []grpc.DialOption{
|
||||
grpc.WithChainUnaryInterceptor(
|
||||
metrics.NewUnaryClientInterceptor(),
|
||||
tracing.NewUnaryClientInteceptor(),
|
||||
),
|
||||
grpc.WithChainStreamInterceptor(
|
||||
metrics.NewStreamClientInterceptor(),
|
||||
tracing.NewStreamClientInterceptor(),
|
||||
),
|
||||
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
|
||||
|
|
|
@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) {
|
|||
log.Info(ctx, c.name+" config updated")
|
||||
if err := c.shutdown(ctx); err != nil {
|
||||
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
} else {
|
||||
c.init(ctx)
|
||||
|
|
|
@ -119,12 +119,12 @@ func shutdown(ctx context.Context) {
|
|||
innerRing.Stop(ctx)
|
||||
if err := metricsCmp.shutdown(ctx); err != nil {
|
||||
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
if err := pprofCmp.shutdown(ctx); err != nil {
|
||||
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
|
||||
|
|
|
@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) {
|
|||
log.Info(ctx, c.name+" config updated")
|
||||
if err := c.shutdown(ctx); err != nil {
|
||||
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -2,13 +2,17 @@ package meta
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
|
||||
schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
||||
schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
|
||||
"github.com/rivo/tview"
|
||||
"github.com/spf13/cobra"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
var tuiCMD = &cobra.Command{
|
||||
|
@ -27,6 +31,11 @@ Available search filters:
|
|||
|
||||
var initialPrompt string
|
||||
|
||||
var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{
|
||||
2: schema.MetabaseParserV2,
|
||||
3: schema.MetabaseParserV3,
|
||||
}
|
||||
|
||||
func init() {
|
||||
common.AddComponentPathFlag(tuiCMD, &vPath)
|
||||
|
||||
|
@ -49,12 +58,22 @@ func runTUI(cmd *cobra.Command) error {
|
|||
}
|
||||
defer db.Close()
|
||||
|
||||
schemaVersion, hasVersion := lookupSchemaVersion(cmd, db)
|
||||
if !hasVersion {
|
||||
return errors.New("couldn't detect schema version")
|
||||
}
|
||||
|
||||
metabaseParser, ok := parserPerSchemaVersion[schemaVersion]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown schema version %d", schemaVersion)
|
||||
}
|
||||
|
||||
// Need if app was stopped with Ctrl-C.
|
||||
ctx, cancel := context.WithCancel(cmd.Context())
|
||||
defer cancel()
|
||||
|
||||
app := tview.NewApplication()
|
||||
ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
|
||||
ui := tui.NewUI(ctx, app, db, metabaseParser, nil)
|
||||
|
||||
_ = ui.AddFilter("cid", tui.CIDParser, "CID")
|
||||
_ = ui.AddFilter("oid", tui.OIDParser, "OID")
|
||||
|
@ -69,3 +88,31 @@ func runTUI(cmd *cobra.Command) error {
|
|||
app.SetRoot(ui, true).SetFocus(ui)
|
||||
return app.Run()
|
||||
}
|
||||
|
||||
var (
|
||||
shardInfoBucket = []byte{5}
|
||||
versionRecord = []byte("version")
|
||||
)
|
||||
|
||||
func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) {
|
||||
err := db.View(func(tx *bbolt.Tx) error {
|
||||
bkt := tx.Bucket(shardInfoBucket)
|
||||
if bkt == nil {
|
||||
return nil
|
||||
}
|
||||
rec := bkt.Get(versionRecord)
|
||||
if rec == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
version = binary.LittleEndian.Uint64(rec)
|
||||
ok = true
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
|
|
@ -80,10 +80,15 @@ var (
|
|||
},
|
||||
)
|
||||
|
||||
UserAttributeParser = NewUserAttributeKeyBucketParser(
|
||||
UserAttributeParserV2 = NewUserAttributeKeyBucketParser(
|
||||
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
|
||||
)
|
||||
|
||||
UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys(
|
||||
NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
|
||||
[]string{"FilePath", "S3-Access-Box-CRDT-Name"},
|
||||
)
|
||||
|
||||
PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
|
||||
cidResolver: StrictResolver,
|
||||
oidResolver: StrictResolver,
|
||||
|
@ -108,4 +113,14 @@ var (
|
|||
cidResolver: StrictResolver,
|
||||
oidResolver: LenientResolver,
|
||||
})
|
||||
|
||||
ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{
|
||||
cidResolver: LenientResolver,
|
||||
oidResolver: LenientResolver,
|
||||
})
|
||||
|
||||
ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{
|
||||
cidResolver: StrictResolver,
|
||||
oidResolver: LenientResolver,
|
||||
})
|
||||
)
|
||||
|
|
|
@ -22,27 +22,31 @@ const (
|
|||
Split
|
||||
ContainerCounters
|
||||
ECInfo
|
||||
ExpirationEpochToObject
|
||||
ObjectToExpirationEpoch
|
||||
)
|
||||
|
||||
var x = map[Prefix]string{
|
||||
Graveyard: "Graveyard",
|
||||
Garbage: "Garbage",
|
||||
ToMoveIt: "To Move It",
|
||||
ContainerVolume: "Container Volume",
|
||||
Locked: "Locked",
|
||||
ShardInfo: "Shard Info",
|
||||
Primary: "Primary",
|
||||
Lockers: "Lockers",
|
||||
Tombstone: "Tombstone",
|
||||
Small: "Small",
|
||||
Root: "Root",
|
||||
Owner: "Owner",
|
||||
UserAttribute: "User Attribute",
|
||||
PayloadHash: "Payload Hash",
|
||||
Parent: "Parent",
|
||||
Split: "Split",
|
||||
ContainerCounters: "Container Counters",
|
||||
ECInfo: "EC Info",
|
||||
Graveyard: "Graveyard",
|
||||
Garbage: "Garbage",
|
||||
ToMoveIt: "To Move It",
|
||||
ContainerVolume: "Container Volume",
|
||||
Locked: "Locked",
|
||||
ShardInfo: "Shard Info",
|
||||
Primary: "Primary",
|
||||
Lockers: "Lockers",
|
||||
Tombstone: "Tombstone",
|
||||
Small: "Small",
|
||||
Root: "Root",
|
||||
Owner: "Owner",
|
||||
UserAttribute: "User Attribute",
|
||||
PayloadHash: "Payload Hash",
|
||||
Parent: "Parent",
|
||||
Split: "Split",
|
||||
ContainerCounters: "Container Counters",
|
||||
ECInfo: "EC Info",
|
||||
ExpirationEpochToObject: "Exp. Epoch to Object",
|
||||
ObjectToExpirationEpoch: "Object to Exp. Epoch",
|
||||
}
|
||||
|
||||
func (p Prefix) String() string {
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
|
||||
func (b *PrefixBucket) String() string {
|
||||
return common.FormatSimple(
|
||||
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
|
||||
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string {
|
|||
return fmt.Sprintf(
|
||||
"%s CID %s",
|
||||
common.FormatSimple(
|
||||
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
|
||||
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
|
||||
),
|
||||
common.FormatSimple(b.id.String(), tcell.ColorAqua),
|
||||
)
|
||||
|
@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string {
|
|||
func (b *UserAttributeKeyBucket) String() string {
|
||||
return fmt.Sprintf("%s CID %s ATTR-KEY %s",
|
||||
common.FormatSimple(
|
||||
fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
|
||||
fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime,
|
||||
),
|
||||
common.FormatSimple(
|
||||
fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
|
||||
|
|
|
@ -2,6 +2,7 @@ package buckets
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"slices"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
|
@ -57,10 +58,11 @@ var (
|
|||
)
|
||||
|
||||
var (
|
||||
ErrNotBucket = errors.New("not a bucket")
|
||||
ErrInvalidKeyLength = errors.New("invalid key length")
|
||||
ErrInvalidValueLength = errors.New("invalid value length")
|
||||
ErrInvalidPrefix = errors.New("invalid prefix")
|
||||
ErrNotBucket = errors.New("not a bucket")
|
||||
ErrInvalidKeyLength = errors.New("invalid key length")
|
||||
ErrInvalidValueLength = errors.New("invalid value length")
|
||||
ErrInvalidPrefix = errors.New("invalid prefix")
|
||||
ErrUnexpectedAttributeKey = errors.New("unexpected attribute key")
|
||||
)
|
||||
|
||||
func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
|
||||
|
@ -132,6 +134,10 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa
|
|||
}
|
||||
|
||||
func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
|
||||
return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil)
|
||||
}
|
||||
|
||||
func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser {
|
||||
return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
||||
if value != nil {
|
||||
return nil, nil, ErrNotBucket
|
||||
|
@ -147,6 +153,11 @@ func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
|
|||
return nil, nil, err
|
||||
}
|
||||
b.key = string(key[33:])
|
||||
|
||||
if len(keys) != 0 && !slices.Contains(keys, b.key) {
|
||||
return nil, nil, ErrUnexpectedAttributeKey
|
||||
}
|
||||
|
||||
return &b, next, nil
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,7 +5,30 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
|
||||
)
|
||||
|
||||
var MetabaseParser = common.WithFallback(
|
||||
var MetabaseParserV3 = common.WithFallback(
|
||||
common.Any(
|
||||
buckets.GraveyardParser,
|
||||
buckets.GarbageParser,
|
||||
buckets.ContainerVolumeParser,
|
||||
buckets.LockedParser,
|
||||
buckets.ShardInfoParser,
|
||||
buckets.PrimaryParser,
|
||||
buckets.LockersParser,
|
||||
buckets.TombstoneParser,
|
||||
buckets.SmallParser,
|
||||
buckets.RootParser,
|
||||
buckets.UserAttributeParserV3,
|
||||
buckets.ParentParser,
|
||||
buckets.SplitParser,
|
||||
buckets.ContainerCountersParser,
|
||||
buckets.ECInfoParser,
|
||||
buckets.ExpirationEpochToObjectParser,
|
||||
buckets.ObjectToExpirationEpochParser,
|
||||
),
|
||||
common.RawParser.ToFallbackParser(),
|
||||
)
|
||||
|
||||
var MetabaseParserV2 = common.WithFallback(
|
||||
common.Any(
|
||||
buckets.GraveyardParser,
|
||||
buckets.GarbageParser,
|
||||
|
@ -18,7 +41,7 @@ var MetabaseParser = common.WithFallback(
|
|||
buckets.SmallParser,
|
||||
buckets.RootParser,
|
||||
buckets.OwnerParser,
|
||||
buckets.UserAttributeParser,
|
||||
buckets.UserAttributeParserV2,
|
||||
buckets.PayloadHashParser,
|
||||
buckets.ParentParser,
|
||||
buckets.SplitParser,
|
||||
|
|
|
@ -63,3 +63,11 @@ func (r *ContainerCountersRecord) DetailedString() string {
|
|||
func (r *ECInfoRecord) DetailedString() string {
|
||||
return spew.Sdump(*r)
|
||||
}
|
||||
|
||||
func (r *ExpirationEpochToObjectRecord) DetailedString() string {
|
||||
return spew.Sdump(*r)
|
||||
}
|
||||
|
||||
func (r *ObjectToExpirationEpochRecord) DetailedString() string {
|
||||
return spew.Sdump(*r)
|
||||
}
|
||||
|
|
|
@ -143,3 +143,26 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
|
|||
return common.No
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult {
|
||||
switch typ {
|
||||
case "cid":
|
||||
id := val.(cid.ID)
|
||||
return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No)
|
||||
case "oid":
|
||||
id := val.(oid.ID)
|
||||
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
|
||||
default:
|
||||
return common.No
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult {
|
||||
switch typ {
|
||||
case "oid":
|
||||
id := val.(oid.ID)
|
||||
return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No)
|
||||
default:
|
||||
return common.No
|
||||
}
|
||||
}
|
||||
|
|
|
@ -249,3 +249,45 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e
|
|||
}
|
||||
return &r, nil, nil
|
||||
}
|
||||
|
||||
func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
|
||||
if len(key) != 72 {
|
||||
return nil, nil, ErrInvalidKeyLength
|
||||
}
|
||||
|
||||
var (
|
||||
r ExpirationEpochToObjectRecord
|
||||
err error
|
||||
)
|
||||
|
||||
r.epoch = binary.BigEndian.Uint64(key[:8])
|
||||
if err = r.cnt.Decode(key[8:40]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if err = r.obj.Decode(key[40:]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &r, nil, nil
|
||||
}
|
||||
|
||||
func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
|
||||
if len(key) != 32 {
|
||||
return nil, nil, ErrInvalidKeyLength
|
||||
}
|
||||
if len(value) != 8 {
|
||||
return nil, nil, ErrInvalidValueLength
|
||||
}
|
||||
|
||||
var (
|
||||
r ObjectToExpirationEpochRecord
|
||||
err error
|
||||
)
|
||||
|
||||
if err = r.obj.Decode(key); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
r.epoch = binary.LittleEndian.Uint64(value)
|
||||
|
||||
return &r, nil, nil
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package records
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
|
||||
"github.com/gdamore/tcell/v2"
|
||||
|
@ -133,3 +134,22 @@ func (r *ECInfoRecord) String() string {
|
|||
len(r.ids),
|
||||
)
|
||||
}
|
||||
|
||||
func (r *ExpirationEpochToObjectRecord) String() string {
|
||||
return fmt.Sprintf(
|
||||
"exp. epoch %s %c CID %s OID %s",
|
||||
common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua),
|
||||
tview.Borders.Vertical,
|
||||
common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua),
|
||||
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
|
||||
)
|
||||
}
|
||||
|
||||
func (r *ObjectToExpirationEpochRecord) String() string {
|
||||
return fmt.Sprintf(
|
||||
"OID %s %c exp. epoch %s",
|
||||
common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua),
|
||||
tview.Borders.Vertical,
|
||||
common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua),
|
||||
)
|
||||
}
|
||||
|
|
|
@ -79,4 +79,15 @@ type (
|
|||
id oid.ID
|
||||
ids []oid.ID
|
||||
}
|
||||
|
||||
ExpirationEpochToObjectRecord struct {
|
||||
epoch uint64
|
||||
cnt cid.ID
|
||||
obj oid.ID
|
||||
}
|
||||
|
||||
ObjectToExpirationEpochRecord struct {
|
||||
obj oid.ID
|
||||
epoch uint64
|
||||
}
|
||||
)
|
||||
|
|
|
@ -124,10 +124,7 @@ func (v *BucketsView) loadNodeChildren(
|
|||
path := parentBucket.Path
|
||||
parser := parentBucket.NextParser
|
||||
|
||||
buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
|
||||
|
||||
for item := range buffer {
|
||||
if item.err != nil {
|
||||
|
@ -135,6 +132,7 @@ func (v *BucketsView) loadNodeChildren(
|
|||
}
|
||||
bucket := item.val
|
||||
|
||||
var err error
|
||||
bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -180,10 +178,7 @@ func (v *BucketsView) bucketSatisfiesFilter(
|
|||
defer cancel()
|
||||
|
||||
// Check the current bucket's nested buckets if exist
|
||||
bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
|
||||
|
||||
for item := range bucketsBuffer {
|
||||
if item.err != nil {
|
||||
|
@ -191,6 +186,7 @@ func (v *BucketsView) bucketSatisfiesFilter(
|
|||
}
|
||||
b := item.val
|
||||
|
||||
var err error
|
||||
b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
@ -206,10 +202,7 @@ func (v *BucketsView) bucketSatisfiesFilter(
|
|||
}
|
||||
|
||||
// Check the current bucket's nested records if exist
|
||||
recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
|
||||
|
||||
for item := range recordsBuffer {
|
||||
if item.err != nil {
|
||||
|
@ -217,6 +210,7 @@ func (v *BucketsView) bucketSatisfiesFilter(
|
|||
}
|
||||
r := item.val
|
||||
|
||||
var err error
|
||||
r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
|
||||
if err != nil {
|
||||
return false, err
|
||||
|
|
|
@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
|
|||
func load[T any](
|
||||
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
|
||||
filter func(key, value []byte) bool, transform func(key, value []byte) T,
|
||||
) (<-chan Item[T], error) {
|
||||
) <-chan Item[T] {
|
||||
buffer := make(chan Item[T], bufferSize)
|
||||
|
||||
go func() {
|
||||
|
@ -77,13 +77,13 @@ func load[T any](
|
|||
}
|
||||
}()
|
||||
|
||||
return buffer, nil
|
||||
return buffer
|
||||
}
|
||||
|
||||
func LoadBuckets(
|
||||
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
|
||||
) (<-chan Item[*Bucket], error) {
|
||||
buffer, err := load(
|
||||
) <-chan Item[*Bucket] {
|
||||
buffer := load(
|
||||
ctx, db, path, bufferSize,
|
||||
func(_, value []byte) bool {
|
||||
return value == nil
|
||||
|
@ -98,17 +98,14 @@ func LoadBuckets(
|
|||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
|
||||
}
|
||||
|
||||
return buffer, nil
|
||||
return buffer
|
||||
}
|
||||
|
||||
func LoadRecords(
|
||||
ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
|
||||
) (<-chan Item[*Record], error) {
|
||||
buffer, err := load(
|
||||
) <-chan Item[*Record] {
|
||||
buffer := load(
|
||||
ctx, db, path, bufferSize,
|
||||
func(_, value []byte) bool {
|
||||
return value != nil
|
||||
|
@ -124,11 +121,8 @@ func LoadRecords(
|
|||
}
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't start iterating bucket: %w", err)
|
||||
}
|
||||
|
||||
return buffer, nil
|
||||
return buffer
|
||||
}
|
||||
|
||||
// HasBuckets checks if a bucket has nested buckets. It relies on assumption
|
||||
|
@ -137,24 +131,21 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error)
|
|||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
buffer, err := load(
|
||||
buffer := load(
|
||||
ctx, db, path, 1,
|
||||
nil,
|
||||
func(_, value []byte) []byte { return value },
|
||||
)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
x, ok := <-buffer
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
if x.err != nil {
|
||||
return false, err
|
||||
return false, x.err
|
||||
}
|
||||
if x.val != nil {
|
||||
return false, err
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
|
|
@ -62,10 +62,7 @@ func (v *RecordsView) Mount(ctx context.Context) error {
|
|||
|
||||
ctx, v.onUnmount = context.WithCancel(ctx)
|
||||
|
||||
tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
|
||||
|
||||
v.buffer = make(chan *Record, v.ui.loadBufferSize)
|
||||
go func() {
|
||||
|
@ -73,11 +70,12 @@ func (v *RecordsView) Mount(ctx context.Context) error {
|
|||
|
||||
for item := range tempBuffer {
|
||||
if item.err != nil {
|
||||
v.ui.stopOnError(err)
|
||||
v.ui.stopOnError(item.err)
|
||||
break
|
||||
}
|
||||
record := item.val
|
||||
|
||||
var err error
|
||||
record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
|
||||
if err != nil {
|
||||
v.ui.stopOnError(err)
|
||||
|
|
|
@ -19,6 +19,7 @@ func initAPEManagerService(c *cfg) {
|
|||
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
|
||||
|
||||
execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage,
|
||||
c.cfgMorph.client,
|
||||
apemanager.WithLogger(c.log))
|
||||
sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc)
|
||||
auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -16,7 +17,7 @@ import (
|
|||
"github.com/hashicorp/golang-lru/v2/expirable"
|
||||
)
|
||||
|
||||
type netValueReader[K any, V any] func(K) (V, error)
|
||||
type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error)
|
||||
|
||||
type valueWithError[V any] struct {
|
||||
v V
|
||||
|
@ -49,7 +50,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
|
|||
// updates the value from the network on cache miss or by TTL.
|
||||
//
|
||||
// returned value should not be modified.
|
||||
func (c *ttlNetCache[K, V]) get(key K) (V, error) {
|
||||
func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) {
|
||||
hit := false
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
|
@ -71,7 +72,7 @@ func (c *ttlNetCache[K, V]) get(key K) (V, error) {
|
|||
return val.v, val.e
|
||||
}
|
||||
|
||||
v, err := c.netRdr(key)
|
||||
v, err := c.netRdr(ctx, key)
|
||||
|
||||
c.cache.Add(key, &valueWithError[V]{
|
||||
v: v,
|
||||
|
@ -135,7 +136,7 @@ func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap]
|
|||
// updates the value from the network on cache miss.
|
||||
//
|
||||
// returned value should not be modified.
|
||||
func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
|
||||
func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
|
||||
hit := false
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
|
@ -148,7 +149,7 @@ func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
|
|||
return val, nil
|
||||
}
|
||||
|
||||
val, err := c.netRdr(key)
|
||||
val, err := c.netRdr(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -166,11 +167,11 @@ type ttlContainerStorage struct {
|
|||
}
|
||||
|
||||
func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
|
||||
lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
|
||||
return v.Get(id)
|
||||
lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) {
|
||||
return v.Get(ctx, id)
|
||||
}, metrics.NewCacheMetrics("container"))
|
||||
lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
|
||||
return v.DeletionInfo(id)
|
||||
lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) {
|
||||
return v.DeletionInfo(ctx, id)
|
||||
}, metrics.NewCacheMetrics("container_deletion_info"))
|
||||
|
||||
return ttlContainerStorage{
|
||||
|
@ -188,12 +189,12 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
|
|||
|
||||
// Get returns container value from the cache. If value is missing in the cache
|
||||
// or expired, then it returns value from side chain and updates the cache.
|
||||
func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
|
||||
return s.containerCache.get(cnr)
|
||||
func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) {
|
||||
return s.containerCache.get(ctx, cnr)
|
||||
}
|
||||
|
||||
func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
|
||||
return s.delInfoCache.get(cnr)
|
||||
func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) {
|
||||
return s.delInfoCache.get(ctx, cnr)
|
||||
}
|
||||
|
||||
type lruNetmapSource struct {
|
||||
|
@ -205,8 +206,8 @@ type lruNetmapSource struct {
|
|||
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
|
||||
const netmapCacheSize = 10
|
||||
|
||||
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
|
||||
return v.GetNetMapByEpoch(key)
|
||||
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) {
|
||||
return v.GetNetMapByEpoch(ctx, key)
|
||||
}, metrics.NewCacheMetrics("netmap"))
|
||||
|
||||
return &lruNetmapSource{
|
||||
|
@ -215,16 +216,16 @@ func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
|
||||
return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
|
||||
func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) {
|
||||
return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff)
|
||||
}
|
||||
|
||||
func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
|
||||
return s.getNetMapByEpoch(epoch)
|
||||
func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
|
||||
return s.getNetMapByEpoch(ctx, epoch)
|
||||
}
|
||||
|
||||
func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
|
||||
val, err := s.cache.get(epoch)
|
||||
func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) {
|
||||
val, err := s.cache.get(ctx, epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -232,7 +233,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, err
|
|||
return val, nil
|
||||
}
|
||||
|
||||
func (s *lruNetmapSource) Epoch() (uint64, error) {
|
||||
func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) {
|
||||
return s.netState.CurrentEpoch(), nil
|
||||
}
|
||||
|
||||
|
@ -240,7 +241,10 @@ type cachedIRFetcher struct {
|
|||
*ttlNetCache[struct{}, [][]byte]
|
||||
}
|
||||
|
||||
func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
|
||||
func newCachedIRFetcher(f interface {
|
||||
InnerRingKeys(ctx context.Context) ([][]byte, error)
|
||||
},
|
||||
) cachedIRFetcher {
|
||||
const (
|
||||
irFetcherCacheSize = 1 // we intend to store only one value
|
||||
|
||||
|
@ -254,8 +258,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached
|
|||
)
|
||||
|
||||
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
|
||||
func(_ struct{}) ([][]byte, error) {
|
||||
return f.InnerRingKeys()
|
||||
func(ctx context.Context, _ struct{}) ([][]byte, error) {
|
||||
return f.InnerRingKeys(ctx)
|
||||
}, metrics.NewCacheMetrics("ir_keys"),
|
||||
)
|
||||
|
||||
|
@ -265,8 +269,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached
|
|||
// InnerRingKeys returns the cached list of Inner Ring keys. If the keys are missing
// from the cache or expired, it fetches them from the side chain and updates
// the cache.
|
||||
func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
|
||||
val, err := f.get(struct{}{})
|
||||
func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) {
|
||||
val, err := f.get(ctx, struct{}{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -289,7 +293,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M
|
|||
}
|
||||
}
|
||||
|
||||
func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
|
||||
func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 {
|
||||
const ttl = time.Second * 30
|
||||
|
||||
hit := false
|
||||
|
@ -311,7 +315,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 {
|
|||
c.mtx.Lock()
|
||||
size = c.lastSize
|
||||
if !c.lastUpdated.After(prevUpdated) {
|
||||
size = c.src.MaxObjectSize()
|
||||
size = c.src.MaxObjectSize(ctx)
|
||||
c.lastSize = size
|
||||
c.lastUpdated = time.Now()
|
||||
}
|
||||
|
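MaxObjectSize now receives a context as well, so the refresh path can hand it to the source. Condensed, the method is a TTL-guarded, double-checked refresh; the sketch below pulls the visible lines together, with the struct definition, read-lock prologue, and metrics bookkeeping assumed, since they sit outside this hunk:

package cache

import (
	"context"
	"sync"
	"time"
)

// maxObjectSizeCacheSketch mirrors the fields the hunk above touches; the real
// ttlMaxObjectSizeCache definition is not part of this diff, so the field and
// mutex types here are assumptions.
type maxObjectSizeCacheSketch struct {
	mtx         sync.RWMutex
	lastUpdated time.Time
	lastSize    uint64
	src         interface {
		MaxObjectSize(ctx context.Context) uint64
	}
}

func (c *maxObjectSizeCacheSketch) MaxObjectSize(ctx context.Context) uint64 {
	const ttl = time.Second * 30

	c.mtx.RLock()
	prevUpdated := c.lastUpdated
	size := c.lastSize
	c.mtx.RUnlock()

	if time.Since(prevUpdated) < ttl {
		return size // still fresh, no call to the source
	}

	c.mtx.Lock()
	defer c.mtx.Unlock()
	size = c.lastSize
	if !c.lastUpdated.After(prevUpdated) { // nobody refreshed while we waited for the lock
		size = c.src.MaxObjectSize(ctx)
		c.lastSize = size
		c.lastUpdated = time.Now()
	}
	return size
}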
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -17,7 +18,7 @@ func TestTTLNetCache(t *testing.T) {
|
|||
t.Run("Test Add and Get", func(t *testing.T) {
|
||||
ti := time.Now()
|
||||
cache.set(key, ti, nil)
|
||||
val, err := cache.get(key)
|
||||
val, err := cache.get(context.Background(), key)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ti, val)
|
||||
})
|
||||
|
@ -26,7 +27,7 @@ func TestTTLNetCache(t *testing.T) {
|
|||
ti := time.Now()
|
||||
cache.set(key, ti, nil)
|
||||
time.Sleep(2 * ttlDuration)
|
||||
val, err := cache.get(key)
|
||||
val, err := cache.get(context.Background(), key)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, val, ti)
|
||||
})
|
||||
|
@ -35,20 +36,20 @@ func TestTTLNetCache(t *testing.T) {
|
|||
ti := time.Now()
|
||||
cache.set(key, ti, nil)
|
||||
cache.remove(key)
|
||||
val, err := cache.get(key)
|
||||
val, err := cache.get(context.Background(), key)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, val, ti)
|
||||
})
|
||||
|
||||
t.Run("Test Cache Error", func(t *testing.T) {
|
||||
cache.set("error", time.Now(), errors.New("mock error"))
|
||||
_, err := cache.get("error")
|
||||
_, err := cache.get(context.Background(), "error")
|
||||
require.Error(t, err)
|
||||
require.Equal(t, "mock error", err.Error())
|
||||
})
|
||||
}
|
||||
|
||||
func testNetValueReader(key string) (time.Time, error) {
|
||||
func testNetValueReader(_ context.Context, key string) (time.Time, error) {
|
||||
if key == "error" {
|
||||
return time.Now(), errors.New("mock error")
|
||||
}
|
||||
|
|
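With the context parameter added, testNetValueReader plugs straight into the cache constructor. A hypothetical wiring inside the test, assuming the newNetworkTTLCache signature used in cache.go above (size, TTL, context-aware reader, cache metrics) and the test-level ttlDuration referenced earlier:

	cache := newNetworkTTLCache(10, ttlDuration, testNetValueReader,
		metrics.NewCacheMetrics("test"))
	// val is the time.Time produced by the reader for this key.
	val, err := cache.get(context.Background(), "some-key")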
|
@ -33,6 +33,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
|
||||
internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
|
||||
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
|
||||
|
@ -69,6 +70,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
|
||||
netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
|
@ -134,6 +136,7 @@ type shardCfg struct {
|
|||
refillMetabase bool
|
||||
refillMetabaseWorkersCount int
|
||||
mode shardmode.Mode
|
||||
limiter qos.Limiter
|
||||
|
||||
metaCfg struct {
|
||||
path string
|
||||
|
@ -253,39 +256,42 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
|
|||
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
|
||||
}
|
||||
|
||||
func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
|
||||
var newConfig shardCfg
|
||||
func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error {
|
||||
var target shardCfg
|
||||
|
||||
newConfig.refillMetabase = oldConfig.RefillMetabase()
|
||||
newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
|
||||
newConfig.mode = oldConfig.Mode()
|
||||
newConfig.compress = oldConfig.Compress()
|
||||
newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
|
||||
newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
|
||||
newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
|
||||
newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
|
||||
target.refillMetabase = source.RefillMetabase()
|
||||
target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount()
|
||||
target.mode = source.Mode()
|
||||
target.compress = source.Compress()
|
||||
target.estimateCompressibility = source.EstimateCompressibility()
|
||||
target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold()
|
||||
target.uncompressableContentType = source.UncompressableContentTypes()
|
||||
target.smallSizeObjectLimit = source.SmallSizeLimit()
|
||||
|
||||
a.setShardWriteCacheConfig(&newConfig, oldConfig)
|
||||
a.setShardWriteCacheConfig(&target, source)
|
||||
|
||||
a.setShardPiloramaConfig(c, &newConfig, oldConfig)
|
||||
a.setShardPiloramaConfig(c, &target, source)
|
||||
|
||||
if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
|
||||
if err := a.setShardStorageConfig(&target, source); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a.setMetabaseConfig(&newConfig, oldConfig)
|
||||
a.setMetabaseConfig(&target, source)
|
||||
|
||||
a.setGCConfig(&newConfig, oldConfig)
|
||||
a.setGCConfig(&target, source)
|
||||
if err := a.setLimiter(&target, source); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
|
||||
a.EngineCfg.shards = append(a.EngineCfg.shards, target)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
|
||||
writeCacheCfg := oldConfig.WriteCache()
|
||||
func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) {
|
||||
writeCacheCfg := source.WriteCache()
|
||||
if writeCacheCfg.Enabled() {
|
||||
wc := &newConfig.writecacheCfg
|
||||
wc := &target.writecacheCfg
|
||||
|
||||
wc.enabled = true
|
||||
wc.path = writeCacheCfg.Path()
|
||||
|
@ -298,10 +304,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
|
|||
}
|
||||
}
|
||||
|
||||
func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
|
||||
func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) {
|
||||
if config.BoolSafe(c.Sub("tree"), "enabled") {
|
||||
piloramaCfg := oldConfig.Pilorama()
|
||||
pr := &newConfig.piloramaCfg
|
||||
piloramaCfg := source.Pilorama()
|
||||
pr := &target.piloramaCfg
|
||||
|
||||
pr.enabled = true
|
||||
pr.path = piloramaCfg.Path()
|
||||
|
@ -312,8 +318,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newC
|
|||
}
|
||||
}
|
||||
|
||||
func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
|
||||
blobStorCfg := oldConfig.BlobStor()
|
||||
func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error {
|
||||
blobStorCfg := source.BlobStor()
|
||||
storagesCfg := blobStorCfg.Storages()
|
||||
|
||||
ss := make([]subStorageCfg, 0, len(storagesCfg))
|
||||
|
@ -347,13 +353,13 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol
|
|||
ss = append(ss, sCfg)
|
||||
}
|
||||
|
||||
newConfig.subStorages = ss
|
||||
target.subStorages = ss
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
|
||||
metabaseCfg := oldConfig.Metabase()
|
||||
m := &newConfig.metaCfg
|
||||
func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) {
|
||||
metabaseCfg := source.Metabase()
|
||||
m := &target.metaCfg
|
||||
|
||||
m.path = metabaseCfg.Path()
|
||||
m.perm = metabaseCfg.BoltDB().Perm()
|
||||
|
@ -361,12 +367,25 @@ func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldCon
|
|||
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
|
||||
}
|
||||
|
||||
func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
|
||||
gcCfg := oldConfig.GC()
|
||||
newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
|
||||
newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
|
||||
newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
|
||||
newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
|
||||
func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) {
|
||||
gcCfg := source.GC()
|
||||
target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
|
||||
target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
|
||||
target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
|
||||
target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
|
||||
}
|
||||
|
||||
func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error {
|
||||
limitsConfig := source.Limits()
|
||||
limiter, err := qos.NewLimiter(limitsConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if target.limiter != nil {
|
||||
target.limiter.Close()
|
||||
}
|
||||
target.limiter = limiter
|
||||
return nil
|
||||
}
|
||||
|
||||
// internals contains application-specific internals that are created
|
||||
|
@ -493,6 +512,7 @@ type cfg struct {
|
|||
cfgNetmap cfgNetmap
|
||||
cfgControlService cfgControlService
|
||||
cfgObject cfgObject
|
||||
cfgQoSService cfgQoSService
|
||||
}
|
||||
|
||||
// ReadCurrentNetMap reads network map which has been cached at the
|
||||
|
@ -527,6 +547,8 @@ type cfgGRPC struct {
|
|||
maxChunkSize uint64
|
||||
maxAddrAmount uint64
|
||||
reconnectTimeout time.Duration
|
||||
|
||||
limiter atomic.Pointer[limiting.SemaphoreLimiter]
|
||||
}
|
||||
|
||||
func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) {
|
||||
|
@ -591,8 +613,6 @@ type cfgMorph struct {
|
|||
|
||||
client *client.Client
|
||||
|
||||
notaryEnabled bool
|
||||
|
||||
// TTL of Sidechain cached values. Non-positive value disables caching.
|
||||
cacheTTL time.Duration
|
||||
|
||||
|
@ -608,9 +628,10 @@ type cfgAccounting struct {
|
|||
type cfgContainer struct {
|
||||
scriptHash neogoutil.Uint160
|
||||
|
||||
parsers map[event.Type]event.NotificationParser
|
||||
subscribers map[event.Type][]event.Handler
|
||||
workerPool util.WorkerPool // pool for asynchronous handlers
|
||||
parsers map[event.Type]event.NotificationParser
|
||||
subscribers map[event.Type][]event.Handler
|
||||
workerPool util.WorkerPool // pool for asynchronous handlers
|
||||
containerBatchSize uint32
|
||||
}
|
||||
|
||||
type cfgFrostfsID struct {
|
||||
|
@ -664,10 +685,6 @@ type cfgAccessPolicyEngine struct {
|
|||
}
|
||||
|
||||
type cfgObjectRoutines struct {
|
||||
putRemote *ants.Pool
|
||||
|
||||
putLocal *ants.Pool
|
||||
|
||||
replication *ants.Pool
|
||||
}
|
||||
|
||||
|
@ -699,8 +716,7 @@ func initCfg(appCfg *config.Config) *cfg {
|
|||
|
||||
netState.metrics = c.metricsCollector
|
||||
|
||||
logPrm, err := c.loggerPrm()
|
||||
fatalOnErr(err)
|
||||
logPrm := c.loggerPrm()
|
||||
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
|
||||
log, err := logger.NewLogger(logPrm)
|
||||
fatalOnErr(err)
|
||||
|
@ -853,14 +869,14 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
|
|||
}
|
||||
}
|
||||
|
||||
func initCfgGRPC() cfgGRPC {
|
||||
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
|
||||
maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
|
||||
func initCfgGRPC() (cfg cfgGRPC) {
|
||||
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
|
||||
maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
|
||||
|
||||
return cfgGRPC{
|
||||
maxChunkSize: maxChunkSize,
|
||||
maxAddrAmount: maxAddrAmount,
|
||||
}
|
||||
cfg.maxChunkSize = maxChunkSize
|
||||
cfg.maxAddrAmount = maxAddrAmount
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func initCfgObject(appCfg *config.Config) cfgObject {
|
||||
|
@ -1056,11 +1072,12 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
|
|||
|
||||
return pool
|
||||
}),
|
||||
shard.WithLimiter(shCfg.limiter),
|
||||
}
|
||||
return sh
|
||||
}
|
||||
|
||||
func (c *cfg) loggerPrm() (*logger.Prm, error) {
|
||||
func (c *cfg) loggerPrm() *logger.Prm {
|
||||
// check if it has been inited before
|
||||
if c.dynamicConfiguration.logger == nil {
|
||||
c.dynamicConfiguration.logger = new(logger.Prm)
|
||||
|
@ -1079,7 +1096,7 @@ func (c *cfg) loggerPrm() (*logger.Prm, error) {
|
|||
}
|
||||
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
|
||||
|
||||
return c.dynamicConfiguration.logger, nil
|
||||
return c.dynamicConfiguration.logger
|
||||
}
|
||||
|
||||
func (c *cfg) LocalAddress() network.AddressGroup {
|
||||
|
@ -1121,7 +1138,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
|
|||
err := ls.Close(context.WithoutCancel(ctx))
|
||||
if err != nil {
|
||||
c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
|
||||
zap.String("error", err.Error()),
|
||||
zap.Error(err),
|
||||
)
|
||||
} else {
|
||||
c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
|
||||
|
@ -1148,7 +1165,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
|
|||
c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
|
||||
|
||||
cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
|
||||
if cacheSize > 0 {
|
||||
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
|
||||
morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
|
||||
}
|
||||
|
||||
|
@ -1167,21 +1184,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
|
|||
func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
|
||||
var err error
|
||||
|
||||
optNonBlocking := ants.WithNonblocking(true)
|
||||
|
||||
putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote()
|
||||
pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking)
|
||||
fatalOnErr(err)
|
||||
|
||||
putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal()
|
||||
pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking)
|
||||
fatalOnErr(err)
|
||||
|
||||
replicatorPoolSize := replicatorconfig.PoolSize(cfg)
|
||||
if replicatorPoolSize <= 0 {
|
||||
replicatorPoolSize = putRemoteCapacity
|
||||
}
|
||||
|
||||
pool.replication, err = ants.NewPool(replicatorPoolSize)
|
||||
fatalOnErr(err)
|
||||
|
||||
|
@ -1207,11 +1210,11 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) {
|
|||
}
|
||||
|
||||
func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
|
||||
ni, err := c.netmapLocalNodeState(epoch)
|
||||
ni, err := c.netmapLocalNodeState(ctx, epoch)
|
||||
if err != nil {
|
||||
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
|
||||
zap.Uint64("epoch", epoch),
|
||||
zap.String("error", err.Error()))
|
||||
zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1221,9 +1224,9 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
|
|||
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
|
||||
// with the binary-encoded information from the current node's configuration.
|
||||
// The state is set using the provided setter which MUST NOT be nil.
|
||||
func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
|
||||
func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
|
||||
ni := c.cfgNodeInfo.localInfo
|
||||
stateSetter(&ni)
|
||||
ni.SetStatus(state)
|
||||
|
||||
prm := nmClient.AddPeerPrm{}
|
||||
prm.SetNodeInfo(ni)
|
||||
|
@ -1233,9 +1236,7 @@ func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.N
|
|||
|
||||
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
|
||||
func bootstrapOnline(ctx context.Context, c *cfg) error {
|
||||
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
|
||||
ni.SetStatus(netmap.Online)
|
||||
})
|
||||
return c.bootstrapWithState(ctx, netmap.Online)
|
||||
}
|
||||
|
||||
// bootstrap calls bootstrapWithState with:
|
||||
|
@ -1246,9 +1247,7 @@ func (c *cfg) bootstrap(ctx context.Context) error {
|
|||
st := c.cfgNetmap.state.controlNetmapStatus()
|
||||
if st == control.NetmapStatus_MAINTENANCE {
|
||||
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
|
||||
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
|
||||
ni.SetStatus(netmap.Maintenance)
|
||||
})
|
||||
return c.bootstrapWithState(ctx, netmap.Maintenance)
|
||||
}
|
||||
|
||||
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,
|
||||
|
@ -1339,11 +1338,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
|
|||
|
||||
// Logger
|
||||
|
||||
logPrm, err := c.loggerPrm()
|
||||
if err != nil {
|
||||
c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
|
||||
return
|
||||
}
|
||||
logPrm := c.loggerPrm()
|
||||
|
||||
components := c.getComponents(ctx, logPrm)
|
||||
|
||||
|
@ -1419,17 +1414,13 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
|
|||
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
|
||||
}
|
||||
|
||||
components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }})
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func (c *cfg) reloadPools() error {
|
||||
newSize := objectconfig.Put(c.appCfg).PoolSizeLocal()
|
||||
c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size")
|
||||
|
||||
newSize = objectconfig.Put(c.appCfg).PoolSizeRemote()
|
||||
c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size")
|
||||
|
||||
newSize = replicatorconfig.PoolSize(c.appCfg)
|
||||
newSize := replicatorconfig.PoolSize(c.appCfg)
|
||||
c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size")
|
||||
|
||||
return nil
|
||||
|
@ -1466,7 +1457,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
|
|||
func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
|
||||
return container.NewInfoProvider(func() (container.Source, error) {
|
||||
c.initMorphComponents(ctx)
|
||||
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
|
||||
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
|
||||
|
@ -52,6 +53,5 @@ func (x *Config) Value(name string) any {
|
|||
// It supports only one level of nesting and is intended to be used
|
||||
// to provide default values.
|
||||
func (x *Config) SetDefault(from *Config) {
|
||||
x.defaultPath = make([]string, len(from.path))
|
||||
copy(x.defaultPath, from.path)
|
||||
x.defaultPath = slices.Clone(from.path)
|
||||
}
|
||||
|
|
cmd/frostfs-node/config/container/container.go (new file, 27 lines)
|
@ -0,0 +1,27 @@
|
|||
package containerconfig
|
||||
|
||||
import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
|
||||
const (
|
||||
subsection = "container"
|
||||
listStreamSubsection = "list_stream"
|
||||
|
||||
// ContainerBatchSizeDefault represents the maximum number of containers to send via stream at once.
|
||||
ContainerBatchSizeDefault = 1000
|
||||
)
|
||||
|
||||
// ContainerBatchSize returns the value of "batch_size" config parameter
|
||||
// from "list_stream" subsection of "container" section.
|
||||
//
|
||||
// Returns ContainerBatchSizeDefault if the value is missing or if
|
||||
// the value is not a positive integer.
|
||||
func ContainerBatchSize(c *config.Config) uint32 {
|
||||
if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
|
||||
return ContainerBatchSizeDefault
|
||||
}
|
||||
size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
|
||||
if size == 0 {
|
||||
return ContainerBatchSizeDefault
|
||||
}
|
||||
return size
|
||||
}
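For reference, the configuration shape this reader expects, sketched in YAML; the value 500 mirrors what the example-config test below asserts, and the section and key names come from the constants above:

container:
  list_stream:
    batch_size: 500   # falls back to ContainerBatchSizeDefault (1000) when absent or not positive

By analogy with the env testdata added elsewhere in this diff, the environment form would presumably be FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500.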
|
cmd/frostfs-node/config/container/container_test.go (new file, 27 lines)
|
@ -0,0 +1,27 @@
|
|||
package containerconfig_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
|
||||
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestContainerSection(t *testing.T) {
|
||||
t.Run("defaults", func(t *testing.T) {
|
||||
empty := configtest.EmptyConfig()
|
||||
require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
|
||||
})
|
||||
|
||||
const path = "../../../../config/example/node"
|
||||
fileConfigTest := func(c *config.Config) {
|
||||
require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
|
||||
}
|
||||
|
||||
configtest.ForEachFileType(path, fileConfigTest)
|
||||
t.Run("ENV", func(t *testing.T) {
|
||||
configtest.ForEnvFileType(t, path, fileConfigTest)
|
||||
})
|
||||
}
|
|
@ -11,6 +11,7 @@ import (
|
|||
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
|
||||
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
|
||||
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
|
||||
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
|
||||
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
|
||||
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
|
||||
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
|
||||
|
@ -76,6 +77,7 @@ func TestEngineSection(t *testing.T) {
|
|||
ss := blob.Storages()
|
||||
pl := sc.Pilorama()
|
||||
gc := sc.GC()
|
||||
limits := sc.Limits()
|
||||
|
||||
switch num {
|
||||
case 0:
|
||||
|
@ -134,6 +136,75 @@ func TestEngineSection(t *testing.T) {
|
|||
require.Equal(t, false, sc.RefillMetabase())
|
||||
require.Equal(t, mode.ReadOnly, sc.Mode())
|
||||
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
|
||||
|
||||
readLimits := limits.Read()
|
||||
writeLimits := limits.Write()
|
||||
require.Equal(t, 30*time.Second, readLimits.IdleTimeout)
|
||||
require.Equal(t, int64(10_000), readLimits.MaxRunningOps)
|
||||
require.Equal(t, int64(1_000), readLimits.MaxWaitingOps)
|
||||
require.Equal(t, 45*time.Second, writeLimits.IdleTimeout)
|
||||
require.Equal(t, int64(1_000), writeLimits.MaxRunningOps)
|
||||
require.Equal(t, int64(100), writeLimits.MaxWaitingOps)
|
||||
require.ElementsMatch(t, readLimits.Tags,
|
||||
[]limitsconfig.IOTagConfig{
|
||||
{
|
||||
Tag: "internal",
|
||||
Weight: toPtr(20),
|
||||
ReservedOps: toPtr(1000),
|
||||
LimitOps: toPtr(0),
|
||||
},
|
||||
{
|
||||
Tag: "client",
|
||||
Weight: toPtr(70),
|
||||
ReservedOps: toPtr(10000),
|
||||
},
|
||||
{
|
||||
Tag: "background",
|
||||
Weight: toPtr(5),
|
||||
LimitOps: toPtr(10000),
|
||||
ReservedOps: toPtr(0),
|
||||
},
|
||||
{
|
||||
Tag: "writecache",
|
||||
Weight: toPtr(5),
|
||||
LimitOps: toPtr(25000),
|
||||
},
|
||||
{
|
||||
Tag: "policer",
|
||||
Weight: toPtr(5),
|
||||
LimitOps: toPtr(25000),
|
||||
},
|
||||
})
|
||||
require.ElementsMatch(t, writeLimits.Tags,
|
||||
[]limitsconfig.IOTagConfig{
|
||||
{
|
||||
Tag: "internal",
|
||||
Weight: toPtr(200),
|
||||
ReservedOps: toPtr(100),
|
||||
LimitOps: toPtr(0),
|
||||
},
|
||||
{
|
||||
Tag: "client",
|
||||
Weight: toPtr(700),
|
||||
ReservedOps: toPtr(1000),
|
||||
},
|
||||
{
|
||||
Tag: "background",
|
||||
Weight: toPtr(50),
|
||||
LimitOps: toPtr(1000),
|
||||
ReservedOps: toPtr(0),
|
||||
},
|
||||
{
|
||||
Tag: "writecache",
|
||||
Weight: toPtr(50),
|
||||
LimitOps: toPtr(2500),
|
||||
},
|
||||
{
|
||||
Tag: "policer",
|
||||
Weight: toPtr(50),
|
||||
LimitOps: toPtr(2500),
|
||||
},
|
||||
})
|
||||
case 1:
|
||||
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
|
||||
require.Equal(t, fs.FileMode(0o644), pl.Perm())
|
||||
|
@ -188,6 +259,17 @@ func TestEngineSection(t *testing.T) {
|
|||
require.Equal(t, true, sc.RefillMetabase())
|
||||
require.Equal(t, mode.ReadWrite, sc.Mode())
|
||||
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
|
||||
|
||||
readLimits := limits.Read()
|
||||
writeLimits := limits.Write()
|
||||
require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout)
|
||||
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps)
|
||||
require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps)
|
||||
require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout)
|
||||
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps)
|
||||
require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps)
|
||||
require.Equal(t, 0, len(readLimits.Tags))
|
||||
require.Equal(t, 0, len(writeLimits.Tags))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
@ -201,3 +283,7 @@ func TestEngineSection(t *testing.T) {
|
|||
configtest.ForEnvFileType(t, path, fileConfigTest)
|
||||
})
|
||||
}
|
||||
|
||||
func toPtr(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor"
|
||||
gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc"
|
||||
limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits"
|
||||
metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase"
|
||||
piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama"
|
||||
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache"
|
||||
|
@ -125,6 +126,14 @@ func (x *Config) GC() *gcconfig.Config {
|
|||
)
|
||||
}
|
||||
|
||||
// Limits returns "limits" subsection as a limitsconfig.Config.
|
||||
func (x *Config) Limits() *limitsconfig.Config {
|
||||
return limitsconfig.From(
|
||||
(*config.Config)(x).
|
||||
Sub("limits"),
|
||||
)
|
||||
}
|
||||
|
||||
// RefillMetabase returns the value of "resync_metabase" config parameter.
|
||||
//
|
||||
// Returns false if the value is not a valid bool.
|
||||
|
|
cmd/frostfs-node/config/engine/shard/limits/config.go (new file, 130 lines)
|
@ -0,0 +1,130 @@
|
|||
package limits
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
"github.com/spf13/cast"
|
||||
)
|
||||
|
||||
const (
|
||||
NoLimit int64 = math.MaxInt64
|
||||
DefaultIdleTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
// From wraps config section into Config.
|
||||
func From(c *config.Config) *Config {
|
||||
return (*Config)(c)
|
||||
}
|
||||
|
||||
// Config is a wrapper over the config section
|
||||
// which provides access to Shard's limits configurations.
|
||||
type Config config.Config
|
||||
|
||||
// Read returns the value of "read" limits config section.
|
||||
func (x *Config) Read() OpConfig {
|
||||
return x.parse("read")
|
||||
}
|
||||
|
||||
// Write returns the value of "write" limits config section.
|
||||
func (x *Config) Write() OpConfig {
|
||||
return x.parse("write")
|
||||
}
|
||||
|
||||
func (x *Config) parse(sub string) OpConfig {
|
||||
c := (*config.Config)(x).Sub(sub)
|
||||
var result OpConfig
|
||||
|
||||
if s := config.Int(c, "max_waiting_ops"); s > 0 {
|
||||
result.MaxWaitingOps = s
|
||||
} else {
|
||||
result.MaxWaitingOps = NoLimit
|
||||
}
|
||||
|
||||
if s := config.Int(c, "max_running_ops"); s > 0 {
|
||||
result.MaxRunningOps = s
|
||||
} else {
|
||||
result.MaxRunningOps = NoLimit
|
||||
}
|
||||
|
||||
if s := config.DurationSafe(c, "idle_timeout"); s > 0 {
|
||||
result.IdleTimeout = s
|
||||
} else {
|
||||
result.IdleTimeout = DefaultIdleTimeout
|
||||
}
|
||||
|
||||
result.Tags = tags(c)
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
type OpConfig struct {
|
||||
// MaxWaitingOps returns the value of "max_waiting_ops" config parameter.
|
||||
//
|
||||
// Equals NoLimit if the value is not a positive number.
|
||||
MaxWaitingOps int64
|
||||
// MaxRunningOps returns the value of "max_running_ops" config parameter.
|
||||
//
|
||||
// Equals NoLimit if the value is not a positive number.
|
||||
MaxRunningOps int64
|
||||
// IdleTimeout returns the value of "idle_timeout" config parameter.
|
||||
//
|
||||
// Equals DefaultIdleTimeout if the value is not a valid duration.
|
||||
IdleTimeout time.Duration
|
||||
// Tags returns the value of "tags" config parameter.
|
||||
//
|
||||
// Equals nil if the value is not a valid tags config slice.
|
||||
Tags []IOTagConfig
|
||||
}
|
||||
|
||||
type IOTagConfig struct {
|
||||
Tag string
|
||||
Weight *float64
|
||||
LimitOps *float64
|
||||
ReservedOps *float64
|
||||
}
|
||||
|
||||
func tags(c *config.Config) []IOTagConfig {
|
||||
c = c.Sub("tags")
|
||||
var result []IOTagConfig
|
||||
for i := 0; ; i++ {
|
||||
tag := config.String(c, strconv.Itoa(i)+".tag")
|
||||
if tag == "" {
|
||||
return result
|
||||
}
|
||||
|
||||
var tagConfig IOTagConfig
|
||||
tagConfig.Tag = tag
|
||||
|
||||
v := c.Value(strconv.Itoa(i) + ".weight")
|
||||
if v != nil {
|
||||
w, err := cast.ToFloat64E(v)
|
||||
panicOnErr(err)
|
||||
tagConfig.Weight = &w
|
||||
}
|
||||
|
||||
v = c.Value(strconv.Itoa(i) + ".limit_ops")
|
||||
if v != nil {
|
||||
l, err := cast.ToFloat64E(v)
|
||||
panicOnErr(err)
|
||||
tagConfig.LimitOps = &l
|
||||
}
|
||||
|
||||
v = c.Value(strconv.Itoa(i) + ".reserved_ops")
|
||||
if v != nil {
|
||||
r, err := cast.ToFloat64E(v)
|
||||
panicOnErr(err)
|
||||
tagConfig.ReservedOps = &r
|
||||
}
|
||||
|
||||
result = append(result, tagConfig)
|
||||
}
|
||||
}
|
||||
|
||||
func panicOnErr(err error) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
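A YAML sketch of the subsection this parser consumes, using the shard-0 values the engine-section test above expects (tags truncated to two entries per direction; the example config also lists background, writecache, and policer):

limits:
  read:
    max_running_ops: 10000
    max_waiting_ops: 1000
    idle_timeout: 30s
    tags:
      - tag: internal
        weight: 20
        reserved_ops: 1000
        limit_ops: 0
      - tag: client
        weight: 70
        reserved_ops: 10000
  write:
    max_running_ops: 1000
    max_waiting_ops: 100
    idle_timeout: 45s
    tags:
      - tag: internal
        weight: 200
        reserved_ops: 100
        limit_ops: 0

Omitted or non-positive max_running_ops/max_waiting_ops fall back to NoLimit, and a missing idle_timeout falls back to DefaultIdleTimeout, as the parse function above shows.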
|
|
@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
|
|||
//
|
||||
// Returns PermDefault if the value is not a positive number.
|
||||
func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
|
||||
p := config.UintSafe((*config.Config)(l.cfg), "perm")
|
||||
p := config.UintSafe(l.cfg, "perm")
|
||||
if p == 0 {
|
||||
p = PermDefault
|
||||
}
|
||||
|
@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
|
|||
//
|
||||
// Returns false if the value is not a boolean.
|
||||
func (l PersistentPolicyRulesConfig) NoSync() bool {
|
||||
return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
|
||||
return config.BoolSafe(l.cfg, "no_sync")
|
||||
}
|
||||
|
||||
// CompatibilityMode returns true if need to run node in compatibility with previous versions mode.
|
||||
|
|
|
@ -21,10 +21,6 @@ const (
|
|||
|
||||
putSubsection = "put"
|
||||
getSubsection = "get"
|
||||
|
||||
// PutPoolSizeDefault is a default value of routine pool size to
|
||||
// process object.Put requests in object service.
|
||||
PutPoolSizeDefault = 10
|
||||
)
|
||||
|
||||
// Put returns structure that provides access to "put" subsection of
|
||||
|
@ -35,30 +31,6 @@ func Put(c *config.Config) PutConfig {
|
|||
}
|
||||
}
|
||||
|
||||
// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
|
||||
//
|
||||
// Returns PutPoolSizeDefault if the value is not a positive number.
|
||||
func (g PutConfig) PoolSizeRemote() int {
|
||||
v := config.Int(g.cfg, "remote_pool_size")
|
||||
if v > 0 {
|
||||
return int(v)
|
||||
}
|
||||
|
||||
return PutPoolSizeDefault
|
||||
}
|
||||
|
||||
// PoolSizeLocal returns the value of "local_pool_size" config parameter.
|
||||
//
|
||||
// Returns PutPoolSizeDefault if the value is not a positive number.
|
||||
func (g PutConfig) PoolSizeLocal() int {
|
||||
v := config.Int(g.cfg, "local_pool_size")
|
||||
if v > 0 {
|
||||
return int(v)
|
||||
}
|
||||
|
||||
return PutPoolSizeDefault
|
||||
}
|
||||
|
||||
// SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if it is not defined.
|
||||
func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
|
||||
return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
|
||||
|
|
|
@ -13,8 +13,6 @@ func TestObjectSection(t *testing.T) {
|
|||
t.Run("defaults", func(t *testing.T) {
|
||||
empty := configtest.EmptyConfig()
|
||||
|
||||
require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
|
||||
require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
|
||||
require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
|
||||
require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
|
||||
})
|
||||
|
@ -22,8 +20,6 @@ func TestObjectSection(t *testing.T) {
|
|||
const path = "../../../../config/example/node"
|
||||
|
||||
fileConfigTest := func(c *config.Config) {
|
||||
require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
|
||||
require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
|
||||
require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
|
||||
require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
|
||||
}
|
||||
|
|
cmd/frostfs-node/config/qos/config.go (new file, 46 lines)
|
@ -0,0 +1,46 @@
|
|||
package qos
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
)
|
||||
|
||||
const (
|
||||
subsection = "qos"
|
||||
criticalSubSection = "critical"
|
||||
internalSubSection = "internal"
|
||||
)
|
||||
|
||||
// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config
|
||||
// parameter from "qos" section.
|
||||
//
|
||||
// Returns an empty list if not set.
|
||||
func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys {
|
||||
return authorizedKeys(c, criticalSubSection)
|
||||
}
|
||||
|
||||
// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config
|
||||
// parameter from "qos" section.
|
||||
//
|
||||
// Returns an empty list if not set.
|
||||
func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys {
|
||||
return authorizedKeys(c, internalSubSection)
|
||||
}
|
||||
|
||||
func authorizedKeys(c *config.Config, sub string) keys.PublicKeys {
|
||||
strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys")
|
||||
pubs := make(keys.PublicKeys, 0, len(strKeys))
|
||||
|
||||
for i := range strKeys {
|
||||
pub, err := keys.NewPublicKeyFromString(strKeys[i])
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err))
|
||||
}
|
||||
|
||||
pubs = append(pubs, pub)
|
||||
}
|
||||
|
||||
return pubs
|
||||
}
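The corresponding YAML shape, shown with the keys the test below checks against the example config; note that an invalid key string makes authorizedKeys panic at startup:

qos:
  critical:
    authorized_keys:
      - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
      - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
  internal:
    authorized_keys:
      - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2
      - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a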
|
cmd/frostfs-node/config/qos/config_test.go (new file, 40 lines)
|
@ -0,0 +1,40 @@
|
|||
package qos
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestQoSSection(t *testing.T) {
|
||||
t.Run("defaults", func(t *testing.T) {
|
||||
empty := configtest.EmptyConfig()
|
||||
|
||||
require.Empty(t, CriticalAuthorizedKeys(empty))
|
||||
require.Empty(t, InternalAuthorizedKeys(empty))
|
||||
})
|
||||
|
||||
const path = "../../../../config/example/node"
|
||||
|
||||
criticalPubs := make(keys.PublicKeys, 2)
|
||||
criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
|
||||
criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")
|
||||
|
||||
internalPubs := make(keys.PublicKeys, 2)
|
||||
internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2")
|
||||
internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a")
|
||||
|
||||
fileConfigTest := func(c *config.Config) {
|
||||
require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c))
|
||||
require.Equal(t, internalPubs, InternalAuthorizedKeys(c))
|
||||
}
|
||||
|
||||
configtest.ForEachFileType(path, fileConfigTest)
|
||||
|
||||
t.Run("ENV", func(t *testing.T) {
|
||||
configtest.ForEnvFileType(t, path, fileConfigTest)
|
||||
})
|
||||
}
|
|
@ -11,6 +11,8 @@ const (
|
|||
|
||||
// PutTimeoutDefault is a default timeout of object put request in replicator.
|
||||
PutTimeoutDefault = 5 * time.Second
|
||||
// PoolSizeDefault is a default pool size for put request in replicator.
|
||||
PoolSizeDefault = 10
|
||||
)
|
||||
|
||||
// PutTimeout returns the value of "put_timeout" config parameter
|
||||
|
@ -28,6 +30,13 @@ func PutTimeout(c *config.Config) time.Duration {
|
|||
|
||||
// PoolSize returns the value of "pool_size" config parameter
|
||||
// from "replicator" section.
|
||||
//
|
||||
// Returns PoolSizeDefault if the value is not a positive integer.
|
||||
func PoolSize(c *config.Config) int {
|
||||
return int(config.IntSafe(c.Sub(subsection), "pool_size"))
|
||||
v := int(config.IntSafe(c.Sub(subsection), "pool_size"))
|
||||
if v > 0 {
|
||||
return v
|
||||
}
|
||||
|
||||
return PoolSizeDefault
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) {
|
|||
empty := configtest.EmptyConfig()
|
||||
|
||||
require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty))
|
||||
require.Equal(t, 0, replicatorconfig.PoolSize(empty))
|
||||
require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty))
|
||||
})
|
||||
|
||||
const path = "../../../../config/example/node"
|
||||
|
|
cmd/frostfs-node/config/rpc/config.go (new file, 43 lines)
|
@ -0,0 +1,43 @@
|
|||
package rpcconfig
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
)
|
||||
|
||||
const (
|
||||
subsection = "rpc"
|
||||
limitsSubsection = "limits"
|
||||
)
|
||||
|
||||
type LimitConfig struct {
|
||||
Methods []string
|
||||
MaxOps int64
|
||||
}
|
||||
|
||||
// Limits returns the "limits" config from "rpc" section.
|
||||
func Limits(c *config.Config) []LimitConfig {
|
||||
c = c.Sub(subsection).Sub(limitsSubsection)
|
||||
|
||||
var limits []LimitConfig
|
||||
|
||||
for i := uint64(0); ; i++ {
|
||||
si := strconv.FormatUint(i, 10)
|
||||
sc := c.Sub(si)
|
||||
|
||||
methods := config.StringSliceSafe(sc, "methods")
|
||||
if len(methods) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
maxOps := config.IntSafe(sc, "max_ops")
|
||||
if maxOps == 0 {
|
||||
panic("no max operations for method group")
|
||||
}
|
||||
|
||||
limits = append(limits, LimitConfig{methods, maxOps})
|
||||
}
|
||||
|
||||
return limits
|
||||
}
|
cmd/frostfs-node/config/rpc/config_test.go (new file, 53 lines)
|
@ -0,0 +1,53 @@
|
|||
package rpcconfig
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
|
||||
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestRPCSection(t *testing.T) {
|
||||
t.Run("defaults", func(t *testing.T) {
|
||||
require.Empty(t, Limits(configtest.EmptyConfig()))
|
||||
})
|
||||
|
||||
t.Run("correct config", func(t *testing.T) {
|
||||
const path = "../../../../config/example/node"
|
||||
|
||||
fileConfigTest := func(c *config.Config) {
|
||||
limits := Limits(c)
|
||||
require.Len(t, limits, 2)
|
||||
|
||||
limit0 := limits[0]
|
||||
limit1 := limits[1]
|
||||
|
||||
require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
|
||||
require.Equal(t, limit0.MaxOps, int64(1000))
|
||||
|
||||
require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
|
||||
require.Equal(t, limit1.MaxOps, int64(10000))
|
||||
}
|
||||
|
||||
configtest.ForEachFileType(path, fileConfigTest)
|
||||
|
||||
t.Run("ENV", func(t *testing.T) {
|
||||
configtest.ForEnvFileType(t, path, fileConfigTest)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("no max operations", func(t *testing.T) {
|
||||
const path = "testdata/node"
|
||||
|
||||
fileConfigTest := func(c *config.Config) {
|
||||
require.Panics(t, func() { _ = Limits(c) })
|
||||
}
|
||||
|
||||
configtest.ForEachFileType(path, fileConfigTest)
|
||||
|
||||
t.Run("ENV", func(t *testing.T) {
|
||||
configtest.ForEnvFileType(t, path, fileConfigTest)
|
||||
})
|
||||
})
|
||||
}
|
cmd/frostfs-node/config/rpc/testdata/node.env (new vendored file, 3 lines)
|
@ -0,0 +1,3 @@
|
|||
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
|
||||
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
|
||||
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
|
cmd/frostfs-node/config/rpc/testdata/node.json (new vendored file, 18 lines)
|
@ -0,0 +1,18 @@
|
|||
{
|
||||
"rpc": {
|
||||
"limits": [
|
||||
{
|
||||
"methods": [
|
||||
"/neo.fs.v2.object.ObjectService/PutSingle",
|
||||
"/neo.fs.v2.object.ObjectService/Put"
|
||||
]
|
||||
},
|
||||
{
|
||||
"methods": [
|
||||
"/neo.fs.v2.object.ObjectService/Get"
|
||||
],
|
||||
"max_ops": 10000
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
cmd/frostfs-node/config/rpc/testdata/node.yaml (new vendored file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
rpc:
|
||||
limits:
|
||||
- methods:
|
||||
- /neo.fs.v2.object.ObjectService/PutSingle
|
||||
- /neo.fs.v2.object.ObjectService/Put
|
||||
- methods:
|
||||
- /neo.fs.v2.object.ObjectService/Get
|
||||
max_ops: 10000
|
|
@ -5,6 +5,7 @@ import (
|
|||
"context"
|
||||
"net"
|
||||
|
||||
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
|
||||
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
|
||||
|
@ -28,7 +29,7 @@ import (
|
|||
func initContainerService(_ context.Context, c *cfg) {
|
||||
// container wrapper that tries to invoke notary
// requests if the chain is configured accordingly
|
||||
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
|
||||
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
|
||||
fatalOnErr(err)
|
||||
|
||||
c.shared.cnrClient = wrap
|
||||
|
@ -42,11 +43,12 @@ func initContainerService(_ context.Context, c *cfg) {
|
|||
fatalOnErr(err)
|
||||
|
||||
cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
|
||||
if cacheSize > 0 {
|
||||
if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
|
||||
frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
|
||||
}
|
||||
|
||||
c.shared.frostfsidClient = frostfsIDSubjectProvider
|
||||
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
|
||||
|
||||
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
|
||||
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
|
||||
|
@ -56,7 +58,9 @@ func initContainerService(_ context.Context, c *cfg) {
|
|||
&c.key.PrivateKey,
|
||||
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
|
||||
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
|
||||
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
|
||||
containerService.NewSplitterService(
|
||||
c.cfgContainer.containerBatchSize, c.respSvc,
|
||||
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
|
||||
),
|
||||
)
|
||||
service = containerService.NewAuditService(service, c.log, c.audit)
|
||||
|
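The new SplitterService wraps the execution service so that container listings are streamed back in chunks of containerBatchSize. A rough illustration of the batching idea (splitCIDs is a hypothetical helper for this sketch, not the service's actual code):

// splitCIDs chops a full listing into batches of at most batchSize IDs,
// the way the splitter is expected to feed the list stream.
func splitCIDs(ids []cid.ID, batchSize uint32) [][]cid.ID {
	if batchSize == 0 {
		batchSize = containerconfig.ContainerBatchSizeDefault
	}
	var batches [][]cid.ID
	for len(ids) > 0 {
		n := int(batchSize)
		if n > len(ids) {
			n = len(ids)
		}
		batches = append(batches, ids[:n:n])
		ids = ids[n:]
	}
	return batches
}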
@ -96,7 +100,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
|
|||
// TODO: use owner directly from the event after neofs-contract#256 is resolved,
// but don't forget about the benefit of reading the new container and caching it:
// creation success is most commonly tracked by polling the GET op.
|
||||
cnr, err := cnrSrc.Get(ev.ID)
|
||||
cnr, err := cnrSrc.Get(ctx, ev.ID)
|
||||
if err == nil {
|
||||
containerCache.containerCache.set(ev.ID, cnr, nil)
|
||||
} else {
|
||||
|
@ -217,20 +221,25 @@ type morphContainerReader struct {
|
|||
src containerCore.Source
|
||||
|
||||
lister interface {
|
||||
ContainersOf(*user.ID) ([]cid.ID, error)
|
||||
ContainersOf(context.Context, *user.ID) ([]cid.ID, error)
|
||||
IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error
|
||||
}
|
||||
}
|
||||
|
||||
func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
|
||||
return x.src.Get(id)
|
||||
func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) {
|
||||
return x.src.Get(ctx, id)
|
||||
}
|
||||
|
||||
func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
|
||||
return x.src.DeletionInfo(id)
|
||||
func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) {
|
||||
return x.src.DeletionInfo(ctx, id)
|
||||
}
|
||||
|
||||
func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
|
||||
return x.lister.ContainersOf(id)
|
||||
func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) {
|
||||
return x.lister.ContainersOf(ctx, id)
|
||||
}
|
||||
|
||||
func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error {
|
||||
return x.lister.IterateContainersOf(ctx, id, processCID)
|
||||
}
|
||||
|
||||
type morphContainerWriter struct {
|
||||
|
|
|
@ -7,9 +7,12 @@ import (
|
|||
|
||||
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
|
||||
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
|
||||
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
@ -50,7 +53,14 @@ func initControlService(ctx context.Context, c *cfg) {
|
|||
return
|
||||
}
|
||||
|
||||
c.cfgControlService.server = grpc.NewServer()
|
||||
c.cfgControlService.server = grpc.NewServer(
|
||||
grpc.ChainUnaryInterceptor(
|
||||
qos.NewSetCriticalIOTagUnaryServerInterceptor(),
|
||||
metrics.NewUnaryServerInterceptor(),
|
||||
tracing.NewUnaryServerInterceptor(),
|
||||
),
|
||||
// control service has no stream methods, so no stream interceptors added
|
||||
)
|
||||
|
||||
c.onShutdown(func() {
|
||||
stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -42,7 +43,7 @@ func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int
|
|||
}
|
||||
}
|
||||
|
||||
func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) {
|
||||
func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) {
|
||||
hit := false
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
|
@ -55,7 +56,7 @@ func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, er
|
|||
return result.subject, result.err
|
||||
}
|
||||
|
||||
subj, err := m.subjProvider.GetSubject(addr)
|
||||
subj, err := m.subjProvider.GetSubject(ctx, addr)
|
||||
if err != nil {
|
||||
if m.isCacheableError(err) {
|
||||
m.subjCache.Add(addr, subjectWithError{
|
||||
|
@ -69,7 +70,7 @@ func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, er
|
|||
return subj, nil
|
||||
}
|
||||
|
||||
func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
|
||||
func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) {
|
||||
hit := false
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
|
@ -82,7 +83,7 @@ func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.Sub
|
|||
return result.subject, result.err
|
||||
}
|
||||
|
||||
subjExt, err := m.subjProvider.GetSubjectExtended(addr)
|
||||
subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr)
|
||||
if err != nil {
|
||||
if m.isCacheableError(err) {
|
||||
m.subjExtCache.Add(addr, subjectExtWithError{
|
||||
|
|
|
@ -4,14 +4,19 @@ import (
|
|||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
|
||||
rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||
qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
||||
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
|
||||
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
|
||||
qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
@ -130,12 +135,16 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr
|
|||
serverOpts := []grpc.ServerOption{
|
||||
grpc.MaxRecvMsgSize(maxRecvMsgSize),
|
||||
grpc.ChainUnaryInterceptor(
|
||||
qos.NewUnaryServerInterceptor(),
|
||||
metrics.NewUnaryServerInterceptor(),
|
||||
tracing.NewUnaryServerInterceptor(),
|
||||
qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
|
||||
),
|
||||
grpc.ChainStreamInterceptor(
|
||||
qos.NewStreamServerInterceptor(),
|
||||
metrics.NewStreamServerInterceptor(),
|
||||
tracing.NewStreamServerInterceptor(),
|
||||
qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }),
|
||||
),
|
||||
}
|
||||
|
||||
|
@@ -224,3 +233,54 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger

	l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}

func initRPCLimiter(c *cfg) error {
	var limits []limiting.KeyLimit
	for _, l := range rpcconfig.Limits(c.appCfg) {
		limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps})
	}

	if err := validateRPCLimits(c, limits); err != nil {
		return fmt.Errorf("validate RPC limits: %w", err)
	}

	limiter, err := limiting.NewSemaphoreLimiter(limits)
	if err != nil {
		return fmt.Errorf("create RPC limiter: %w", err)
	}

	c.cfgGRPC.limiter.Store(limiter)
	return nil
}

func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error {
	availableMethods := getAvailableMethods(c.cfgGRPC.servers)
	for _, limit := range limits {
		for _, method := range limit.Keys {
			if _, ok := availableMethods[method]; !ok {
				return fmt.Errorf("set limit on an unknown method %q", method)
			}
		}
	}
	return nil
}

func getAvailableMethods(servers []grpcServer) map[string]struct{} {
	res := make(map[string]struct{})
	for _, server := range servers {
		for _, method := range getMethodsForServer(server.Server) {
			res[method] = struct{}{}
		}
	}
	return res
}

func getMethodsForServer(server *grpc.Server) []string {
	var res []string
	for service, info := range server.GetServiceInfo() {
		for _, method := range info.Methods {
			res = append(res, fmt.Sprintf("/%s/%s", service, method.Name))
		}
	}
	return res
}
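The keys accepted by the limiter are fully-qualified gRPC method names in the "/<service>/<method>" form produced by getMethodsForServer, and validateRPCLimits rejects any name that no registered server exposes. A standalone sketch of building such limits follows; the method name and the numeric limit are illustrative assumptions, not values taken from this diff.

package main

import (
    "log"

    "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
)

func main() {
    // Keys use the same "/<service>/<method>" format that the node derives
    // from grpc.Server.GetServiceInfo(); the method name below is only an example.
    limits := []limiting.KeyLimit{
        {Keys: []string{"/neo.fs.v2.object.ObjectService/Put"}, Limit: 1000},
    }

    lim, err := limiting.NewSemaphoreLimiter(limits)
    if err != nil {
        log.Fatalf("create RPC limiter: %v", err)
    }
    _ = lim // in the node this value is stored and later read by the gRPC interceptors
}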
@@ -101,6 +101,7 @@ func initApp(ctx context.Context, c *cfg) {

	initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) })
	initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
	initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) })

	initAccessPolicyEngine(ctx, c)
	initAndLog(ctx, c, "access policy engine", func(c *cfg) {

@@ -116,6 +117,8 @@ func initApp(ctx context.Context, c *cfg) {
	initAndLog(ctx, c, "apemanager", initAPEManagerService)
	initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) })

	initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) })

	initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) })
}
@@ -134,7 +137,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C
	err := stopper(ctx)
	if err != nil {
		c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
			zap.String("error", err.Error()),
			zap.Error(err),
		)
	}
@@ -35,20 +35,16 @@ func (c *cfg) initMorphComponents(ctx context.Context) {

	lookupScriptHashesInNNS(c) // smart contract auto negotiation

	if c.cfgMorph.notaryEnabled {
		err := c.cfgMorph.client.EnableNotarySupport(
			client.WithProxyContract(
				c.cfgMorph.proxyScriptHash,
			),
		)
		fatalOnErr(err)
	}

	c.log.Info(ctx, logs.FrostFSNodeNotarySupport,
		zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
	err := c.cfgMorph.client.EnableNotarySupport(
		client.WithProxyContract(
			c.cfgMorph.proxyScriptHash,
		),
	)
	fatalOnErr(err)

	wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary())
	c.log.Info(ctx, logs.FrostFSNodeNotarySupport)

	wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0)
	fatalOnErr(err)

	var netmapSource netmap.Source

@@ -100,7 +96,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
	if err != nil {
		c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
			zap.Any("endpoints", addresses),
			zap.String("error", err.Error()),
			zap.Error(err),
		)

		fatalOnErr(err)

@@ -116,15 +112,9 @@ func initMorphClient(ctx context.Context, c *cfg) {
	}

	c.cfgMorph.client = cli
	c.cfgMorph.notaryEnabled = cli.ProbeNotary()
}

func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
	// skip notary deposit in non-notary environments
	if !c.cfgMorph.notaryEnabled {
		return
	}

	tx, vub, err := makeNotaryDeposit(ctx, c)
	fatalOnErr(err)

@@ -161,7 +151,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error
}

func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error {
	if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil {
	if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil {
		return err
	}

@@ -178,7 +168,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
	fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
	if err != nil {
		fromSideChainBlock = 0
		c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
		c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
	}

	subs, err = subscriber.New(ctx, &subscriber.Params{

@@ -233,27 +223,17 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse
	subs map[event.Type][]event.Handler,
) {
	for typ, handlers := range subs {
		pi := event.NotificationParserInfo{}
		pi.SetType(typ)
		pi.SetScriptHash(scHash)

		p, ok := parsers[typ]
		if !ok {
			panic(fmt.Sprintf("missing parser for event %s", typ))
		}

		pi.SetParser(p)

		lis.SetNotificationParser(pi)

		for _, h := range handlers {
			hi := event.NotificationHandlerInfo{}
			hi.SetType(typ)
			hi.SetScriptHash(scHash)
			hi.SetHandler(h)

			lis.RegisterNotificationHandler(hi)
		}
		lis.RegisterNotificationHandler(event.NotificationHandlerInfo{
			Contract: scHash,
			Type:     typ,
			Parser:   p,
			Handlers: handlers,
		})
	}
}

@@ -282,10 +262,6 @@ func lookupScriptHashesInNNS(c *cfg) {
	)

	for _, t := range targets {
		if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled {
			continue // ignore proxy contract if notary disabled
		}

		if emptyHash.Equals(*t.h) {
			*t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName)
			fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err)
@@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
		}
	}

	s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
	s.setControlNetmapStatus(ctrlNetSt)
}

// sets the current node state to the given value. Subsequent cfg.bootstrap

@@ -193,16 +193,14 @@ func addNewEpochNotificationHandlers(c *cfg) {
		}
	})

	if c.cfgMorph.notaryEnabled {
		addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
			_, _, err := makeNotaryDeposit(ctx, c)
			if err != nil {
				c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
					zap.String("error", err.Error()),
				)
			}
		})
	}
	addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) {
		_, _, err := makeNotaryDeposit(ctx, c)
		if err != nil {
			c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
				zap.Error(err),
			)
		}
	})
}

// bootstrapNode adds current node to the Network map.

@@ -241,7 +239,7 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser
// initNetmapState inits current Network map state.
// Must be called after Morph components initialization.
func initNetmapState(ctx context.Context, c *cfg) {
	epoch, err := c.cfgNetmap.wrapper.Epoch()
	epoch, err := c.cfgNetmap.wrapper.Epoch(ctx)
	fatalOnErrDetails("could not initialize current epoch number", err)

	var ni *netmapSDK.NodeInfo

@@ -280,7 +278,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string {
}

func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
	nmNodes, err := c.cfgNetmap.wrapper.GetCandidates()
	nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx)
	if err != nil {
		return nil, err
	}

@@ -293,7 +291,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
		}
	}

	node, err := c.netmapLocalNodeState(epoch)
	node, err := c.netmapLocalNodeState(ctx, epoch)
	if err != nil {
		return nil, err
	}

@@ -314,9 +312,9 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm
	return candidate, nil
}

func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) {
	// calculate current network state
	nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch)
	nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch)
	if err != nil {
		return nil, err
	}

@@ -378,8 +376,8 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro
	return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {})
}

func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) {
	epoch, err := c.netMapSource.Epoch()
func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) {
	epoch, err := c.netMapSource.Epoch(ctx)
	if err != nil {
		return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err)
	}

@@ -392,7 +390,7 @@ func (c *cfg) ForceMaintenance(ctx context.Context) error {
}

func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error {
	netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration()
	netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx)
	if err != nil {
		err = fmt.Errorf("read network settings to check maintenance allowance: %w", err)
	} else if !netSettings.MaintenanceModeAllowed {

@@ -425,7 +423,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.
	if err != nil {
		return err
	}
	return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res)
	return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash)
}

type netInfo struct {

@@ -440,7 +438,7 @@ type netInfo struct {
	msPerBlockRdr func() (int64, error)
}

func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) {
	magic, err := n.magic.MagicNumber()
	if err != nil {
		return nil, err

@@ -450,7 +448,7 @@ func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
	ni.SetCurrentEpoch(n.netState.CurrentEpoch())
	ni.SetMagicNumber(magic)

	netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration()
	netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx)
	if err != nil {
		return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err)
	}
@@ -13,7 +13,6 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
	objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
	objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"

@@ -55,11 +54,11 @@ type objectSvc struct {
	patch *patchsvc.Service
}

func (c *cfg) MaxObjectSize() uint64 {
	sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
func (c *cfg) MaxObjectSize(ctx context.Context) uint64 {
	sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx)
	if err != nil {
		c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
			zap.String("error", err.Error()),
		c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
			zap.Error(err),
		)
	}

@@ -123,8 +122,8 @@ type innerRingFetcherWithNotary struct {
	sidechain *morphClient.Client
}

func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
	keys, err := fn.sidechain.NeoFSAlphabetList()
func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) {
	keys, err := fn.sidechain.NeoFSAlphabetList(ctx)
	if err != nil {
		return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err)
	}

@@ -137,24 +136,6 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) {
	return result, nil
}

type innerRingFetcherWithoutNotary struct {
	nm *nmClient.Client
}

func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
	keys, err := f.nm.GetInnerRingList()
	if err != nil {
		return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err)
	}

	result := make([][]byte, 0, len(keys))
	for i := range keys {
		result = append(result, keys[i].Bytes())
	}

	return result, nil
}

func initObjectService(c *cfg) {
	keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)

@@ -187,7 +168,7 @@ func initObjectService(c *cfg) {
	sPatch := createPatchSvc(sGet, sPut)

	// build service pipeline
	// grpc | audit | <metrics> | signature | response | acl | ape | split
	// grpc | audit | qos | <metrics> | signature | response | acl | ape | split

	splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)

@@ -210,7 +191,8 @@ func initObjectService(c *cfg) {

	c.shared.metricsSvc = objectService.NewMetricCollector(
		signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
	auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit)
	qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService)
	auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
	server := objectTransportGRPC.New(auditSvc)

	c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {

@@ -234,8 +216,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
		prm.MarkAsGarbage(addr)
		prm.WithForceRemoval()

		_, err := ls.Inhume(ctx, prm)
		return err
		return ls.Inhume(ctx, prm)
	}

	remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor)

@@ -285,10 +266,9 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
			var inhumePrm engine.InhumePrm
			inhumePrm.MarkAsGarbage(addr)

			_, err := ls.Inhume(ctx, inhumePrm)
			if err != nil {
			if err := ls.Inhume(ctx, inhumePrm); err != nil {
				c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
					zap.String("error", err.Error()),
					zap.Error(err),
				)
			}
		}),

@@ -305,13 +285,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
}

func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
	if c.cfgMorph.client.ProbeNotary() {
		return &innerRingFetcherWithNotary{
			sidechain: c.cfgMorph.client,
		}
	}
	return &innerRingFetcherWithoutNotary{
		nm: c.cfgNetmap.wrapper,
	return &innerRingFetcherWithNotary{
		sidechain: c.cfgMorph.client,
	}
}

@@ -351,7 +326,6 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
		c,
		c.cfgNetmap.state,
		irFetcher,
		objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
		objectwriter.WithLogger(c.log),
		objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
	)

@@ -500,8 +474,7 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad

	prm.WithTarget(tombstone, addrs...)

	_, err := e.engine.Inhume(ctx, prm)
	return err
	return e.engine.Inhume(ctx, prm)
}

func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
95
cmd/frostfs-node/qos.go
Normal file
@@ -0,0 +1,95 @@
package main

import (
	"bytes"
	"context"

	qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos"
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	qosTagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
	"go.uber.org/zap"
)

type cfgQoSService struct {
	netmapSource        netmap.Source
	logger              *logger.Logger
	allowedCriticalPubs [][]byte
	allowedInternalPubs [][]byte
}

func initQoSService(c *cfg) {
	criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg)
	internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg)
	rawCriticalPubs := make([][]byte, 0, len(criticalPubs))
	rawInternalPubs := make([][]byte, 0, len(internalPubs))
	for i := range criticalPubs {
		rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes())
	}
	for i := range internalPubs {
		rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes())
	}

	c.cfgQoSService = cfgQoSService{
		netmapSource:        c.netMapSource,
		logger:              c.log,
		allowedCriticalPubs: rawCriticalPubs,
		allowedInternalPubs: rawInternalPubs,
	}
}

func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
	rawTag, defined := qosTagging.IOTagFromContext(ctx)
	if !defined {
		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	}
	ioTag, err := qos.FromRawString(rawTag)
	if err != nil {
		s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err))
		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	}

	switch ioTag {
	case qos.IOTagClient:
		return ctx
	case qos.IOTagCritical:
		for _, pk := range s.allowedCriticalPubs {
			if bytes.Equal(pk, requestSignPublicKey) {
				return ctx
			}
		}
		nm, err := s.netmapSource.GetNetMap(ctx, 0)
		if err != nil {
			s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
			return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
		}
		for _, node := range nm.Nodes() {
			if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
				return ctx
			}
		}
		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	case qos.IOTagInternal:
		for _, pk := range s.allowedInternalPubs {
			if bytes.Equal(pk, requestSignPublicKey) {
				return ctx
			}
		}
		nm, err := s.netmapSource.GetNetMap(ctx, 0)
		if err != nil {
			s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
			return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
		}
		for _, node := range nm.Nodes() {
			if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
				return ctx
			}
		}
		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	default:
		s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag))
		return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
	}
}
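AdjustIncomingTag only honours the "critical" and "internal" IO tags for explicitly authorized public keys or for keys found in the current network map; anything else, including unparsable tags, is downgraded to the "client" tag. A hedged usage sketch follows; the wrapper and handler types are hypothetical and not part of this diff, only the AdjustIncomingTag signature comes from the code above.

package sketch

import "context"

// tagAdjuster models the single method of cfgQoSService used here.
type tagAdjuster interface {
    AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context
}

// handleFunc is a hypothetical downstream request handler.
type handleFunc func(ctx context.Context, req any) (any, error)

// withAdjustedIOTag re-tags the context with the caller's verified public key
// before the request reaches the rest of the object service pipeline.
func withAdjustedIOTag(qos tagAdjuster, signerPub []byte, next handleFunc) handleFunc {
    return func(ctx context.Context, req any) (any, error) {
        ctx = qos.AdjustIncomingTag(ctx, signerPub)
        return next(ctx, req)
    }
}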
Some files were not shown because too many files have changed in this diff.