forked from TrueCloudLab/frostfs-node

Compare commits: master...support/v0 (10 commits)

| SHA1 |
|---|
| 2be938f3cd |
| eac61fe9b2 |
| dc81b4b50c |
| 98da032324 |
| 5acc13fa94 |
| e0f0b93b5e |
| eb5248621a |
| 02be6a4341 |
| 368774be95 |
| b9ef294b99 |

350 changed files with 1707 additions and 5065 deletions
@@ -39,7 +39,7 @@ linters-settings:
        alias: objectSDK
  custom:
    truecloudlab-linters:
      path: bin/linters/external_linters.so
      path: bin/external_linters.so
      original-url: git.frostfs.info/TrueCloudLab/linters.git
      settings:
        noliteral:
@@ -47,15 +47,6 @@ repos:
        types: [go]
        language: system

  - repo: local
    hooks:
      - id: gofumpt
        name: gofumpt
        entry: make fumpt
        pass_filenames: false
        types: [go]
        language: system

  - repo: https://github.com/TekWizely/pre-commit-golang
    rev: v1.0.0-rc.1
    hooks:
Makefile (80 lines changed)
@@ -10,14 +10,6 @@ HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.21
LINT_VERSION ?= 1.54.0
TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
PROTOC_VERSION ?= 25.0
PROTOC_GEN_GO_VERSION ?= $(shell go list -f '{{.Version}}' -m google.golang.org/protobuf)
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
PROTOC_OS_VERSION=osx-x86_64
ifeq ($(shell uname), Linux)
  PROTOC_OS_VERSION=linux-x86_64
endif
STATICCHECK_VERSION ?= 2023.1.6
ARCH = amd64

BIN = bin
@@ -34,17 +26,11 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
    sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
    sed "s/-/~/")-${OS_RELEASE}

OUTPUT_LINT_DIR ?= $(abspath $(BIN))/linters
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
PROTOBUF_DIR ?= $(abspath $(BIN))/protobuf
PROTOC_DIR ?= $(PROTOBUF_DIR)/protoc-v$(PROTOC_VERSION)
PROTOC_GEN_GO_DIR ?= $(PROTOBUF_DIR)/protoc-gen-go-$(PROTOC_GEN_GO_VERSION)
PROTOGEN_FROSTFS_DIR ?= $(PROTOBUF_DIR)/protogen-$(PROTOGEN_FROSTFS_VERSION)
STATICCHECK_DIR ?= $(abspath $(BIN))/staticcheck
STATICCHECK_VERSION_DIR ?= $(STATICCHECK_DIR)/$(STATICCHECK_VERSION)

.PHONY: help all images dep clean fmts fumpt imports test lint docker/lint
.PHONY: help all images dep clean fmts fmt imports test lint docker/lint
	prepare-release debpackage pre-commit unpre-commit

# To build a specific binary, use it's name prefix with bin/ as a target
@@ -84,40 +70,24 @@ dep:
	CGO_ENABLED=0 \
	go mod tidy -v && echo OK

# Build export-metrics
export-metrics: dep
	@printf "⇒ Build export-metrics\n"
	CGO_ENABLED=0 \
	go build -v -trimpath -o bin/export-metrics ./scripts/export-metrics

# Regenerate proto files:
protoc:
	@if [ ! -d "$(PROTOC_DIR)" ] || [ ! -d "$(PROTOC_GEN_GO_DIR)" ] || [ ! -d "$(PROTOGEN_FROSTFS_DIR)" ]; then \
		make protoc-install; \
	fi
	@for f in `find . -type f -name '*.proto' -not -path './bin/*'`; do \
	@GOPRIVATE=github.com/TrueCloudLab go mod vendor
	# Install specific version for protobuf lib
	@go list -f '{{.Path}}/...@{{.Version}}' -m github.com/golang/protobuf | xargs go install -v
	@GOBIN=$(abspath $(BIN)) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen
	# Protoc generate
	@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
		echo "⇒ Processing $$f "; \
		$(PROTOC_DIR)/bin/protoc \
			--proto_path=.:$(PROTOC_DIR)/include:/usr/local/include \
			--plugin=protoc-gen-go=$(PROTOC_GEN_GO_DIR)/protoc-gen-go \
			--plugin=protoc-gen-go-frostfs=$(PROTOGEN_FROSTFS_DIR)/protogen \
		protoc \
			--proto_path=.:./vendor:/usr/local/include \
			--plugin=protoc-gen-go-frostfs=$(BIN)/protogen \
			--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
			--go_out=. --go_opt=paths=source_relative \
			--go-grpc_opt=require_unimplemented_servers=false \
			--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
	done

protoc-install:
	@rm -rf $(PROTOBUF_DIR)
	@mkdir $(PROTOBUF_DIR)
	@echo "⇒ Installing protoc... "
	@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
	@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
	@rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip
	@echo "⇒ Installing protoc-gen-go..."
	@GOBIN=$(PROTOC_GEN_GO_DIR) go install -v google.golang.org/protobuf/...@$(PROTOC_GEN_GO_VERSION)
	@echo "⇒ Installing protogen FrostFS plugin..."
	@GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen@$(PROTOGEN_FROSTFS_VERSION)
	rm -rf vendor

# Build FrostFS component's docker image
image-%:
@@ -146,17 +116,18 @@ docker/%:


# Run all code formatters
fmts: fumpt imports
fmts: fmt imports

# Reformat code
fmt:
	@echo "⇒ Processing gofmt check"
	@gofmt -s -w cmd/ pkg/ misc/

# Reformat imports
imports:
	@echo "⇒ Processing goimports check"
	@goimports -w cmd/ pkg/ misc/

fumpt:
	@echo "⇒ Processing gofumpt check"
	@gofumpt -l -w cmd/ pkg/ misc/

# Run Unit Test with go test
test:
	@echo "⇒ Running go test"
@@ -167,8 +138,6 @@ pre-commit-run:

# Install linters
lint-install:
	@rm -rf $(OUTPUT_LINT_DIR)
	@mkdir $(OUTPUT_LINT_DIR)
	@mkdir -p $(TMP_DIR)
	@rm -rf $(TMP_DIR)/linters
	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@ -180,22 +149,18 @@ lint-install:
# Run linters
lint:
	@if [ ! -d "$(LINT_DIR)" ]; then \
		make lint-install; \
		echo "Run make lint-install"; \
		exit 1; \
	fi
	$(LINT_DIR)/golangci-lint run

# Install staticcheck
staticcheck-install:
	@rm -rf $(STATICCHECK_DIR)
	@mkdir $(STATICCHECK_DIR)
	@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
	@go install honnef.co/go/tools/cmd/staticcheck@latest

# Run staticcheck
staticcheck-run:
	@if [ ! -d "$(STATICCHECK_VERSION_DIR)" ]; then \
		make staticcheck-install; \
	fi
	@$(STATICCHECK_VERSION_DIR)/staticcheck ./...
	@staticcheck ./...

# Run linters in Docker
docker/lint:
@@ -219,6 +184,7 @@ version:

# Delete built artifacts
clean:
	rm -rf vendor
	rm -rf .cache
	rm -rf $(BIN)
	rm -rf $(RELEASE)
@@ -50,12 +50,12 @@ func initConfig(cmd *cobra.Command, _ []string) error {
	}

	pathDir := filepath.Dir(configPath)
	err = os.MkdirAll(pathDir, 0o700)
	err = os.MkdirAll(pathDir, 0700)
	if err != nil {
		return fmt.Errorf("create dir %s: %w", pathDir, err)
	}

	f, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0o600)
	f, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0600)
	if err != nil {
		return fmt.Errorf("open %s: %w", configPath, err)
	}
@ -16,7 +16,6 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/rolemgmt"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
|
@ -57,8 +56,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error {
|
|||
inv := invoker.New(c, nil)
|
||||
|
||||
if dumpStorage || dumpAlphabet || dumpProxy {
|
||||
r := management.NewReader(inv)
|
||||
nnsCs, err = r.GetContractByID(1)
|
||||
nnsCs, err = c.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get NNS contract info: %w", err)
|
||||
}
|
||||
|
|
|
@ -13,7 +13,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
|
||||
|
@ -30,9 +29,8 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
|
|||
}
|
||||
|
||||
inv := invoker.New(c, nil)
|
||||
r := management.NewReader(inv)
|
||||
|
||||
cs, err := r.GetContractByID(1)
|
||||
cs, err := c.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get NNS contract info: %w", err)
|
||||
}
|
||||
|
@ -89,8 +87,7 @@ func setConfigCmd(cmd *cobra.Command, args []string) error {
|
|||
return fmt.Errorf("can't initialize context: %w", err)
|
||||
}
|
||||
|
||||
r := management.NewReader(wCtx.ReadOnlyInvoker)
|
||||
cs, err := r.GetContractByID(1)
|
||||
cs, err := wCtx.Client.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get NNS contract info: %w", err)
|
||||
}
|
||||
|
|
|
@ -11,7 +11,6 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
|
@ -23,15 +22,14 @@ import (
|
|||
|
||||
var errInvalidContainerResponse = errors.New("invalid response from container contract")
|
||||
|
||||
func getContainerContractHash(cmd *cobra.Command, inv *invoker.Invoker) (util.Uint160, error) {
|
||||
func getContainerContractHash(cmd *cobra.Command, inv *invoker.Invoker, c Client) (util.Uint160, error) {
|
||||
s, err := cmd.Flags().GetString(containerContractFlag)
|
||||
var ch util.Uint160
|
||||
if err == nil {
|
||||
ch, err = util.Uint160DecodeStringLE(s)
|
||||
}
|
||||
if err != nil {
|
||||
r := management.NewReader(inv)
|
||||
nnsCs, err := r.GetContractByID(1)
|
||||
nnsCs, err := c.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return util.Uint160{}, fmt.Errorf("can't get NNS contract state: %w", err)
|
||||
}
|
||||
|
@ -80,7 +78,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
|
|||
|
||||
inv := invoker.New(c, nil)
|
||||
|
||||
ch, err := getContainerContractHash(cmd, inv)
|
||||
ch, err := getContainerContractHash(cmd, inv, c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get contaract hash: %w", err)
|
||||
}
|
||||
|
@ -170,7 +168,7 @@ func listContainers(cmd *cobra.Command, _ []string) error {
|
|||
|
||||
inv := invoker.New(c, nil)
|
||||
|
||||
ch, err := getContainerContractHash(cmd, inv)
|
||||
ch, err := getContainerContractHash(cmd, inv, c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to get contaract hash: %w", err)
|
||||
}
|
||||
|
@ -300,8 +298,7 @@ func parseContainers(filename string) ([]Container, error) {
|
|||
}
|
||||
|
||||
func fetchContainerContractHash(wCtx *initializeContext) (util.Uint160, error) {
|
||||
r := management.NewReader(wCtx.ReadOnlyInvoker)
|
||||
nnsCs, err := r.GetContractByID(1)
|
||||
nnsCs, err := wCtx.Client.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return util.Uint160{}, fmt.Errorf("can't get NNS contract state: %w", err)
|
||||
}
|
||||
|
|
|
@ -76,8 +76,7 @@ func deployContractCmd(cmd *cobra.Command, args []string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
r := management.NewReader(c.ReadOnlyInvoker)
|
||||
nnsCs, err := r.GetContractByID(1)
|
||||
nnsCs, err := c.Client.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't fetch NNS contract state: %w", err)
|
||||
}
|
||||
|
|
|
@ -11,7 +11,6 @@ import (
|
|||
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
|
@ -37,8 +36,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
|
|||
return fmt.Errorf("can't create N3 client: %w", err)
|
||||
}
|
||||
|
||||
r := management.NewReader(invoker.New(c, nil))
|
||||
cs, err := r.GetContractByID(1)
|
||||
cs, err := c.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
|
@ -21,8 +20,7 @@ func forceNewEpochCmd(cmd *cobra.Command, _ []string) error {
|
|||
return fmt.Errorf("can't to initialize context: %w", err)
|
||||
}
|
||||
|
||||
r := management.NewReader(wCtx.ReadOnlyInvoker)
|
||||
cs, err := r.GetContractByID(1)
|
||||
cs, err := wCtx.Client.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get NNS contract info: %w", err)
|
||||
}
|
||||
|
|
|
@ -1,42 +0,0 @@
|
|||
package morph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// neo-go doesn't support []util.Uint160 type:
|
||||
// https://github.com/nspcc-dev/neo-go/blob/v0.103.0/pkg/smartcontract/parameter.go#L262
|
||||
// Thus, return []smartcontract.Parameter.
|
||||
func getFrostfsIDAuthorizedKeys(v *viper.Viper, defaultOwner util.Uint160) ([]smartcontract.Parameter, error) {
|
||||
var res []smartcontract.Parameter
|
||||
res = append(res, smartcontract.Parameter{Type: smartcontract.Hash160Type, Value: defaultOwner})
|
||||
|
||||
ks := v.GetStringSlice(frostfsIDAuthorizedKeysConfigKey)
|
||||
for i := range ks {
|
||||
h, err := address.StringToUint160(ks[i])
|
||||
if err == nil {
|
||||
res = append(res, smartcontract.Parameter{Type: smartcontract.Hash160Type, Value: h})
|
||||
continue
|
||||
}
|
||||
|
||||
h, err = util.Uint160DecodeStringLE(ks[i])
|
||||
if err == nil {
|
||||
res = append(res, smartcontract.Parameter{Type: smartcontract.Hash160Type, Value: h})
|
||||
continue
|
||||
}
|
||||
|
||||
pk, err := keys.NewPublicKeyFromString(ks[i])
|
||||
if err == nil {
|
||||
res = append(res, smartcontract.Parameter{Type: smartcontract.Hash160Type, Value: pk.GetScriptHash()})
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("frostfsid: #%d item in authorized_keys is invalid: '%s'", i, ks[i])
|
||||
}
|
||||
return res, nil
|
||||
}
|
|
@ -1,49 +0,0 @@
|
|||
package morph
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFrostfsIDConfig(t *testing.T) {
|
||||
pks := make([]*keys.PrivateKey, 4)
|
||||
for i := range pks {
|
||||
pk, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
pks[i] = pk
|
||||
}
|
||||
|
||||
v := viper.New()
|
||||
v.Set("frostfsid.authorized_keys", []string{
|
||||
pks[0].GetScriptHash().StringLE(),
|
||||
address.Uint160ToString(pks[1].GetScriptHash()),
|
||||
hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()),
|
||||
hex.EncodeToString(pks[3].PublicKey().Bytes()),
|
||||
})
|
||||
|
||||
comm := util.Uint160{1, 2, 3}
|
||||
actual, err := getFrostfsIDAuthorizedKeys(v, comm)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(pks)+1, len(actual))
|
||||
require.Equal(t, smartcontract.Hash160Type, actual[0].Type)
|
||||
require.Equal(t, comm, actual[0].Value)
|
||||
for i := range pks {
|
||||
require.Equal(t, smartcontract.Hash160Type, actual[i+1].Type)
|
||||
require.Equal(t, pks[i].GetScriptHash(), actual[i+1].Value)
|
||||
}
|
||||
|
||||
t.Run("bad key", func(t *testing.T) {
|
||||
v := viper.New()
|
||||
v.Set("frostfsid.authorized_keys", []string{"abc"})
|
||||
|
||||
_, err := getFrostfsIDAuthorizedKeys(v, comm)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
|
@ -76,7 +76,7 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
|
|||
}
|
||||
|
||||
p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
|
||||
f, err := os.OpenFile(p, os.O_CREATE, 0o644)
|
||||
f, err := os.OpenFile(p, os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't create wallet file: %w", err)
|
||||
}
|
||||
|
|
|
@ -7,7 +7,6 @@ import (
|
|||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
|
||||
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
|
@ -16,7 +15,6 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
|
||||
|
@ -314,8 +312,7 @@ func (c *initializeContext) nnsContractState() (*state.Contract, error) {
|
|||
return c.nnsCs, nil
|
||||
}
|
||||
|
||||
r := management.NewReader(c.ReadOnlyInvoker)
|
||||
cs, err := r.GetContractByID(1)
|
||||
cs, err := c.Client.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -377,18 +374,36 @@ func (c *clientContext) awaitTx(cmd *cobra.Command) error {
|
|||
func awaitTx(cmd *cobra.Command, c Client, txs []hashVUBPair) error {
|
||||
cmd.Println("Waiting for transactions to persist...")
|
||||
|
||||
const pollInterval = time.Second
|
||||
|
||||
tick := time.NewTicker(pollInterval)
|
||||
defer tick.Stop()
|
||||
|
||||
at := trigger.Application
|
||||
|
||||
var retErr error
|
||||
|
||||
currBlock, err := c.GetBlockCount()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't fetch current block height: %w", err)
|
||||
}
|
||||
|
||||
loop:
|
||||
for i := range txs {
|
||||
var it int
|
||||
var pollInterval time.Duration
|
||||
var pollIntervalChanged bool
|
||||
for {
|
||||
res, err := c.GetApplicationLog(txs[i].hash, &at)
|
||||
if err == nil {
|
||||
if retErr == nil && len(res.Executions) > 0 && res.Executions[0].VMState != vmstate.Halt {
|
||||
retErr = fmt.Errorf("tx %d persisted in %s state: %s",
|
||||
i, res.Executions[0].VMState, res.Executions[0].FaultException)
|
||||
}
|
||||
continue loop
|
||||
}
|
||||
if txs[i].vub < currBlock {
|
||||
return fmt.Errorf("tx was not persisted: vub=%d, height=%d", txs[i].vub, currBlock)
|
||||
}
|
||||
for range tick.C {
|
||||
// We must fetch current height before application log, to avoid race condition.
|
||||
currBlock, err := c.GetBlockCount()
|
||||
currBlock, err = c.GetBlockCount()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't fetch current block height: %w", err)
|
||||
}
|
||||
|
@ -403,43 +418,12 @@ loop:
|
|||
if txs[i].vub < currBlock {
|
||||
return fmt.Errorf("tx was not persisted: vub=%d, height=%d", txs[i].vub, currBlock)
|
||||
}
|
||||
|
||||
pollInterval, pollIntervalChanged = nextPollInterval(it, pollInterval)
|
||||
if pollIntervalChanged && viper.GetBool(commonflags.Verbose) {
|
||||
cmd.Printf("Pool interval to check transaction persistence changed: %s\n", pollInterval.String())
|
||||
}
|
||||
|
||||
timer := time.NewTimer(pollInterval)
|
||||
select {
|
||||
case <-cmd.Context().Done():
|
||||
return cmd.Context().Err()
|
||||
case <-timer.C:
|
||||
}
|
||||
|
||||
it++
|
||||
}
|
||||
}
|
||||
|
||||
return retErr
|
||||
}
|
||||
|
||||
func nextPollInterval(it int, previous time.Duration) (time.Duration, bool) {
|
||||
const minPollInterval = 1 * time.Second
|
||||
const maxPollInterval = 16 * time.Second
|
||||
const changeAfter = 5
|
||||
if it == 0 {
|
||||
return minPollInterval, true
|
||||
}
|
||||
if it%changeAfter != 0 {
|
||||
return previous, false
|
||||
}
|
||||
nextInterval := previous * 2
|
||||
if nextInterval > maxPollInterval {
|
||||
return maxPollInterval, previous != maxPollInterval
|
||||
}
|
||||
return nextInterval, true
|
||||
}
|
||||
|
||||
// sendCommitteeTx creates transaction from script, signs it by committee nodes and sends it to RPC.
|
||||
// If tryGroup is false, global scope is used for the signer (useful when
|
||||
// working with native contracts).
|
||||
|
|
|
@ -18,8 +18,10 @@ import (
|
|||
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
io2 "github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
|
@ -31,7 +33,7 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -43,19 +45,15 @@ const (
|
|||
containerContract = "container"
|
||||
frostfsIDContract = "frostfsid"
|
||||
netmapContract = "netmap"
|
||||
policyContract = "policy"
|
||||
proxyContract = "proxy"
|
||||
)
|
||||
|
||||
const frostfsIDAuthorizedKeysConfigKey = "frostfsid.authorized_keys"
|
||||
|
||||
var (
|
||||
contractList = []string{
|
||||
balanceContract,
|
||||
containerContract,
|
||||
frostfsIDContract,
|
||||
netmapContract,
|
||||
policyContract,
|
||||
proxyContract,
|
||||
}
|
||||
|
||||
|
@ -96,10 +94,7 @@ func (c *initializeContext) deployNNS(method string) error {
|
|||
h := cs.Hash
|
||||
|
||||
nnsCs, err := c.nnsContractState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if nnsCs != nil {
|
||||
if err == nil {
|
||||
if nnsCs.NEF.Checksum == cs.NEF.Checksum {
|
||||
if method == deployMethodName {
|
||||
c.Command.Println("NNS contract is already deployed.")
|
||||
|
@ -117,13 +112,28 @@ func (c *initializeContext) deployNNS(method string) error {
|
|||
}
|
||||
|
||||
params := getContractDeployParameters(cs, nil)
|
||||
signer := transaction.Signer{
|
||||
Account: c.CommitteeAcc.Contract.ScriptHash(),
|
||||
Scopes: transaction.CalledByEntry,
|
||||
}
|
||||
|
||||
invokeHash := management.Hash
|
||||
if method == updateMethodName {
|
||||
invokeHash = nnsCs.Hash
|
||||
}
|
||||
|
||||
tx, err := c.CommitteeAct.MakeCall(invokeHash, method, params...)
|
||||
res, err := invokeFunction(c.Client, invokeHash, method, params, []transaction.Signer{signer})
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't deploy NNS contract: %w", err)
|
||||
}
|
||||
if res.State != vmstate.Halt.String() {
|
||||
return fmt.Errorf("can't deploy NNS contract: %s", res.FaultException)
|
||||
}
|
||||
|
||||
tx, err := c.Client.CreateTxFromScript(res.Script, c.CommitteeAcc, res.GasConsumed, 0, []rpcclient.SignerAccount{{
|
||||
Signer: signer,
|
||||
Account: c.CommitteeAcc,
|
||||
}})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create deploy tx for %s: %w", nnsContract, err)
|
||||
}
|
||||
|
@ -356,9 +366,8 @@ func (c *initializeContext) deployContracts() error {
|
|||
}
|
||||
|
||||
func (c *initializeContext) isUpdated(ctrHash util.Uint160, cs *contractState) bool {
|
||||
r := management.NewReader(c.ReadOnlyInvoker)
|
||||
realCs, err := r.GetContract(ctrHash)
|
||||
return err == nil && realCs != nil && realCs.NEF.Checksum == cs.NEF.Checksum
|
||||
realCs, err := c.Client.GetContractStateByHash(ctrHash)
|
||||
return err == nil && realCs.NEF.Checksum == cs.NEF.Checksum
|
||||
}
|
||||
|
||||
func (c *initializeContext) getContract(ctrName string) *contractState {
|
||||
|
@ -508,7 +517,8 @@ func getContractDeployParameters(cs *contractState, deployData []any) []any {
|
|||
}
|
||||
|
||||
func (c *initializeContext) getContractDeployData(ctrName string, keysParam []any, method string) []any {
|
||||
items := make([]any, 0, 6)
|
||||
items := make([]any, 1, 6)
|
||||
items[0] = false // notaryDisabled is false
|
||||
|
||||
switch ctrName {
|
||||
case frostfsContract:
|
||||
|
@ -526,8 +536,7 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
|
|||
case containerContract:
|
||||
// In case if NNS is updated multiple times, we can't calculate
|
||||
// its actual hash based on local data, thus query chain.
|
||||
r := management.NewReader(c.ReadOnlyInvoker)
|
||||
nnsCs, err := r.GetContractByID(1)
|
||||
nnsCs, err := c.Client.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
panic("NNS is not yet deployed")
|
||||
}
|
||||
|
@ -538,12 +547,9 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
|
|||
nnsCs.Hash,
|
||||
"container")
|
||||
case frostfsIDContract:
|
||||
hs, err := getFrostfsIDAuthorizedKeys(viper.GetViper(), c.CommitteeAcc.PublicKey().GetScriptHash())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
items = append(items, hs)
|
||||
items = append(items,
|
||||
c.Contracts[netmapContract].Hash,
|
||||
c.Contracts[containerContract].Hash)
|
||||
case netmapContract:
|
||||
md := getDefaultNetmapContractConfigMap()
|
||||
if method == updateMethodName {
|
||||
|
@ -577,8 +583,6 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
|
|||
configParam)
|
||||
case proxyContract:
|
||||
items = nil
|
||||
case policyContract:
|
||||
items = []any{nil}
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid contract name: %s", ctrName))
|
||||
}
|
||||
|
@ -586,8 +590,7 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
|
|||
}
|
||||
|
||||
func (c *initializeContext) getNetConfigFromNetmapContract() ([]stackitem.Item, error) {
|
||||
r := management.NewReader(c.ReadOnlyInvoker)
|
||||
cs, err := r.GetContractByID(1)
|
||||
cs, err := c.Client.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("NNS is not yet deployed: %w", err)
|
||||
}
|
||||
|
@ -603,11 +606,12 @@ func (c *initializeContext) getNetConfigFromNetmapContract() ([]stackitem.Item,
|
|||
}
|
||||
|
||||
func (c *initializeContext) getAlphabetDeployItems(i, n int) []any {
|
||||
items := make([]any, 5)
|
||||
items[0] = c.Contracts[netmapContract].Hash
|
||||
items[1] = c.Contracts[proxyContract].Hash
|
||||
items[2] = innerring.GlagoliticLetter(i).String()
|
||||
items[3] = int64(i)
|
||||
items[4] = int64(n)
|
||||
items := make([]any, 6)
|
||||
items[0] = false
|
||||
items[1] = c.Contracts[netmapContract].Hash
|
||||
items[2] = c.Contracts[proxyContract].Hash
|
||||
items[3] = innerring.GlagoliticLetter(i).String()
|
||||
items[4] = int64(i)
|
||||
items[5] = int64(n)
|
||||
return items
|
||||
}
|
||||
|
|
|
@ -15,7 +15,6 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
|
||||
nnsClient "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
|
@ -31,8 +30,7 @@ const defaultExpirationTime = 10 * 365 * 24 * time.Hour / time.Second
|
|||
const frostfsOpsEmail = "ops@frostfs.info"
|
||||
|
||||
func (c *initializeContext) setNNS() error {
|
||||
r := management.NewReader(c.ReadOnlyInvoker)
|
||||
nnsCs, err := r.GetContractByID(1)
|
||||
nnsCs, err := c.Client.GetContractStateByID(1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -3,17 +3,14 @@ package morph
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/native"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
|
@ -44,12 +41,12 @@ func (c *initializeContext) registerCandidateRange(start, end int) error {
|
|||
panic(fmt.Sprintf("BUG: %v", w.Err))
|
||||
}
|
||||
|
||||
signers := []actor.SignerAccount{{
|
||||
signers := []rpcclient.SignerAccount{{
|
||||
Signer: c.getSigner(false, c.CommitteeAcc),
|
||||
Account: c.CommitteeAcc,
|
||||
}}
|
||||
for _, acc := range c.Accounts[start:end] {
|
||||
signers = append(signers, actor.SignerAccount{
|
||||
signers = append(signers, rpcclient.SignerAccount{
|
||||
Signer: transaction.Signer{
|
||||
Account: acc.Contract.ScriptHash(),
|
||||
Scopes: transaction.CustomContracts,
|
||||
|
@ -59,11 +56,7 @@ func (c *initializeContext) registerCandidateRange(start, end int) error {
|
|||
})
|
||||
}
|
||||
|
||||
act, err := actor.New(c.Client, signers)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create actor: %w", err)
|
||||
}
|
||||
tx, err := act.MakeRun(w.Bytes())
|
||||
tx, err := c.Client.CreateTxFromScript(w.Bytes(), c.CommitteeAcc, -1, 0, signers)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create tx: %w", err)
|
||||
}
|
||||
|
@ -141,9 +134,8 @@ func (c *initializeContext) transferNEOToAlphabetContracts() error {
|
|||
}
|
||||
|
||||
func (c *initializeContext) transferNEOFinished(neoHash util.Uint160) (bool, error) {
|
||||
r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
|
||||
bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
|
||||
return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
|
||||
bal, err := c.Client.NEP17BalanceOf(neoHash, c.CommitteeAcc.Contract.ScriptHash())
|
||||
return bal < native.NEOTotalSupply, err
|
||||
}
|
||||
|
||||
var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
|
||||
|
|
|
@ -20,7 +20,7 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
contractsPath = "../../../../../../contract/frostfs-contract-v0.18.0.tar.gz"
|
||||
contractsPath = "../../../../../../frostfs-contract/frostfs-contract-v0.16.0.tar.gz"
|
||||
protoFileName = "proto.yml"
|
||||
)
|
||||
|
||||
|
@ -58,9 +58,7 @@ func testInitialize(t *testing.T, committeeSize int) {
|
|||
|
||||
// Set to the path or remove the next statement to download from the network.
|
||||
require.NoError(t, initCmd.Flags().Set(contractsInitFlag, contractsPath))
|
||||
|
||||
dumpPath := filepath.Join(testdataDir, "out")
|
||||
require.NoError(t, initCmd.Flags().Set(localDumpFlag, dumpPath))
|
||||
v.Set(localDumpFlag, filepath.Join(testdataDir, "out"))
|
||||
v.Set(alphabetWalletsFlag, testdataDir)
|
||||
v.Set(epochDurationInitFlag, 1)
|
||||
v.Set(maxObjectSizeInitFlag, 1024)
|
||||
|
@ -69,15 +67,12 @@ func testInitialize(t *testing.T, committeeSize int) {
|
|||
require.NoError(t, initializeSideChainCmd(initCmd, nil))
|
||||
|
||||
t.Run("force-new-epoch", func(t *testing.T) {
|
||||
require.NoError(t, forceNewEpoch.Flags().Set(localDumpFlag, dumpPath))
|
||||
require.NoError(t, forceNewEpochCmd(forceNewEpoch, nil))
|
||||
})
|
||||
t.Run("set-config", func(t *testing.T) {
|
||||
require.NoError(t, setConfig.Flags().Set(localDumpFlag, dumpPath))
|
||||
require.NoError(t, setConfigCmd(setConfig, []string{"MaintenanceModeAllowed=true"}))
|
||||
})
|
||||
t.Run("set-policy", func(t *testing.T) {
|
||||
require.NoError(t, setPolicy.Flags().Set(localDumpFlag, dumpPath))
|
||||
require.NoError(t, setPolicyCmd(setPolicy, []string{"ExecFeeFactor=1"}))
|
||||
})
|
||||
t.Run("remove-node", func(t *testing.T) {
|
||||
|
@ -85,7 +80,6 @@ func testInitialize(t *testing.T, committeeSize int) {
|
|||
require.NoError(t, err)
|
||||
|
||||
pub := hex.EncodeToString(pk.PublicKey().Bytes())
|
||||
require.NoError(t, removeNodes.Flags().Set(localDumpFlag, dumpPath))
|
||||
require.NoError(t, removeNodesCmd(removeNodes, []string{pub}))
|
||||
})
|
||||
}
|
||||
|
@ -145,38 +139,3 @@ func setTestCredentials(v *viper.Viper, size int) {
|
|||
}
|
||||
v.Set("credentials.contract", testContractPassword)
|
||||
}
|
||||
|
||||
func TestNextPollInterval(t *testing.T) {
|
||||
var pollInterval time.Duration
|
||||
var iteration int
|
||||
|
||||
pollInterval, hasChanged := nextPollInterval(iteration, pollInterval)
|
||||
require.True(t, hasChanged)
|
||||
require.Equal(t, time.Second, pollInterval)
|
||||
|
||||
iteration = 4
|
||||
pollInterval, hasChanged = nextPollInterval(iteration, pollInterval)
|
||||
require.False(t, hasChanged)
|
||||
require.Equal(t, time.Second, pollInterval)
|
||||
|
||||
iteration = 5
|
||||
pollInterval, hasChanged = nextPollInterval(iteration, pollInterval)
|
||||
require.True(t, hasChanged)
|
||||
require.Equal(t, 2*time.Second, pollInterval)
|
||||
|
||||
iteration = 10
|
||||
pollInterval, hasChanged = nextPollInterval(iteration, pollInterval)
|
||||
require.True(t, hasChanged)
|
||||
require.Equal(t, 4*time.Second, pollInterval)
|
||||
|
||||
iteration = 20
|
||||
pollInterval = 32 * time.Second
|
||||
pollInterval, hasChanged = nextPollInterval(iteration, pollInterval)
|
||||
require.True(t, hasChanged) // from 32s to 16s
|
||||
require.Equal(t, 16*time.Second, pollInterval)
|
||||
|
||||
pollInterval = 16 * time.Second
|
||||
pollInterval, hasChanged = nextPollInterval(iteration, pollInterval)
|
||||
require.False(t, hasChanged)
|
||||
require.Equal(t, 16*time.Second, pollInterval)
|
||||
}
|
||||
|
|
|
@ -2,18 +2,15 @@ package morph
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/native"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
scContext "github.com/nspcc-dev/neo-go/pkg/smartcontract/context"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
|
@ -36,11 +33,11 @@ func (c *initializeContext) transferFunds() error {
|
|||
return err
|
||||
}
|
||||
|
||||
var transfers []transferTarget
|
||||
var transfers []rpcclient.TransferTarget
|
||||
for _, acc := range c.Accounts {
|
||||
to := acc.Contract.ScriptHash()
|
||||
transfers = append(transfers,
|
||||
transferTarget{
|
||||
rpcclient.TransferTarget{
|
||||
Token: gas.Hash,
|
||||
Address: to,
|
||||
Amount: initialAlphabetGASAmount,
|
||||
|
@ -50,19 +47,25 @@ func (c *initializeContext) transferFunds() error {
|
|||
|
||||
// It is convenient to have all funds at the committee account.
|
||||
transfers = append(transfers,
|
||||
transferTarget{
|
||||
rpcclient.TransferTarget{
|
||||
Token: gas.Hash,
|
||||
Address: c.CommitteeAcc.Contract.ScriptHash(),
|
||||
Amount: (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2,
|
||||
},
|
||||
transferTarget{
|
||||
rpcclient.TransferTarget{
|
||||
Token: neo.Hash,
|
||||
Address: c.CommitteeAcc.Contract.ScriptHash(),
|
||||
Amount: native.NEOTotalSupply,
|
||||
},
|
||||
)
|
||||
|
||||
tx, err := createNEP17MultiTransferTx(c.Client, c.ConsensusAcc, transfers)
|
||||
tx, err := createNEP17MultiTransferTx(c.Client, c.ConsensusAcc, 0, transfers, []rpcclient.SignerAccount{{
|
||||
Signer: transaction.Signer{
|
||||
Account: c.ConsensusAcc.Contract.ScriptHash(),
|
||||
Scopes: transaction.CalledByEntry,
|
||||
},
|
||||
Account: c.ConsensusAcc,
|
||||
}})
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create transfer transaction: %w", err)
|
||||
}
|
||||
|
@ -77,9 +80,8 @@ func (c *initializeContext) transferFunds() error {
|
|||
func (c *initializeContext) transferFundsFinished() (bool, error) {
|
||||
acc := c.Accounts[0]
|
||||
|
||||
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
|
||||
res, err := r.BalanceOf(acc.Contract.ScriptHash())
|
||||
return res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) == 1, err
|
||||
res, err := c.Client.NEP17BalanceOf(gas.Hash, acc.Contract.ScriptHash())
|
||||
return res > initialAlphabetGASAmount/2, err
|
||||
}
|
||||
|
||||
func (c *initializeContext) multiSignAndSend(tx *transaction.Transaction, accType string) error {
|
||||
|
@ -91,13 +93,12 @@ func (c *initializeContext) multiSignAndSend(tx *transaction.Transaction, accTyp
|
|||
}
|
||||
|
||||
func (c *initializeContext) multiSign(tx *transaction.Transaction, accType string) error {
|
||||
version, err := c.Client.GetVersion()
|
||||
network, err := c.Client.GetNetwork()
|
||||
if err != nil {
|
||||
// error appears only if client
|
||||
// has not been initialized
|
||||
panic(err)
|
||||
}
|
||||
network := version.Protocol.Network
|
||||
|
||||
// Use parameter context to avoid dealing with signature order.
|
||||
pc := scContext.NewParameterContext("", network, tx)
|
||||
|
@ -145,17 +146,16 @@ func (c *initializeContext) multiSign(tx *transaction.Transaction, accType strin
|
|||
func (c *initializeContext) transferGASToProxy() error {
|
||||
proxyCs := c.getContract(proxyContract)
|
||||
|
||||
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
|
||||
bal, err := r.BalanceOf(proxyCs.Hash)
|
||||
if err != nil || bal.Sign() > 0 {
|
||||
bal, err := c.Client.NEP17BalanceOf(gas.Hash, proxyCs.Hash)
|
||||
if err != nil || bal > 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
tx, err := createNEP17MultiTransferTx(c.Client, c.CommitteeAcc, []transferTarget{{
|
||||
tx, err := createNEP17MultiTransferTx(c.Client, c.CommitteeAcc, 0, []rpcclient.TransferTarget{{
|
||||
Token: gas.Hash,
|
||||
Address: proxyCs.Hash,
|
||||
Amount: initialProxyGASAmount,
|
||||
}})
|
||||
}}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -167,14 +167,8 @@ func (c *initializeContext) transferGASToProxy() error {
|
|||
return c.awaitTx()
|
||||
}
|
||||
|
||||
type transferTarget struct {
|
||||
Token util.Uint160
|
||||
Address util.Uint160
|
||||
Amount int64
|
||||
Data any
|
||||
}
|
||||
|
||||
func createNEP17MultiTransferTx(c Client, acc *wallet.Account, recipients []transferTarget) (*transaction.Transaction, error) {
|
||||
func createNEP17MultiTransferTx(c Client, acc *wallet.Account, netFee int64,
|
||||
recipients []rpcclient.TransferTarget, cosigners []rpcclient.SignerAccount) (*transaction.Transaction, error) {
|
||||
from := acc.Contract.ScriptHash()
|
||||
|
||||
w := io.NewBufBinWriter()
|
||||
|
@ -186,18 +180,11 @@ func createNEP17MultiTransferTx(c Client, acc *wallet.Account, recipients []tran
|
|||
if w.Err != nil {
|
||||
return nil, fmt.Errorf("failed to create transfer script: %w", w.Err)
|
||||
}
|
||||
|
||||
signers := []actor.SignerAccount{{
|
||||
return c.CreateTxFromScript(w.Bytes(), acc, -1, netFee, append([]rpcclient.SignerAccount{{
|
||||
Signer: transaction.Signer{
|
||||
Account: acc.Contract.ScriptHash(),
|
||||
Account: from,
|
||||
Scopes: transaction.CalledByEntry,
|
||||
},
|
||||
Account: acc,
|
||||
}}
|
||||
|
||||
act, err := actor.New(c, signers)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't create actor: %w", err)
|
||||
}
|
||||
return act.MakeRun(w.Bytes())
|
||||
}}, cosigners...))
|
||||
}
|
||||
|
|
|
@ -10,28 +10,33 @@ import (
|
|||
|
||||
"github.com/google/uuid"
|
||||
"github.com/nspcc-dev/neo-go/pkg/config"
|
||||
"github.com/nspcc-dev/neo-go/pkg/config/netmode"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/block"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/chaindump"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/fee"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/storage"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
|
||||
"github.com/nspcc-dev/neo-go/pkg/io"
|
||||
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
|
||||
"github.com/nspcc-dev/neo-go/pkg/network/payload"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
|
||||
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
@ -83,7 +88,7 @@ func newLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet
|
|||
go bc.Run()
|
||||
|
||||
if cmd.Name() != "init" {
|
||||
f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600)
|
||||
f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0600)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't open local dump: %w", err)
|
||||
}
|
||||
|
@ -114,10 +119,29 @@ func (l *localClient) GetBlockCount() (uint32, error) {
|
|||
return l.bc.BlockHeight(), nil
|
||||
}
|
||||
|
||||
func (l *localClient) GetContractStateByID(id int32) (*state.Contract, error) {
|
||||
h, err := l.bc.GetContractScriptHash(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return l.GetContractStateByHash(h)
|
||||
}
|
||||
|
||||
func (l *localClient) GetContractStateByHash(h util.Uint160) (*state.Contract, error) {
|
||||
if cs := l.bc.GetContractState(h); cs != nil {
|
||||
return cs, nil
|
||||
}
|
||||
return nil, storage.ErrKeyNotFound
|
||||
}
|
||||
|
||||
func (l *localClient) GetNativeContracts() ([]state.NativeContract, error) {
|
||||
return l.bc.GetNatives(), nil
|
||||
}
|
||||
|
||||
func (l *localClient) GetNetwork() (netmode.Magic, error) {
|
||||
return l.bc.GetConfig().Magic, nil
|
||||
}
|
||||
|
||||
func (l *localClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*result.ApplicationLog, error) {
|
||||
aer, err := l.bc.GetAppExecResults(h, *t)
|
||||
if err != nil {
|
||||
|
@ -128,6 +152,34 @@ func (l *localClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul
|
|||
return &a, nil
|
||||
}
|
||||
|
||||
func (l *localClient) CreateTxFromScript(script []byte, acc *wallet.Account, sysFee int64, netFee int64, cosigners []rpcclient.SignerAccount) (*transaction.Transaction, error) {
|
||||
signers, accounts, err := getSigners(acc, cosigners)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to construct tx signers: %w", err)
|
||||
}
|
||||
if sysFee < 0 {
|
||||
res, err := l.InvokeScript(script, signers)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't add system fee to transaction: %w", err)
|
||||
}
|
||||
if res.State != "HALT" {
|
||||
return nil, fmt.Errorf("can't add system fee to transaction: bad vm state: %s due to an error: %s", res.State, res.FaultException)
|
||||
}
|
||||
sysFee = res.GasConsumed
|
||||
}
|
||||
|
||||
tx := transaction.New(script, sysFee)
|
||||
tx.Signers = signers
|
||||
tx.ValidUntilBlock = l.bc.BlockHeight() + 2
|
||||
|
||||
err = l.AddNetworkFee(tx, netFee, accounts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to add network fee: %w", err)
|
||||
}
|
||||
|
||||
return tx, nil
|
||||
}
|
||||
|
||||
func (l *localClient) GetCommittee() (keys.PublicKeys, error) {
|
||||
// not used by `morph init` command
|
||||
panic("unexpected call")
|
||||
|
@ -148,6 +200,21 @@ func (l *localClient) InvokeFunction(h util.Uint160, method string, sPrm []smart
|
|||
return invokeFunction(l, h, method, pp, ss)
|
||||
}
|
||||
|
||||
func (l *localClient) CalculateNotaryFee(_ uint8) (int64, error) {
|
||||
// not used by `morph init` command
|
||||
panic("unexpected call")
|
||||
}
|
||||
|
||||
func (l *localClient) SignAndPushP2PNotaryRequest(_ *transaction.Transaction, _ []byte, _ int64, _ int64, _ uint32, _ *wallet.Account) (*payload.P2PNotaryRequest, error) {
|
||||
// not used by `morph init` command
|
||||
panic("unexpected call")
|
||||
}
|
||||
|
||||
func (l *localClient) SignAndPushInvocationTx(_ []byte, _ *wallet.Account, _ int64, _ fixedn.Fixed8, _ []rpcclient.SignerAccount) (util.Uint256, error) {
|
||||
// not used by `morph init` command
|
||||
panic("unexpected call")
|
||||
}
|
||||
|
||||
func (l *localClient) TerminateSession(_ uuid.UUID) (bool, error) {
|
||||
// not used by `morph init` command
|
||||
panic("unexpected call")
|
||||
|
@ -187,78 +254,112 @@ func (l *localClient) InvokeContractVerify(util.Uint160, []smartcontract.Paramet
|
|||
|
||||
// CalculateNetworkFee calculates network fee for the given transaction.
|
||||
// Copied from neo-go with minor corrections (no need to support non-notary mode):
|
||||
// https://github.com/nspcc-dev/neo-go/blob/v0.103.0/pkg/services/rpcsrv/server.go#L911
|
||||
// https://github.com/nspcc-dev/neo-go/blob/v0.99.2/pkg/services/rpcsrv/server.go#L744
|
||||
func (l *localClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, error) {
|
||||
// Avoid setting hash for this tx: server code doesn't touch client transaction.
|
||||
data := tx.Bytes()
|
||||
tx, err := transaction.NewTransactionFromBytes(data)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
hashablePart, err := tx.EncodeHashableFields()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, fmt.Errorf("failed to compute tx size: %w", err)
|
||||
}
|
||||
|
||||
size := len(hashablePart) + io.GetVarSize(len(tx.Signers))
|
||||
var (
|
||||
netFee int64
|
||||
// Verification GAS cost can't exceed this policy.
|
||||
gasLimit = l.bc.GetMaxVerificationGAS()
|
||||
)
|
||||
ef := l.bc.GetBaseExecFee()
|
||||
|
||||
var netFee int64
|
||||
for i, signer := range tx.Signers {
|
||||
w := tx.Scripts[i]
|
||||
if len(w.InvocationScript) == 0 { // No invocation provided, try to infer one.
|
||||
var paramz []manifest.Parameter
|
||||
if len(w.VerificationScript) == 0 { // Contract-based verification
|
||||
cs := l.bc.GetContractState(signer.Account)
|
||||
if cs == nil {
|
||||
return 0, fmt.Errorf("signer %d has no verification script and no deployed contract", i)
|
||||
}
|
||||
md := cs.Manifest.ABI.GetMethod(manifest.MethodVerify, -1)
|
||||
if md == nil || md.ReturnType != smartcontract.BoolType {
|
||||
return 0, fmt.Errorf("signer %d has no verify method in deployed contract", i)
|
||||
}
|
||||
paramz = md.Parameters // Might as well have none params and it's OK.
|
||||
} else { // Regular signature verification.
|
||||
if vm.IsSignatureContract(w.VerificationScript) {
|
||||
paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
|
||||
} else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
|
||||
paramz = make([]manifest.Parameter, nSigs)
|
||||
for j := 0; j < nSigs; j++ {
|
||||
paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
|
||||
}
|
||||
}
|
||||
var verificationScript []byte
|
||||
for _, w := range tx.Scripts {
|
||||
if w.VerificationScript != nil && hash.Hash160(w.VerificationScript).Equals(signer.Account) {
|
||||
verificationScript = w.VerificationScript
|
||||
break
|
||||
}
|
||||
inv := io.NewBufBinWriter()
|
||||
for _, p := range paramz {
|
||||
p.Type.EncodeDefaultValue(inv.BinWriter)
|
||||
}
|
||||
if inv.Err != nil {
|
||||
return 0, fmt.Errorf("failed to create dummy invocation script (signer %d): %s", i, inv.Err.Error())
|
||||
}
|
||||
w.InvocationScript = inv.Bytes()
|
||||
}
|
||||
gasConsumed, err := l.bc.VerifyWitness(signer.Account, tx, &w, gasLimit)
|
||||
if err != nil && !errors.Is(err, core.ErrInvalidSignature) {
|
||||
return 0, err
|
||||
}
|
||||
gasLimit -= gasConsumed
|
||||
netFee += gasConsumed
|
||||
size += io.GetVarSize(w.VerificationScript) + io.GetVarSize(w.InvocationScript)
|
||||
}
|
||||
if l.bc.P2PSigExtensionsEnabled() {
|
||||
attrs := tx.GetAttributes(transaction.NotaryAssistedT)
|
||||
if len(attrs) != 0 {
|
||||
na := attrs[0].Value.(*transaction.NotaryAssisted)
|
||||
netFee += (int64(na.NKeys) + 1) * l.bc.GetNotaryServiceFeePerKey()
|
||||
if verificationScript == nil {
|
||||
gasConsumed, err := l.bc.VerifyWitness(signer.Account, tx, &tx.Scripts[i], l.maxGasInvoke)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid signature: %w", err)
|
||||
}
|
||||
netFee += gasConsumed
|
||||
size += io.GetVarSize([]byte{}) + io.GetVarSize(tx.Scripts[i].InvocationScript)
|
||||
continue
|
||||
}
|
||||
|
||||
fee, sizeDelta := fee.Calculate(ef, verificationScript)
|
||||
netFee += fee
|
||||
size += sizeDelta
|
||||
}
|
||||
|
||||
fee := l.bc.FeePerByte()
|
||||
netFee += int64(size) * fee
|
||||
|
||||
return netFee, nil
|
||||
}
|
||||
|
||||
// AddNetworkFee adds network fee for each witness script and optional extra
|
||||
// network fee to the transaction. `accs` is an array of signers' accounts.
|
||||
// Copied from neo-go with minor corrections (no need to support contract signers):
|
||||
// https://github.com/nspcc-dev/neo-go/blob/6ff11baa1b9e4c71ef0d1de43b92a8c541ca732c/pkg/rpc/client/rpc.go#L960
|
||||
func (l *localClient) AddNetworkFee(tx *transaction.Transaction, extraFee int64, accs ...*wallet.Account) error {
|
||||
if len(tx.Signers) != len(accs) {
|
||||
return errors.New("number of signers must match number of scripts")
|
||||
}
|
||||
|
||||
size := io.GetVarSize(tx)
|
||||
ef := l.bc.GetBaseExecFee()
|
||||
for i := range tx.Signers {
|
||||
netFee, sizeDelta := fee.Calculate(ef, accs[i].Contract.Script)
|
||||
tx.NetworkFee += netFee
|
||||
size += sizeDelta
|
||||
}
|
||||
|
||||
tx.NetworkFee += int64(size)*l.bc.FeePerByte() + extraFee
|
||||
return nil
|
||||
}
|
||||
|
||||
// getSigners returns an array of transaction signers and corresponding accounts from
|
||||
// given sender and cosigners. If cosigners list already contains sender, the sender
|
||||
// will be placed at the start of the list.
|
||||
// Copied from neo-go with minor corrections:
|
||||
// https://github.com/nspcc-dev/neo-go/blob/6ff11baa1b9e4c71ef0d1de43b92a8c541ca732c/pkg/rpc/client/rpc.go#L735
|
||||
func getSigners(sender *wallet.Account, cosigners []rpcclient.SignerAccount) ([]transaction.Signer, []*wallet.Account, error) {
|
||||
var (
|
||||
signers []transaction.Signer
|
||||
accounts []*wallet.Account
|
||||
)
|
||||
|
||||
from := sender.Contract.ScriptHash()
|
||||
s := transaction.Signer{
|
||||
Account: from,
|
||||
Scopes: transaction.None,
|
||||
}
|
||||
for _, c := range cosigners {
|
||||
if c.Signer.Account == from {
|
||||
s = c.Signer
|
||||
continue
|
||||
}
|
||||
signers = append(signers, c.Signer)
|
||||
accounts = append(accounts, c.Account)
|
||||
}
|
||||
signers = append([]transaction.Signer{s}, signers...)
|
||||
accounts = append([]*wallet.Account{sender}, accounts...)
|
||||
return signers, accounts, nil
|
||||
}
|
||||
|
||||
func (l *localClient) NEP17BalanceOf(h util.Uint160, acc util.Uint160) (int64, error) {
|
||||
res, err := invokeFunction(l, h, "balanceOf", []any{acc}, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if res.State != vmstate.Halt.String() || len(res.Stack) == 0 {
|
||||
return 0, fmt.Errorf("`balance`: invalid response (empty: %t): %s",
|
||||
len(res.Stack) == 0, res.FaultException)
|
||||
}
|
||||
bi, err := res.Stack[0].TryInteger()
|
||||
if err != nil || !bi.IsInt64() {
|
||||
return 0, fmt.Errorf("`balance`: invalid response")
|
||||
}
|
||||
return bi.Int64(), nil
|
||||
}
|
||||
|
||||
func (l *localClient) InvokeScript(script []byte, signers []transaction.Signer) (*result.Invoke, error) {
|
||||
lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash())
|
||||
if err != nil {
|
||||
|
|
|
@ -6,10 +6,13 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neo-go/pkg/config/netmode"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/state"
|
||||
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
|
||||
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
|
||||
"github.com/nspcc-dev/neo-go/pkg/network/payload"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
|
||||
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
|
||||
|
@ -26,12 +29,21 @@ type Client interface {
|
|||
invoker.RPCInvoke
|
||||
|
||||
GetBlockCount() (uint32, error)
|
||||
GetContractStateByID(int32) (*state.Contract, error)
|
||||
GetContractStateByHash(util.Uint160) (*state.Contract, error)
|
||||
GetNativeContracts() ([]state.NativeContract, error)
|
||||
GetNetwork() (netmode.Magic, error)
|
||||
GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error)
|
||||
GetVersion() (*result.Version, error)
|
||||
CreateTxFromScript([]byte, *wallet.Account, int64, int64, []rpcclient.SignerAccount) (*transaction.Transaction, error)
|
||||
NEP17BalanceOf(util.Uint160, util.Uint160) (int64, error)
|
||||
SendRawTransaction(*transaction.Transaction) (util.Uint256, error)
|
||||
GetCommittee() (keys.PublicKeys, error)
|
||||
CalculateNotaryFee(uint8) (int64, error)
|
||||
CalculateNetworkFee(tx *transaction.Transaction) (int64, error)
|
||||
AddNetworkFee(*transaction.Transaction, int64, ...*wallet.Account) error
|
||||
SignAndPushInvocationTx([]byte, *wallet.Account, int64, fixedn.Fixed8, []rpcclient.SignerAccount) (util.Uint256, error)
|
||||
SignAndPushP2PNotaryRequest(*transaction.Transaction, []byte, int64, int64, uint32, *wallet.Account) (*payload.P2PNotaryRequest, error)
|
||||
}
|
||||
|
||||
type hashVUBPair struct {
|
||||
|
|
|
@ -5,7 +5,6 @@ import (
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)

@ -15,9 +14,8 @@ func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err)

inv := invoker.New(c, nil)
r := management.NewReader(inv)

cs, err := r.GetContractByID(1)
cs, err := c.GetContractStateByID(1)
commonCmd.ExitOnErr(cmd, "can't get NNS contract info: %w", err)

nmHash, err := nnsResolveHash(inv, cs.Hash, netmapContract+".frostfs")
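Editor's note: the hunk above contrasts two ways of fetching the NNS contract state (contract ID 1): through a management reader built on an invoker, and through the local Client interface's GetContractStateByID. The sketch below shows only the reader-based form, using the neo-go packages already imported in the hunk; the nnsContractHash helper name and the use of a plain *rpcclient.Client are illustrative assumptions, not code from this repository.

package main

import (
"fmt"

"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
)

// nnsContractHash resolves the NNS contract state via a read-only invoker,
// mirroring the reader-based branch of the diff above.
func nnsContractHash(c *rpcclient.Client) error {
inv := invoker.New(c, nil)          // read-only invoker over the RPC client
r := management.NewReader(inv)      // native Management contract reader
cs, err := r.GetContractByID(1)     // contract ID 1 is NNS
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}
fmt.Println("NNS contract hash:", cs.Hash.StringLE())
return nil
}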
@ -1,15 +1,11 @@
package morph

import (
"bytes"
"fmt"
"strconv"
"strings"
"text/tabwriter"

commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/policy"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"

@ -56,32 +52,3 @@ func setPolicyCmd(cmd *cobra.Command, args []string) error {

return wCtx.awaitTx()
}

func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
c, err := getN3Client(viper.GetViper())
commonCmd.ExitOnErr(cmd, "can't create N3 client:", err)

inv := invoker.New(c, nil)
policyContract := policy.NewReader(inv)

execFee, err := policyContract.GetExecFeeFactor()
commonCmd.ExitOnErr(cmd, "can't get execution fee factor:", err)

feePerByte, err := policyContract.GetFeePerByte()
commonCmd.ExitOnErr(cmd, "can't get fee per byte:", err)

storagePrice, err := policyContract.GetStoragePrice()
commonCmd.ExitOnErr(cmd, "can't get storage price:", err)

buf := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)

_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
_, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))

_ = tw.Flush()
cmd.Print(buf.String())

return nil
}
@ -7,7 +7,6 @@ import (
netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/spf13/cobra"

@ -34,8 +33,7 @@ func removeNodesCmd(cmd *cobra.Command, args []string) error {
}
defer wCtx.close()

r := management.NewReader(wCtx.ReadOnlyInvoker)
cs, err := r.GetContractByID(1)
cs, err := wCtx.Client.GetContractStateByID(1)
if err != nil {
return fmt.Errorf("can't get NNS contract info: %w", err)
}
@ -146,15 +146,6 @@ var (
},
}

dumpPolicy = &cobra.Command{
Use: "dump-policy",
Short: "Dump FrostFS policy",
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(endpointFlag, cmd.Flags().Lookup(endpointFlag))
},
RunE: dumpPolicyCmd,
}

dumpContractHashesCmd = &cobra.Command{
Use: "dump-hashes",
Short: "Dump deployed contract hashes",

@ -248,7 +239,6 @@ func init() {
initForceNewEpochCmd()
initRemoveNodesCmd()
initSetPolicyCmd()
initDumpPolicyCmd()
initDumpContractHashesCmd()
initDumpNetworkConfigCmd()
initSetConfigCmd()

@ -330,7 +320,6 @@ func initSetConfigCmd() {
setConfig.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
setConfig.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
setConfig.Flags().Bool(forceConfigSet, false, "Force setting not well-known configuration key")
setConfig.Flags().String(localDumpFlag, "", "Path to the blocks dump file")
}

func initDumpNetworkConfigCmd() {

@ -348,26 +337,18 @@ func initSetPolicyCmd() {
RootCmd.AddCommand(setPolicy)
setPolicy.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
setPolicy.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
setPolicy.Flags().String(localDumpFlag, "", "Path to the blocks dump file")
}

func initDumpPolicyCmd() {
RootCmd.AddCommand(dumpPolicy)
dumpPolicy.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
}

func initRemoveNodesCmd() {
RootCmd.AddCommand(removeNodes)
removeNodes.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
removeNodes.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
removeNodes.Flags().String(localDumpFlag, "", "Path to the blocks dump file")
}

func initForceNewEpochCmd() {
RootCmd.AddCommand(forceNewEpoch)
forceNewEpoch.Flags().String(alphabetWalletsFlag, "", "Path to alphabet wallets dir")
forceNewEpoch.Flags().StringP(endpointFlag, "r", "", "N3 RPC node endpoint")
forceNewEpoch.Flags().String(localDumpFlag, "", "Path to the blocks dump file")
}

func initGenerateStorageCmd() {
@ -15,14 +15,16 @@ import (
"github.com/spf13/viper"
)

var rootCmd = &cobra.Command{
Use: "frostfs-adm",
Short: "FrostFS Administrative Tool",
Long: `FrostFS Administrative Tool provides functions to setup and
var (
rootCmd = &cobra.Command{
Use: "frostfs-adm",
Short: "FrostFS Administrative Tool",
Long: `FrostFS Administrative Tool provides functions to setup and
manage FrostFS network deployment.`,
RunE: entryPoint,
SilenceUsage: true,
}
RunE: entryPoint,
SilenceUsage: true,
}
)

func init() {
cobra.OnInitialize(func() { initConfig(rootCmd) })
@ -145,7 +145,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
}

out := applyTemplate(c)
fatalOnErr(os.WriteFile(outPath, out, 0o644))
fatalOnErr(os.WriteFile(outPath, out, 0644))

cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
}
@ -6,8 +6,6 @@ import (
"errors"
"fmt"
"io"
"sort"
"strings"

"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"

@ -71,16 +69,6 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
return
}

// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
sort.Slice(list, func(i, j int) bool {
lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
return strings.Compare(lhs, rhs) < 0
})
return list
}

// PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct {
Client *client.Client
@ -387,23 +375,30 @@ func (x *PutObjectPrm) PrepareLocally() {
}

func (x *PutObjectPrm) convertToSDKPrm(ctx context.Context) (client.PrmObjectPutInit, error) {
putPrm := client.PrmObjectPutInit{
XHeaders: x.xHeaders,
BearerToken: x.bearerToken,
Local: x.local,
CopiesNumber: x.copyNum,
var putPrm client.PrmObjectPutInit
if !x.prepareLocally && x.sessionToken != nil {
putPrm.WithinSession(*x.sessionToken)
}

if x.bearerToken != nil {
putPrm.WithBearerToken(*x.bearerToken)
}

if x.local {
putPrm.MarkLocal()
}

putPrm.WithXHeaders(x.xHeaders...)
putPrm.SetCopiesNumberByVectors(x.copyNum)

if x.prepareLocally {
res, err := x.cli.NetworkInfo(ctx, client.PrmNetworkInfo{})
if err != nil {
return client.PrmObjectPutInit{}, err
}
putPrm.MaxSize = res.Info().MaxObjectSize()
putPrm.EpochSource = epochSource(res.Info().CurrentEpoch())
putPrm.WithoutHomomorphHash = res.Info().HomomorphicHashingDisabled()
} else {
putPrm.Session = x.sessionToken
putPrm.WithObjectMaxSize(res.Info().MaxObjectSize())
putPrm.WithEpochSource(epochSource(res.Info().CurrentEpoch()))
putPrm.WithoutHomomorphicHash(res.Info().HomomorphicHashingDisabled())
}
return putPrm, nil
}
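Editor's note: the hunk above shows the same PrmObjectPutInit being built either with exported struct fields or with setter methods. Below is a minimal sketch of the setter-style form only, using exactly the calls visible in the diff; the buildPutPrm helper, its parameter names and their types (session.Object, bearer.Token from frostfs-sdk-go) are assumptions for illustration, and imports are taken to be those of the file shown in the hunk.

// buildPutPrm assembles a put-request parameter set the way the diff above does it.
func buildPutPrm(tok *session.Object, btok *bearer.Token, local bool, xhdrs []string, copyNum []uint32) client.PrmObjectPutInit {
var putPrm client.PrmObjectPutInit

if tok != nil {
putPrm.WithinSession(*tok) // attach the session token, if any
}
if btok != nil {
putPrm.WithBearerToken(*btok) // attach the bearer token, if any
}
if local {
putPrm.MarkLocal() // ask the node to process the request locally
}
putPrm.WithXHeaders(xhdrs...)            // extra X-headers for the request
putPrm.SetCopiesNumberByVectors(copyNum) // copies number per placement vector

return putPrm
}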
@ -689,15 +684,24 @@ func (x SearchObjectsRes) IDList() []oid.ID {
//
// Returns any error which prevented the operation from completing correctly in error return.
func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes, error) {
cliPrm := client.PrmObjectSearch{
XHeaders: prm.xHeaders,
Local: prm.local,
BearerToken: prm.bearerToken,
Session: prm.sessionToken,
ContainerID: &prm.cnrID,
Filters: prm.filters,
var cliPrm client.PrmObjectSearch
cliPrm.InContainer(prm.cnrID)
cliPrm.SetFilters(prm.filters)

if prm.sessionToken != nil {
cliPrm.WithinSession(*prm.sessionToken)
}

if prm.bearerToken != nil {
cliPrm.WithBearerToken(*prm.bearerToken)
}

if prm.local {
cliPrm.MarkLocal()
}

cliPrm.WithXHeaders(prm.xHeaders...)

rdr, err := prm.cli.ObjectSearchInit(ctx, cliPrm)
if err != nil {
return nil, fmt.Errorf("init object search: %w", err)

@ -723,11 +727,6 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
return nil, fmt.Errorf("read object list: %w", err)
}

sort.Slice(list, func(i, j int) bool {
lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
return strings.Compare(lhs, rhs) < 0
})

return &SearchObjectsRes{
ids: list,
}, nil
@ -43,28 +43,27 @@ func getSDKClientByFlag(cmd *cobra.Command, key *ecdsa.PrivateKey, endpointFlag

// GetSDKClient returns default frostfs-sdk-go client.
func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey, addr network.Address) (*client.Client, error) {
var c client.Client
var (
c client.Client
prmInit client.PrmInit
prmDial client.PrmDial
)

prmInit := client.PrmInit{
Key: *key,
}

prmDial := client.PrmDial{
Endpoint: addr.URIAddr(),
GRPCDialOptions: []grpc.DialOption{
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
},
}
prmInit.SetDefaultPrivateKey(*key)
prmInit.ResolveFrostFSFailures()
prmDial.SetServerURI(addr.URIAddr())
if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
// In CLI we can only set a timeout for the whole operation.
// By also setting stream timeout we ensure that no operation hands
// for too long.
prmDial.DialTimeout = timeout
prmDial.StreamTimeout = timeout
prmDial.SetTimeout(timeout)
prmDial.SetStreamTimeout(timeout)

common.PrintVerbose(cmd, "Set request timeout to %s.", timeout)
}
prmDial.SetGRPCDialOptions(
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()))

c.Init(prmInit)

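Editor's note: the hunk above contrasts struct-literal and setter-style configuration of the SDK client. The sketch below condenses the setter-style branch, using only the calls visible in the diff; the setupClient name and its parameters are illustrative, the gRPC tracing interceptors and the dial step that follows c.Init are outside this sketch, and imports are assumed to be those of the file shown in the hunk.

// setupClient prepares (but does not dial) an SDK client the way the diff above does it.
func setupClient(key ecdsa.PrivateKey, endpoint string, timeout time.Duration) client.Client {
var (
c       client.Client
prmInit client.PrmInit
prmDial client.PrmDial
)

prmInit.SetDefaultPrivateKey(key) // key used to sign requests by default
prmInit.ResolveFrostFSFailures()  // surface FrostFS status failures as errors
prmDial.SetServerURI(endpoint)

if timeout > 0 {
// One timeout value covers both the whole operation and streams.
prmDial.SetTimeout(timeout)
prmDial.SetStreamTimeout(timeout)
}

c.Init(prmInit)
return c // the dial step is not shown in the hunk and is omitted here
}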
@ -39,7 +39,7 @@ var accountingBalanceCmd = &cobra.Command{

var prm internalclient.BalanceOfPrm
prm.SetClient(cli)
prm.Account = idUser
prm.SetAccount(idUser)

res, err := internalclient.BalanceOf(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
@ -1,6 +1,7 @@
|
|||
package accounting
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
@ -17,7 +18,9 @@ var Cmd = &cobra.Command{
|
|||
_ = viper.BindPFlag(commonflags.WalletPath, flags.Lookup(commonflags.WalletPath))
|
||||
_ = viper.BindPFlag(commonflags.Account, flags.Lookup(commonflags.Account))
|
||||
_ = viper.BindPFlag(commonflags.RPC, flags.Lookup(commonflags.RPC))
|
||||
common.StartClientCommandSpan(cmd)
|
||||
},
|
||||
PersistentPostRun: common.StopClientCommandSpan,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -106,7 +106,7 @@ func createEACL(cmd *cobra.Command, _ []string) {
|
|||
return
|
||||
}
|
||||
|
||||
err = os.WriteFile(outArg, buf.Bytes(), 0o644)
|
||||
err = os.WriteFile(outArg, buf.Bytes(), 0644)
|
||||
if err != nil {
|
||||
cmd.PrintErrln(err)
|
||||
os.Exit(1)
|
||||
|
|
|
@ -130,6 +130,6 @@ func createToken(cmd *cobra.Command, _ []string) {
|
|||
}
|
||||
|
||||
out, _ := cmd.Flags().GetString(outFlag)
|
||||
err = os.WriteFile(out, data, 0o644)
|
||||
err = os.WriteFile(out, data, 0644)
|
||||
commonCmd.ExitOnErr(cmd, "can't write token to file: %w", err)
|
||||
}
|
||||
|
|
|
@ -51,7 +51,7 @@ var getContainerInfoCmd = &cobra.Command{
|
|||
data = cnr.Marshal()
|
||||
}
|
||||
|
||||
err = os.WriteFile(containerPathTo, data, 0o644)
|
||||
err = os.WriteFile(containerPathTo, data, 0644)
|
||||
commonCmd.ExitOnErr(cmd, "can't write container to file: %w", err)
|
||||
}
|
||||
},
|
||||
|
|
|
@ -52,7 +52,7 @@ var getExtendedACLCmd = &cobra.Command{
|
|||
|
||||
cmd.Println("dumping data to file:", containerPathTo)
|
||||
|
||||
err = os.WriteFile(containerPathTo, data, 0o644)
|
||||
err = os.WriteFile(containerPathTo, data, 0644)
|
||||
commonCmd.ExitOnErr(cmd, "could not write eACL to file: %w", err)
|
||||
},
|
||||
}
|
||||
|
|
|
@ -47,7 +47,7 @@ var listContainersCmd = &cobra.Command{

var prm internalclient.ListContainersPrm
prm.SetClient(cli)
prm.Account = idUser
prm.SetAccount(idUser)

res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

@ -56,7 +56,7 @@ var listContainersCmd = &cobra.Command{
Client: cli,
}

containerIDs := res.SortedIDList()
containerIDs := res.IDList()
for _, cnrID := range containerIDs {
if flagVarListName == "" && !flagVarListPrintAttr {
cmd.Println(cnrID.String())
@ -20,7 +20,7 @@ var containerNodesCmd = &cobra.Command{
Short: "Show nodes for container",
Long: "Show nodes taking part in a container at the current epoch.",
Run: func(cmd *cobra.Command, args []string) {
cnr, pkey := getContainer(cmd)
var cnr, pkey = getContainer(cmd)

if pkey == nil {
pkey = key.GetOrGenerate(cmd)
@ -1,6 +1,7 @@
|
|||
package container
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -15,7 +16,9 @@ var Cmd = &cobra.Command{
|
|||
// the viper before execution
|
||||
commonflags.Bind(cmd)
|
||||
commonflags.BindAPI(cmd)
|
||||
common.StartClientCommandSpan(cmd)
|
||||
},
|
||||
PersistentPostRun: common.StopClientCommandSpan,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -1,94 +0,0 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
ruleFlag = "rule"
|
||||
)
|
||||
|
||||
var addRuleCmd = &cobra.Command{
|
||||
Use: "add-rule",
|
||||
Short: "Add local override",
|
||||
Long: "Add local APE rule to a node with following format:\n<action>[:action_detail] <operation> [<condition1> ...] <resource>",
|
||||
Example: `allow Object.Get *
|
||||
deny Object.Get EbxzAdz5LB4uqxuz6crWKAumBNtZyK2rKsqQP7TdZvwr/*
|
||||
deny:QuotaLimitReached Object.Put Object.Resource:Department=HR *
|
||||
`,
|
||||
Run: addRule,
|
||||
}
|
||||
|
||||
func prettyJSONFormat(cmd *cobra.Command, serializedChain []byte) string {
|
||||
wr := bytes.NewBufferString("")
|
||||
err := json.Indent(wr, serializedChain, "", " ")
|
||||
commonCmd.ExitOnErr(cmd, "%w", err)
|
||||
return wr.String()
|
||||
}
|
||||
|
||||
func addRule(cmd *cobra.Command, _ []string) {
|
||||
pk := key.Get(cmd)
|
||||
|
||||
chainID, _ := cmd.Flags().GetString(chainIDFlag)
|
||||
|
||||
var cnr cid.ID
|
||||
cidStr, _ := cmd.Flags().GetString(commonflags.CIDFlag)
|
||||
commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(cidStr))
|
||||
|
||||
rawCID := make([]byte, sha256.Size)
|
||||
cnr.Encode(rawCID)
|
||||
|
||||
rule, _ := cmd.Flags().GetString(ruleFlag)
|
||||
|
||||
chain := new(apechain.Chain)
|
||||
commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, []string{rule}))
|
||||
chain.ID = apechain.ID(chainID)
|
||||
serializedChain := chain.Bytes()
|
||||
|
||||
cmd.Println("Container ID: " + cidStr)
|
||||
cmd.Println("Parsed chain:\n" + prettyJSONFormat(cmd, serializedChain))
|
||||
|
||||
req := &control.AddChainLocalOverrideRequest{
|
||||
Body: &control.AddChainLocalOverrideRequest_Body{
|
||||
ContainerId: rawCID,
|
||||
Chain: serializedChain,
|
||||
},
|
||||
}
|
||||
|
||||
signRequest(cmd, pk, req)
|
||||
|
||||
cli := getClient(cmd, pk)
|
||||
|
||||
var resp *control.AddChainLocalOverrideResponse
|
||||
var err error
|
||||
err = cli.ExecRaw(func(client *client.Client) error {
|
||||
resp, err = control.AddChainLocalOverride(client, req)
|
||||
return err
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
|
||||
|
||||
cmd.Println("Rule has been added. Chain id: ", resp.GetBody().GetChainId())
|
||||
}
|
||||
|
||||
func initControlAddRuleCmd() {
|
||||
initControlFlags(addRuleCmd)
|
||||
|
||||
ff := addRuleCmd.Flags()
|
||||
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
ff.String(ruleFlag, "", "Rule statement")
|
||||
ff.String(chainIDFlag, "", "Assign ID to the parsed chain")
|
||||
}
|
|
@ -220,12 +220,12 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu
if resp.GetBody().GetStatus() != control.GetShardEvacuationStatusResponse_Body_RUNNING ||
resp.GetBody().GetDuration() == nil ||
resp.GetBody().GetTotal() == 0 ||
resp.GetBody().GetEvacuated()+resp.GetBody().GetFailed()+resp.Body.GetSkipped() == 0 {
resp.GetBody().GetEvacuated()+resp.GetBody().GetFailed() == 0 {
return
}

durationSeconds := float64(resp.GetBody().GetDuration().GetSeconds())
evacuated := float64(resp.GetBody().GetEvacuated() + resp.GetBody().GetFailed() + resp.GetBody().GetSkipped())
evacuated := float64(resp.GetBody().GetEvacuated() + resp.GetBody().GetFailed())
avgObjEvacuationTimeSeconds := durationSeconds / evacuated
objectsLeft := float64(resp.GetBody().GetTotal()) - evacuated
leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft

@ -285,11 +285,10 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR
}

func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d.",
sb.WriteString(fmt.Sprintf(" Evacuated %d object out of %d, failed to evacuate %d objects.",
resp.GetBody().GetEvacuated(),
resp.GetBody().GetTotal(),
resp.GetBody().GetFailed(),
resp.GetBody().GetSkipped()))
resp.Body.GetTotal(),
resp.Body.GetFailed()))
}

func initControlEvacuationShardCmd() {
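Editor's note: the estimation arithmetic in appendEstimation above is simply "average time per processed object times objects left". A toy restatement of that formula, with an illustrative helper name not present in the repository:

// estimateLeftSeconds mirrors the arithmetic of appendEstimation shown above.
func estimateLeftSeconds(durationSeconds float64, evacuated, failed, skipped, total uint64) float64 {
processed := float64(evacuated + failed + skipped)
if processed == 0 || total == 0 {
return 0 // nothing processed yet, no estimate possible
}
avgPerObject := durationSeconds / processed
return avgPerObject * (float64(total) - processed)
}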
@ -1,69 +0,0 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var getRuleCmd = &cobra.Command{
|
||||
Use: "get-rule",
|
||||
Short: "Get local override",
|
||||
Long: "Get local APE override of the node",
|
||||
Run: getRule,
|
||||
}
|
||||
|
||||
func getRule(cmd *cobra.Command, _ []string) {
|
||||
pk := key.Get(cmd)
|
||||
|
||||
var cnr cid.ID
|
||||
cidStr, _ := cmd.Flags().GetString(commonflags.CIDFlag)
|
||||
commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(cidStr))
|
||||
|
||||
rawCID := make([]byte, sha256.Size)
|
||||
cnr.Encode(rawCID)
|
||||
|
||||
chainID, _ := cmd.Flags().GetString(chainIDFlag)
|
||||
|
||||
req := &control.GetChainLocalOverrideRequest{
|
||||
Body: &control.GetChainLocalOverrideRequest_Body{
|
||||
ContainerId: rawCID,
|
||||
ChainId: chainID,
|
||||
},
|
||||
}
|
||||
|
||||
signRequest(cmd, pk, req)
|
||||
|
||||
cli := getClient(cmd, pk)
|
||||
|
||||
var resp *control.GetChainLocalOverrideResponse
|
||||
var err error
|
||||
err = cli.ExecRaw(func(client *client.Client) error {
|
||||
resp, err = control.GetChainLocalOverride(client, req)
|
||||
return err
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
|
||||
|
||||
var chain apechain.Chain
|
||||
commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(resp.GetBody().GetChain()))
|
||||
|
||||
// TODO (aarifullin): make pretty-formatted output for chains.
|
||||
cmd.Println("Parsed chain:\n" + prettyJSONFormat(cmd, chain.Bytes()))
|
||||
}
|
||||
|
||||
func initControGetRuleCmd() {
|
||||
initControlFlags(getRuleCmd)
|
||||
|
||||
ff := getRuleCmd.Flags()
|
||||
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
ff.String(chainIDFlag, "", "Chain id")
|
||||
}
|
|
@ -1,9 +1,6 @@
package control

import (
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/spf13/cobra"
)
import "github.com/spf13/cobra"

var irCmd = &cobra.Command{
Use: "ir",

@ -22,13 +19,3 @@ func initControlIRCmd() {
initControlIRHealthCheckCmd()
initControlIRRemoveContainerCmd()
}

func printVUB(cmd *cobra.Command, vub uint32) {
cmd.Printf("Transaction's valid until block is %d\n", vub)
}

func parseVUB(cmd *cobra.Command) uint32 {
vub, err := cmd.Flags().GetUint32(irFlagNameVUB)
commonCmd.ExitOnErr(cmd, "invalid valid until block value: %w", err)
return vub
}
@ -1,6 +1,8 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
|
||||
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
|
@ -27,13 +29,12 @@ To check removal status "frostfs-cli container list" command can be used.`,
|
|||
}
|
||||
|
||||
func initControlIRRemoveContainerCmd() {
|
||||
initControlIRFlags(removeContainerCmd)
|
||||
initControlFlags(removeContainerCmd)
|
||||
|
||||
flags := removeContainerCmd.Flags()
|
||||
flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
flags.String(ownerFlag, "", "Container owner's wallet address.")
|
||||
removeContainerCmd.MarkFlagsMutuallyExclusive(commonflags.CIDFlag, ownerFlag)
|
||||
removeContainerCmd.MarkFlagsOneRequired(commonflags.CIDFlag, ownerFlag)
|
||||
}
|
||||
|
||||
func removeContainer(cmd *cobra.Command, _ []string) {
|
||||
|
@ -59,7 +60,6 @@ func removeContainer(cmd *cobra.Command, _ []string) {
|
|||
} else {
|
||||
cmd.Println("User containers sheduled to removal")
|
||||
}
|
||||
printVUB(cmd, resp.GetBody().GetVub())
|
||||
}
|
||||
|
||||
func prepareRemoveContainerRequest(cmd *cobra.Command) *ircontrol.RemoveContainerRequest {
|
||||
|
@ -73,6 +73,10 @@ func prepareRemoveContainerRequest(cmd *cobra.Command) *ircontrol.RemoveContaine
|
|||
ownerStr, err := cmd.Flags().GetString(ownerFlag)
|
||||
commonCmd.ExitOnErr(cmd, "failed to get owner: ", err)
|
||||
|
||||
if len(ownerStr) == 0 && len(cidStr) == 0 {
|
||||
commonCmd.ExitOnErr(cmd, "invalid usage: %w", errors.New("neither owner's wallet address nor container ID are specified"))
|
||||
}
|
||||
|
||||
if len(ownerStr) > 0 {
|
||||
var owner user.ID
|
||||
commonCmd.ExitOnErr(cmd, "invalid owner ID: %w", owner.DecodeString(ownerStr))
|
||||
|
@ -86,8 +90,5 @@ func prepareRemoveContainerRequest(cmd *cobra.Command) *ircontrol.RemoveContaine
|
|||
commonCmd.ExitOnErr(cmd, "invalid container ID: %w", containerID.DecodeString(cidStr))
|
||||
req.Body.ContainerId = containerID[:]
|
||||
}
|
||||
|
||||
req.Body.Vub = parseVUB(cmd)
|
||||
|
||||
return req
|
||||
}
|
||||
|
|
|
@ -20,7 +20,7 @@ var removeNodeCmd = &cobra.Command{
}

func initControlIRRemoveNodeCmd() {
initControlIRFlags(removeNodeCmd)
initControlFlags(removeNodeCmd)

flags := removeNodeCmd.Flags()
flags.String("node", "", "Node public key as a hex string")

@ -41,7 +41,6 @@ func removeNode(cmd *cobra.Command, _ []string) {
req := new(ircontrol.RemoveNodeRequest)
req.SetBody(&ircontrol.RemoveNodeRequest_Body{
Key: nodeKey,
Vub: parseVUB(cmd),
})

commonCmd.ExitOnErr(cmd, "could not sign request: %w", ircontrolsrv.SignMessage(pk, req))

@ -56,5 +55,4 @@ func removeNode(cmd *cobra.Command, _ []string) {
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())

cmd.Println("Node removed")
printVUB(cmd, resp.GetBody().GetVub())
}
@ -17,7 +17,7 @@ var tickEpochCmd = &cobra.Command{
}

func initControlIRTickEpochCmd() {
initControlIRFlags(tickEpochCmd)
initControlFlags(tickEpochCmd)
}

func tickEpoch(cmd *cobra.Command, _ []string) {

@ -25,9 +25,7 @@ func tickEpoch(cmd *cobra.Command, _ []string) {
c := getClient(cmd, pk)

req := new(ircontrol.TickEpochRequest)
req.SetBody(&ircontrol.TickEpochRequest_Body{
Vub: parseVUB(cmd),
})
req.SetBody(new(ircontrol.TickEpochRequest_Body))

err := ircontrolsrv.SignMessage(pk, req)
commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)

@ -42,5 +40,4 @@ func tickEpoch(cmd *cobra.Command, _ []string) {
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())

cmd.Println("Epoch tick requested")
printVUB(cmd, resp.GetBody().GetVub())
}
@ -1,72 +0,0 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var listRulesCmd = &cobra.Command{
|
||||
Use: "list-rules",
|
||||
Short: "List local overrides",
|
||||
Long: "List local APE overrides of the node",
|
||||
Run: listRules,
|
||||
}
|
||||
|
||||
func listRules(cmd *cobra.Command, _ []string) {
|
||||
pk := key.Get(cmd)
|
||||
|
||||
var cnr cid.ID
|
||||
cidStr, _ := cmd.Flags().GetString(commonflags.CIDFlag)
|
||||
commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(cidStr))
|
||||
|
||||
rawCID := make([]byte, sha256.Size)
|
||||
cnr.Encode(rawCID)
|
||||
|
||||
req := &control.ListChainLocalOverridesRequest{
|
||||
Body: &control.ListChainLocalOverridesRequest_Body{
|
||||
ContainerId: rawCID,
|
||||
},
|
||||
}
|
||||
|
||||
signRequest(cmd, pk, req)
|
||||
|
||||
cli := getClient(cmd, pk)
|
||||
|
||||
var resp *control.ListChainLocalOverridesResponse
|
||||
var err error
|
||||
err = cli.ExecRaw(func(client *client.Client) error {
|
||||
resp, err = control.ListChainLocalOverrides(client, req)
|
||||
return err
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
|
||||
|
||||
chains := resp.GetBody().GetChains()
|
||||
if len(chains) == 0 {
|
||||
cmd.Println("Local overrides are not defined for the container.")
|
||||
return
|
||||
}
|
||||
|
||||
for _, c := range chains {
|
||||
// TODO (aarifullin): make pretty-formatted output for chains.
|
||||
var chain apechain.Chain
|
||||
commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(c))
|
||||
cmd.Println("Parsed chain:\n" + prettyJSONFormat(cmd, chain.Bytes()))
|
||||
}
|
||||
}
|
||||
|
||||
func initControlListRulesCmd() {
|
||||
initControlFlags(listRulesCmd)
|
||||
|
||||
ff := listRulesCmd.Flags()
|
||||
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
package control
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
chainIDFlag = "chain-id"
|
||||
)
|
||||
|
||||
var removeRuleCmd = &cobra.Command{
|
||||
Use: "remove-rule",
|
||||
Short: "Remove local override",
|
||||
Long: "Remove local APE override of the node",
|
||||
Run: removeRule,
|
||||
}
|
||||
|
||||
func removeRule(cmd *cobra.Command, _ []string) {
|
||||
pk := key.Get(cmd)
|
||||
|
||||
var cnr cid.ID
|
||||
cidStr, _ := cmd.Flags().GetString(commonflags.CIDFlag)
|
||||
commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(cidStr))
|
||||
|
||||
rawCID := make([]byte, sha256.Size)
|
||||
cnr.Encode(rawCID)
|
||||
|
||||
chainID, _ := cmd.Flags().GetString(chainIDFlag)
|
||||
|
||||
req := &control.RemoveChainLocalOverrideRequest{
|
||||
Body: &control.RemoveChainLocalOverrideRequest_Body{
|
||||
ContainerId: rawCID,
|
||||
ChainId: chainID,
|
||||
},
|
||||
}
|
||||
|
||||
signRequest(cmd, pk, req)
|
||||
|
||||
cli := getClient(cmd, pk)
|
||||
|
||||
var resp *control.RemoveChainLocalOverrideResponse
|
||||
var err error
|
||||
err = cli.ExecRaw(func(client *client.Client) error {
|
||||
resp, err = control.RemoveChainLocalOverride(client, req)
|
||||
return err
|
||||
})
|
||||
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
|
||||
|
||||
verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
|
||||
|
||||
if resp.GetBody().Removed {
|
||||
cmd.Println("Rule has been removed.")
|
||||
} else {
|
||||
cmd.Println("Rule has not been removed.")
|
||||
}
|
||||
}
|
||||
|
||||
func initControlRemoveRuleCmd() {
|
||||
initControlFlags(removeRuleCmd)
|
||||
|
||||
ff := removeRuleCmd.Flags()
|
||||
ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
|
||||
ff.String(chainIDFlag, "", "Chain id")
|
||||
}
|
|
@ -34,10 +34,6 @@ func init() {
shardsCmd,
synchronizeTreeCmd,
irCmd,
addRuleCmd,
removeRuleCmd,
listRulesCmd,
getRuleCmd,
)

initControlHealthCheckCmd()

@ -46,8 +42,4 @@ func init() {
initControlShardsCmd()
initControlSynchronizeTreeCmd()
initControlIRCmd()
initControlAddRuleCmd()
initControlRemoveRuleCmd()
initControlListRulesCmd()
initControGetRuleCmd()
}
@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"fmt"
"sort"
"strings"

rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"

@ -50,14 +49,11 @@ func listShards(cmd *cobra.Command, _ []string) {

verifyResponse(cmd, resp.GetSignature(), resp.GetBody())

shards := resp.GetBody().GetShards()
sortShardsByID(shards)

isJSON, _ := cmd.Flags().GetBool(commonflags.JSON)
if isJSON {
prettyPrintShardsJSON(cmd, shards)
prettyPrintShardsJSON(cmd, resp.GetBody().GetShards())
} else {
prettyPrintShards(cmd, shards)
prettyPrintShards(cmd, resp.GetBody().GetShards())
}
}

@ -119,9 +115,3 @@ func shardModeToString(m control.ShardMode) string {

return "unknown"
}

func sortShardsByID(ii []*control.ShardInfo) {
sort.Slice(ii, func(i, j int) bool {
return bytes.Compare(ii[i].Shard_ID, ii[j].Shard_ID) < 0
})
}
@ -1,9 +1,7 @@
package control

import (
"bytes"
"fmt"
"sort"
"strings"

rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"

@ -169,9 +167,5 @@ func getShardIDList(cmd *cobra.Command) [][]byte {
res = append(res, raw)
}

sort.Slice(res, func(i, j int) bool {
return bytes.Compare(res[i], res[j]) < 0
})

return res
}
@ -14,10 +14,6 @@ import (
"github.com/spf13/cobra"
)

const (
irFlagNameVUB = "vub"
)

func initControlFlags(cmd *cobra.Command) {
ff := cmd.Flags()
ff.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage)

@ -26,13 +22,6 @@ func initControlFlags(cmd *cobra.Command) {
ff.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage)
}

func initControlIRFlags(cmd *cobra.Command) {
initControlFlags(cmd)

ff := cmd.Flags()
ff.Uint32(irFlagNameVUB, 0, "Valid until block value for notary transaction")
}

func signRequest(cmd *cobra.Command, pk *ecdsa.PrivateKey, req controlSvc.SignedMessage) {
err := controlSvc.SignMessage(pk, req)
commonCmd.ExitOnErr(cmd, "could not sign request: %w", err)
@ -1,6 +1,7 @@
|
|||
package netmap
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -14,7 +15,9 @@ var Cmd = &cobra.Command{
|
|||
// the viper before execution
|
||||
commonflags.Bind(cmd)
|
||||
commonflags.BindAPI(cmd)
|
||||
common.StartClientCommandSpan(cmd)
|
||||
},
|
||||
PersistentPostRun: common.StopClientCommandSpan,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -132,7 +132,7 @@ func createOutWriter(cmd *cobra.Command, filename string) (out io.Writer, closer
|
|||
out = os.Stdout
|
||||
closer = func() {}
|
||||
} else {
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't open file '%s': %w", filename, err))
|
||||
}
|
||||
|
|
|
@ -94,7 +94,7 @@ var objectLockCmd = &cobra.Command{

obj := objectSDK.New()
obj.SetContainerID(cnr)
obj.SetOwnerID(idOwner)
obj.SetOwnerID(&idOwner)
obj.SetType(objectSDK.TypeLock)
obj.SetAttributes(expirationAttr)
obj.SetPayload(lock.Marshal())
@ -31,10 +31,10 @@ const (
)

type objectNodesInfo struct {
containerID cid.ID
objectID oid.ID
relatedObjectIDs []oid.ID
isLockOrTombstone bool
containerID cid.ID
objectID oid.ID
relatedObjectIDs []oid.ID
isLock bool
}

type boolError struct {

@ -101,9 +101,9 @@ func getObjectInfo(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
res, err := internalclient.HeadObject(cmd.Context(), prmHead)
if err == nil {
return &objectNodesInfo{
containerID: cnrID,
objectID: objID,
isLockOrTombstone: res.Header().Type() == objectSDK.TypeLock || res.Header().Type() == objectSDK.TypeTombstone,
containerID: cnrID,
objectID: objID,
isLock: res.Header().Type() == objectSDK.TypeLock,
}
}

@ -191,7 +191,7 @@ func getRequiredPlacement(cmd *cobra.Command, objInfo *objectNodesInfo, placemen
numOfReplicas := placementPolicy.ReplicaNumberByIndex(repIdx)
var nodeIdx uint32
for _, n := range rep {
if !objInfo.isLockOrTombstone && nodeIdx == numOfReplicas { // lock and tombstone objects should be on all container nodes
if !objInfo.isLock && nodeIdx == numOfReplicas { //lock object should be on all container nodes
break
}
nodes[n.Hash()] = n

@ -213,8 +213,7 @@ func getRequiredPlacement(cmd *cobra.Command, objInfo *objectNodesInfo, placemen
}

func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacement map[uint64]netmapSDK.NodeInfo,
pk *ecdsa.PrivateKey, objInfo *objectNodesInfo,
) map[uint64]boolError {
pk *ecdsa.PrivateKey, objInfo *objectNodesInfo) map[uint64]boolError {
result := make(map[uint64]boolError)
resultMtx := &sync.Mutex{}
@ -93,7 +93,7 @@ func putObject(cmd *cobra.Command, _ []string) {
attrs := getAllObjectAttributes(cmd)

obj.SetContainerID(cnr)
obj.SetOwnerID(ownerID)
obj.SetOwnerID(&ownerID)
obj.SetAttributes(attrs...)

notificationInfo, err := parseObjectNotifications(cmd)

@ -160,7 +160,7 @@ func readFilePayload(filename string, cmd *cobra.Command) (io.Reader, cid.ID, us
commonCmd.ExitOnErr(cmd, "can't unmarshal object from given file: %w", objTemp.Unmarshal(buf))
payloadReader := bytes.NewReader(objTemp.Payload())
cnr, _ := objTemp.ContainerID()
ownerID := objTemp.OwnerID()
ownerID := *objTemp.OwnerID()
return payloadReader, cnr, ownerID
}
@ -1,6 +1,7 @@
|
|||
package object
|
||||
|
||||
import (
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -15,7 +16,9 @@ var Cmd = &cobra.Command{
|
|||
// the viper before execution
|
||||
commonflags.Bind(cmd)
|
||||
commonflags.BindAPI(cmd)
|
||||
common.StartClientCommandSpan(cmd)
|
||||
},
|
||||
PersistentPostRun: common.StopClientCommandSpan,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -28,8 +31,7 @@ func init() {
|
|||
objectHashCmd,
|
||||
objectRangeCmd,
|
||||
objectLockCmd,
|
||||
objectNodesCmd,
|
||||
}
|
||||
objectNodesCmd}
|
||||
|
||||
Cmd.AddCommand(objectChildCommands...)
|
||||
|
||||
|
|
|
@ -46,10 +46,6 @@ of frostfs-api and some useful utilities for compiling ACL rules from JSON
|
|||
notation, managing container access through protocol gates, querying network map
|
||||
and much more!`,
|
||||
Run: entryPoint,
|
||||
PersistentPreRun: func(cmd *cobra.Command, _ []string) {
|
||||
common.StartClientCommandSpan(cmd)
|
||||
},
|
||||
PersistentPostRun: common.StopClientCommandSpan,
|
||||
}
|
||||
|
||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||
|
@ -61,7 +57,6 @@ func Execute() {
|
|||
|
||||
func init() {
|
||||
cobra.OnInitialize(initConfig)
|
||||
cobra.EnableTraverseRunHooks = true
|
||||
|
||||
// use stdout as default output for cmd.Print()
|
||||
rootCmd.SetOut(os.Stdout)
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"os"
|
||||
|
||||
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
|
||||
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
|
||||
|
@ -32,7 +33,9 @@ var createCmd = &cobra.Command{
|
|||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
_ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath))
|
||||
_ = viper.BindPFlag(commonflags.Account, cmd.Flags().Lookup(commonflags.Account))
|
||||
common.StartClientCommandSpan(cmd)
|
||||
},
|
||||
PersistentPostRun: common.StopClientCommandSpan,
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
@ -78,7 +81,7 @@ func createSession(cmd *cobra.Command, _ []string) {
|
|||
}
|
||||
|
||||
filename, _ := cmd.Flags().GetString(outFlag)
|
||||
err = os.WriteFile(filename, data, 0o644)
|
||||
err = os.WriteFile(filename, data, 0644)
|
||||
commonCmd.ExitOnErr(cmd, "can't write token to file: %w", err)
|
||||
}
|
||||
|
||||
|
|
|
@ -1,181 +0,0 @@
|
|||
package util
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
|
||||
"github.com/flynn-archive/go-shlex"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidStatementFormat = errors.New("invalid statement format")
|
||||
errInvalidConditionFormat = errors.New("invalid condition format")
|
||||
errUnknownAction = errors.New("action is not recognized")
|
||||
errUnknownOperation = errors.New("operation is not recognized")
|
||||
errUnknownActionDetail = errors.New("action detail is not recognized")
|
||||
errUnknownBinaryOperator = errors.New("binary operator is not recognized")
|
||||
errUnknownCondObjectType = errors.New("condition object type is not recognized")
|
||||
)
|
||||
|
||||
// ParseAPEChain parses APE chain rules.
|
||||
func ParseAPEChain(chain *apechain.Chain, rules []string) error {
|
||||
if len(rules) == 0 {
|
||||
return errors.New("no APE rules provided")
|
||||
}
|
||||
|
||||
for _, rule := range rules {
|
||||
r := new(apechain.Rule)
|
||||
if err := ParseAPERule(r, rule); err != nil {
|
||||
return err
|
||||
}
|
||||
chain.Rules = append(chain.Rules, *r)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseAPERule parses access-policy-engine statement from the following form:
|
||||
// <action>[:action_detail] <operation> [<condition1> ...] <resource>
|
||||
//
|
||||
// Examples:
|
||||
// deny Object.Put *
|
||||
// deny:QuotaLimitReached Object.Put *
|
||||
// allow Object.Put *
|
||||
// allow Object.Get Object.Resource:Department=HR Object.Request:Actor=ownerA *
|
||||
//
|
||||
//nolint:godot
|
||||
func ParseAPERule(r *apechain.Rule, rule string) error {
|
||||
lexemes, err := shlex.Split(rule)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't parse rule '%s': %v", rule, err)
|
||||
}
|
||||
return parseRuleLexemes(r, lexemes)
|
||||
}
|
||||
|
||||
func parseRuleLexemes(r *apechain.Rule, lexemes []string) error {
|
||||
if len(lexemes) < 2 {
|
||||
return errInvalidStatementFormat
|
||||
}
|
||||
|
||||
var err error
|
||||
r.Status, err = parseStatus(lexemes[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Actions, err = parseAction(lexemes[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Condition, err = parseConditions(lexemes[2 : len(lexemes)-1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Resources, err = parseResource(lexemes[len(lexemes)-1])
|
||||
return err
|
||||
}
|
||||
|
||||
func parseStatus(lexeme string) (apechain.Status, error) {
|
||||
action, expression, found := strings.Cut(lexeme, ":")
|
||||
switch action = strings.ToLower(action); action {
|
||||
case "deny":
|
||||
if !found {
|
||||
return apechain.AccessDenied, nil
|
||||
} else if strings.EqualFold(expression, "QuotaLimitReached") {
|
||||
return apechain.QuotaLimitReached, nil
|
||||
} else {
|
||||
return 0, fmt.Errorf("%w: %s", errUnknownActionDetail, expression)
|
||||
}
|
||||
case "allow":
|
||||
if found {
|
||||
return 0, errUnknownActionDetail
|
||||
}
|
||||
return apechain.Allow, nil
|
||||
default:
|
||||
return 0, errUnknownAction
|
||||
}
|
||||
}
|
||||
|
||||
func parseAction(lexeme string) (apechain.Actions, error) {
|
||||
switch strings.ToLower(lexeme) {
|
||||
case "object.put":
|
||||
return apechain.Actions{Names: []string{nativeschema.MethodPutObject}}, nil
|
||||
case "object.get":
|
||||
return apechain.Actions{Names: []string{nativeschema.MethodGetObject}}, nil
|
||||
case "object.head":
|
||||
return apechain.Actions{Names: []string{nativeschema.MethodHeadObject}}, nil
|
||||
case "object.delete":
|
||||
return apechain.Actions{Names: []string{nativeschema.MethodDeleteObject}}, nil
|
||||
case "object.search":
|
||||
return apechain.Actions{Names: []string{nativeschema.MethodSearchObject}}, nil
|
||||
case "object.range":
|
||||
return apechain.Actions{Names: []string{nativeschema.MethodRangeObject}}, nil
|
||||
case "object.hash":
|
||||
return apechain.Actions{Names: []string{nativeschema.MethodHashObject}}, nil
|
||||
default:
|
||||
}
|
||||
return apechain.Actions{}, fmt.Errorf("%w: %s", errUnknownOperation, lexeme)
|
||||
}
|
||||
|
||||
func parseResource(lexeme string) (apechain.Resources, error) {
|
||||
if lexeme == "*" {
|
||||
return apechain.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}}, nil
|
||||
}
|
||||
return apechain.Resources{Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, lexeme)}}, nil
|
||||
}
|
||||
|
||||
const (
|
||||
ObjectResource = "object.resource"
|
||||
ObjectRequest = "object.request"
|
||||
)
|
||||
|
||||
var typeToCondObject = map[string]apechain.ObjectType{
|
||||
ObjectResource: apechain.ObjectResource,
|
||||
ObjectRequest: apechain.ObjectRequest,
|
||||
}
|
||||
|
||||
func parseConditions(lexemes []string) ([]apechain.Condition, error) {
|
||||
conds := make([]apechain.Condition, 0)
|
||||
|
||||
for _, lexeme := range lexemes {
|
||||
typ, expression, found := strings.Cut(lexeme, ":")
|
||||
typ = strings.ToLower(typ)
|
||||
|
||||
objType, ok := typeToCondObject[typ]
|
||||
if ok {
|
||||
if !found {
|
||||
return nil, fmt.Errorf("%w: %s", errInvalidConditionFormat, lexeme)
|
||||
}
|
||||
|
||||
var lhs, rhs string
|
||||
var binExpFound bool
|
||||
|
||||
var cond apechain.Condition
|
||||
cond.Object = objType
|
||||
|
||||
lhs, rhs, binExpFound = strings.Cut(expression, "!=")
|
||||
if !binExpFound {
|
||||
lhs, rhs, binExpFound = strings.Cut(expression, "=")
|
||||
if !binExpFound {
|
||||
return nil, fmt.Errorf("%w: %s", errUnknownBinaryOperator, expression)
|
||||
}
|
||||
cond.Op = apechain.CondStringEquals
|
||||
} else {
|
||||
cond.Op = apechain.CondStringNotEquals
|
||||
}
|
||||
|
||||
cond.Key, cond.Value = lhs, rhs
|
||||
|
||||
conds = append(conds, cond)
|
||||
} else {
|
||||
return nil, fmt.Errorf("%w: %s", errUnknownCondObjectType, typ)
|
||||
}
|
||||
}
|
||||
|
||||
return conds, nil
|
||||
}
|
|
@ -1,130 +0,0 @@
|
|||
package util
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseAPERule(t *testing.T) {
|
||||
tests := [...]struct {
|
||||
name string
|
||||
rule string
|
||||
expectErr error
|
||||
expectRule policyengine.Rule
|
||||
}{
|
||||
{
|
||||
name: "Valid allow rule",
|
||||
rule: "allow Object.Put *",
|
||||
expectRule: policyengine.Rule{
|
||||
Status: policyengine.Allow,
|
||||
Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
|
||||
Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
|
||||
Condition: []policyengine.Condition{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Valid deny rule",
|
||||
rule: "deny Object.Put *",
|
||||
expectRule: policyengine.Rule{
|
||||
Status: policyengine.AccessDenied,
|
||||
Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
|
||||
Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
|
||||
Condition: []policyengine.Condition{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Valid deny rule with action detail",
|
||||
rule: "deny:QuotaLimitReached Object.Put *",
|
||||
expectRule: policyengine.Rule{
|
||||
Status: policyengine.QuotaLimitReached,
|
||||
Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutObject}},
|
||||
Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
|
||||
Condition: []policyengine.Condition{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Valid allow rule with conditions",
|
||||
rule: "allow Object.Get Object.Resource:Department=HR Object.Request:Actor!=ownerA *",
|
||||
expectRule: policyengine.Rule{
|
||||
Status: policyengine.Allow,
|
||||
Actions: policyengine.Actions{Names: []string{nativeschema.MethodGetObject}},
|
||||
Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
|
||||
Condition: []policyengine.Condition{
|
||||
{
|
||||
Op: policyengine.CondStringEquals,
|
||||
Object: policyengine.ObjectResource,
|
||||
Key: "Department",
|
||||
Value: "HR",
|
||||
},
|
||||
{
|
||||
Op: policyengine.CondStringNotEquals,
|
||||
Object: policyengine.ObjectRequest,
|
||||
Key: "Actor",
|
||||
Value: "ownerA",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Valid rule with conditions with action detail",
|
||||
rule: "deny:QuotaLimitReached Object.Get Object.Resource:Department=HR Object.Request:Actor!=ownerA *",
|
||||
expectRule: policyengine.Rule{
|
||||
Status: policyengine.QuotaLimitReached,
|
||||
Actions: policyengine.Actions{Names: []string{nativeschema.MethodGetObject}},
|
||||
Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
|
||||
Condition: []policyengine.Condition{
|
||||
{
|
||||
Op: policyengine.CondStringEquals,
|
||||
Object: policyengine.ObjectResource,
|
||||
Key: "Department",
|
||||
Value: "HR",
|
||||
},
|
||||
{
|
||||
Op: policyengine.CondStringNotEquals,
|
||||
Object: policyengine.ObjectRequest,
|
||||
Key: "Actor",
|
||||
Value: "ownerA",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Invalid rule with unknown action",
|
||||
rule: "permit Object.Put *",
|
||||
expectErr: errUnknownAction,
|
||||
},
|
||||
{
|
||||
name: "Invalid rule with unknown operation",
|
||||
rule: "allow Object.PutOut *",
|
||||
expectErr: errUnknownOperation,
|
||||
},
|
||||
{
|
||||
name: "Invalid rule with unknown action detail",
|
||||
rule: "deny:UnknownActionDetail Object.Put *",
|
||||
expectErr: errUnknownActionDetail,
|
||||
},
|
||||
{
|
||||
name: "Invalid rule with unknown condition binary operator",
|
||||
rule: "deny Object.Put Object.Resource:Department<HR *",
|
||||
expectErr: errUnknownBinaryOperator,
|
||||
},
|
||||
{
|
||||
name: "Invalid rule with unknown condition object type",
|
||||
rule: "deny Object.Put Object.ResourZe:Department=HR *",
|
||||
expectErr: errUnknownCondObjectType,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
r := new(policyengine.Rule)
|
||||
err := ParseAPERule(r, test.rule)
|
||||
require.ErrorIs(t, err, test.expectErr)
|
||||
if test.expectErr == nil {
|
||||
require.Equal(t, test.expectRule, *r)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -48,7 +48,7 @@ func convertEACLTable(cmd *cobra.Command, _ []string) {
|
|||
return
|
||||
}
|
||||
|
||||
err = os.WriteFile(to, data, 0o644)
|
||||
err = os.WriteFile(to, data, 0644)
|
||||
commonCmd.ExitOnErr(cmd, "can't write exteded ACL table to file: %w", err)
|
||||
|
||||
cmd.Printf("extended ACL table was successfully dumped to %s\n", to)
|
||||
|
|
|
@ -78,7 +78,7 @@ func keyerGenerate(filename string, d *keyer.Dashboard) error {
|
|||
}
|
||||
|
||||
if filename != "" {
|
||||
return os.WriteFile(filename, key, 0o600)
|
||||
return os.WriteFile(filename, key, 0600)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -56,7 +56,7 @@ func signBearerToken(cmd *cobra.Command, _ []string) {
|
|||
return
|
||||
}
|
||||
|
||||
err = os.WriteFile(to, data, 0o644)
|
||||
err = os.WriteFile(to, data, 0644)
|
||||
commonCmd.ExitOnErr(cmd, "can't write signed bearer token to file: %w", err)
|
||||
|
||||
cmd.Printf("signed bearer token was successfully dumped to %s\n", to)
|
||||
|
|
|
@ -76,7 +76,7 @@ func signSessionToken(cmd *cobra.Command, _ []string) {
|
|||
return
|
||||
}
|
||||
|
||||
err = os.WriteFile(to, data, 0o644)
|
||||
err = os.WriteFile(to, data, 0644)
|
||||
if err != nil {
|
||||
commonCmd.ExitOnErr(cmd, "", fmt.Errorf("can't write signed session token to %s: %w", to, err))
|
||||
}
|
||||
|
|
|
@ -13,7 +13,7 @@ import (

func newConfig() (*viper.Viper, error) {
var err error
dv := viper.New()
var dv = viper.New()

defaultConfiguration(dv)
@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
irMetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/spf13/viper"
"go.uber.org/zap"

@ -62,13 +61,12 @@ func main() {
cfg, err = newConfig()
exitErr(err)

metrics := irMetrics.NewInnerRingMetrics()

logPrm.MetricsNamespace = "frostfs_ir"
err = logPrm.SetLevelString(
cfg.GetString("logger.level"),
)
exitErr(err)
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()

log, err = logger.NewLogger(logPrm)
exitErr(err)

@ -80,7 +78,7 @@ func main() {
metricsCmp = newMetricsComponent()
metricsCmp.init()

innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics)
innerRing, err = innerring.New(ctx, log, cfg, intErr)
exitErr(err)

pprofCmp.start()

@ -59,7 +59,7 @@ func WriteObjectToFile(cmd *cobra.Command, path string, data []byte) {
}

ExitOnErr(cmd, Errf("could not write file: %w",
os.WriteFile(path, data, 0o644)))
os.WriteFile(path, data, 0644)))

cmd.Printf("\nSaved payload to '%s' file\n", path)
}
@@ -6,35 +6,46 @@ import (

    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
    cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
    putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
    utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    lru "github.com/hashicorp/golang-lru/v2"
    "github.com/hashicorp/golang-lru/v2/expirable"
)

type netValueReader[K any, V any] func(K) (V, error)

type valueWithError[V any] struct {
type valueWithTime[V any] struct {
    v V
    t time.Time
    // cached error in order to not repeat failed request for some time
    e error
}

// entity that provides TTL cache interface.
type ttlNetCache[K comparable, V any] struct {
    cache *expirable.LRU[K, *valueWithError[V]]
    netRdr netValueReader[K, V]
    ttl time.Duration

    sz int

    cache *lru.Cache[K, *valueWithTime[V]]

    netRdr netValueReader[K, V]

    keyLocker *utilSync.KeyLocker[K]
}

// complicates netValueReader with TTL caching mechanism.
func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr netValueReader[K, V]) *ttlNetCache[K, V] {
    cache := expirable.NewLRU[K, *valueWithError[V]](sz, nil, ttl)
    cache, err := lru.New[K, *valueWithTime[V]](sz)
    fatalOnErr(err)

    return &ttlNetCache[K, V]{
        ttl: ttl,
        sz: sz,
        cache: cache,
        netRdr: netRdr,
        keyLocker: utilSync.NewKeyLocker[K](),

@@ -48,7 +59,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
// returned value should not be modified.
func (c *ttlNetCache[K, V]) get(key K) (V, error) {
    val, ok := c.cache.Peek(key)
    if ok {
    if ok && time.Since(val.t) < c.ttl {
        return val.v, val.e
    }

@@ -56,14 +67,15 @@ func (c *ttlNetCache[K, V]) get(key K) (V, error) {
    defer c.keyLocker.Unlock(key)

    val, ok = c.cache.Peek(key)
    if ok {
    if ok && time.Since(val.t) < c.ttl {
        return val.v, val.e
    }

    v, err := c.netRdr(key)

    c.cache.Add(key, &valueWithError[V]{
    c.cache.Add(key, &valueWithTime[V]{
        v: v,
        t: time.Now(),
        e: err,
    })

@@ -74,8 +86,9 @@ func (c *ttlNetCache[K, V]) set(k K, v V, e error) {
    c.keyLocker.Lock(k)
    defer c.keyLocker.Unlock(k)

    c.cache.Add(k, &valueWithError[V]{
    c.cache.Add(k, &valueWithTime[V]{
        v: v,
        t: time.Now(),
        e: e,
    })
}
@@ -231,6 +244,117 @@ func (s *lruNetmapSource) Epoch() (uint64, error) {
    return s.netState.CurrentEpoch(), nil
}

// wrapper over TTL cache of values read from the network
// that implements container lister.
type ttlContainerLister struct {
    inner *ttlNetCache[string, *cacheItemContainerList]
    client *cntClient.Client
}

// value type for ttlNetCache used by ttlContainerLister.
type cacheItemContainerList struct {
    // protects list from concurrent add/remove ops
    mtx sync.RWMutex
    // actual list of containers owner by the particular user
    list []cid.ID
}

func newCachedContainerLister(c *cntClient.Client, ttl time.Duration) ttlContainerLister {
    const containerListerCacheSize = 100

    lruCnrListerCache := newNetworkTTLCache(containerListerCacheSize, ttl, func(strID string) (*cacheItemContainerList, error) {
        var id *user.ID

        if strID != "" {
            id = new(user.ID)

            err := id.DecodeString(strID)
            if err != nil {
                return nil, err
            }
        }

        list, err := c.ContainersOf(id)
        if err != nil {
            return nil, err
        }

        return &cacheItemContainerList{
            list: list,
        }, nil
    })

    return ttlContainerLister{inner: lruCnrListerCache, client: c}
}

// List returns list of container IDs from the cache. If list is missing in the
// cache or expired, then it returns container IDs from side chain and updates
// the cache.
func (s ttlContainerLister) List(id *user.ID) ([]cid.ID, error) {
    if id == nil {
        return s.client.ContainersOf(nil)
    }

    item, err := s.inner.get(id.EncodeToString())
    if err != nil {
        return nil, err
    }

    item.mtx.RLock()
    res := make([]cid.ID, len(item.list))
    copy(res, item.list)
    item.mtx.RUnlock()

    return res, nil
}

// updates cached list of owner's containers: cnr is added if flag is true, otherwise it's removed.
// Concurrent calls can lead to some races:
//   - two parallel additions to missing owner's cache can lead to only one container to be cached
//   - async cache value eviction can lead to idle addition
//
// All described race cases aren't critical since cache values expire anyway, we just try
// to increase cache actuality w/o huge overhead on synchronization.
func (s *ttlContainerLister) update(owner user.ID, cnr cid.ID, add bool) {
    strOwner := owner.EncodeToString()

    val, ok := s.inner.cache.Peek(strOwner)
    if !ok {
        // we could cache the single cnr but in this case we will disperse
        // with the Sidechain a lot
        return
    }

    if s.inner.ttl <= time.Since(val.t) {
        return
    }

    item := val.v

    item.mtx.Lock()
    {
        found := false

        for i := range item.list {
            if found = item.list[i].Equals(cnr); found {
                if !add {
                    item.list = append(item.list[:i], item.list[i+1:]...)
                    // if list became empty we don't remove the value from the cache
                    // since empty list is a correct value, and we don't want to insta
                    // re-request it from the Sidechain
                }

                break
            }
        }

        if add && !found {
            item.list = append(item.list, cnr)
        }
    }
    item.mtx.Unlock()
}

type cachedIRFetcher struct {
    *ttlNetCache[struct{}, [][]byte]
}
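Editor's note on the hunk above: master delegates expiry to expirable.LRU, while support/v0 keeps a plain LRU plus a stored timestamp that is checked against the TTL on every read. The following standalone sketch contrasts the two flavours using github.com/hashicorp/golang-lru/v2; the names entry, ttlCache and the fetch callback are illustrative only, and the real code's per-key locking (KeyLocker) is intentionally omitted.

// sketch.go — minimal illustration, not part of the repository.
package main

import (
    "fmt"
    "time"

    lru "github.com/hashicorp/golang-lru/v2"
    "github.com/hashicorp/golang-lru/v2/expirable"
)

// entry mirrors valueWithTime: the value, the moment it was fetched, and the
// cached error, so a failed network read is not repeated until the TTL passes.
type entry[V any] struct {
    v V
    t time.Time
    e error
}

// ttlCache is the support/v0 flavour: a plain LRU plus a manual time check.
type ttlCache[K comparable, V any] struct {
    cache *lru.Cache[K, *entry[V]]
    ttl   time.Duration
    fetch func(K) (V, error)
}

func newTTLCache[K comparable, V any](size int, ttl time.Duration, fetch func(K) (V, error)) (*ttlCache[K, V], error) {
    c, err := lru.New[K, *entry[V]](size)
    if err != nil {
        return nil, err
    }
    return &ttlCache[K, V]{cache: c, ttl: ttl, fetch: fetch}, nil
}

func (c *ttlCache[K, V]) get(key K) (V, error) {
    // Serve from the cache only while the stored timestamp is fresh enough.
    if val, ok := c.cache.Peek(key); ok && time.Since(val.t) < c.ttl {
        return val.v, val.e
    }
    v, err := c.fetch(key)
    c.cache.Add(key, &entry[V]{v: v, t: time.Now(), e: err})
    return v, err
}

func main() {
    // The master flavour: the library expires entries on its own.
    exp := expirable.NewLRU[string, int](8, nil, time.Second)
    exp.Add("answer", 42)
    if v, ok := exp.Get("answer"); ok {
        fmt.Println(v)
    }

    ttl, err := newTTLCache[string, int](8, time.Second, func(k string) (int, error) {
        return len(k), nil // stand-in for a network read
    })
    if err != nil {
        panic(err)
    }
    v, _ := ttl.get("container-owner")
    fmt.Println(v)
}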
@@ -1,56 +0,0 @@
package main

import (
    "errors"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

func TestTTLNetCache(t *testing.T) {
    ttlDuration := time.Millisecond * 50
    cache := newNetworkTTLCache[string, time.Time](10, ttlDuration, testNetValueReader)

    key := "key"

    t.Run("Test Add and Get", func(t *testing.T) {
        ti := time.Now()
        cache.set(key, ti, nil)
        val, err := cache.get(key)
        require.NoError(t, err)
        require.Equal(t, ti, val)
    })

    t.Run("Test TTL", func(t *testing.T) {
        ti := time.Now()
        cache.set(key, ti, nil)
        time.Sleep(2 * ttlDuration)
        val, err := cache.get(key)
        require.NoError(t, err)
        require.NotEqual(t, val, ti)
    })

    t.Run("Test Remove", func(t *testing.T) {
        ti := time.Now()
        cache.set(key, ti, nil)
        cache.remove(key)
        val, err := cache.get(key)
        require.NoError(t, err)
        require.NotEqual(t, val, ti)
    })

    t.Run("Test Cache Error", func(t *testing.T) {
        cache.set("error", time.Now(), errors.New("mock error"))
        _, err := cache.get("error")
        require.Error(t, err)
        require.Equal(t, "mock error", err.Error())
    })
}

func testNetValueReader(key string) (time.Time, error) {
    if key == "error" {
        return time.Now(), errors.New("mock error")
    }
    return time.Now(), nil
}
@@ -62,7 +62,6 @@ import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state"
    "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore"
    "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

@@ -73,7 +72,6 @@ import (
    "github.com/panjf2000/ants/v2"
    "go.etcd.io/bbolt"
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
    "google.golang.org/grpc"
)

@@ -107,10 +105,7 @@ type applicationConfiguration struct {
}

type shardCfg struct {
    compress bool
    estimateCompressibility bool
    estimateCompressibilityThreshold float64

    compress bool
    smallSizeObjectLimit uint64
    uncompressableContentType []string
    refillMetabase bool

@@ -126,10 +121,10 @@ type shardCfg struct {
    subStorages []subStorageCfg

    gcCfg struct {
        removerBatchSize int
        removerSleepInterval time.Duration
        expiredCollectorBatchSize int
        expiredCollectorWorkerCount int
        removerBatchSize int
        removerSleepInterval time.Duration
        expiredCollectorBatchSize int
        expiredCollectorWorkersCount int
    }

    writecacheCfg struct {

@@ -222,8 +217,6 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig
    newConfig.refillMetabase = oldConfig.RefillMetabase()
    newConfig.mode = oldConfig.Mode()
    newConfig.compress = oldConfig.Compress()
    newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
    newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold()
    newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
    newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()

@@ -256,7 +249,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
    wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
    wc.maxObjSize = writeCacheCfg.MaxObjectSize()
    wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
    wc.flushWorkerCount = writeCacheCfg.WorkerCount()
    wc.flushWorkerCount = writeCacheCfg.WorkersNumber()
    wc.sizeLimit = writeCacheCfg.SizeLimit()
    wc.noSync = writeCacheCfg.NoSync()
    wc.gcInterval = writeCacheCfg.GCInterval()

@@ -328,7 +321,7 @@ func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *s
    newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
    newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
    newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
    newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
    newConfig.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
}

// internals contains application-specific internals that are created
@@ -512,11 +505,6 @@ type cfgObject struct {

    eaclSource container.EACLSource

    // Access policy chain source is used by object service to
    // check for operation permissions but this source is also shared with
    // control service that dispatches local overrides.
    apeChainSource container.AccessPolicyEngineChainSource

    pool cfgObjectRoutines

    cfgLocalStorage cfgLocalStorage
@@ -566,28 +554,22 @@ func initCfg(appCfg *config.Config) *cfg {

    key := nodeconfig.Key(appCfg)

    logPrm, err := c.loggerPrm()
    fatalOnErr(err)

    logPrm.MetricsNamespace = "frostfs_node"

    log, err := logger.NewLogger(logPrm)
    fatalOnErr(err)

    c.internals = initInternals(appCfg, log)

    relayOnly := nodeconfig.Relay(appCfg)

    netState := newNetworkState()

    c.shared = initShared(appCfg, key, netState, relayOnly)

    netState.metrics = c.metricsCollector

    logPrm, err := c.loggerPrm()
    fatalOnErr(err)
    logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
    log, err := logger.NewLogger(logPrm)
    fatalOnErr(err)
    if loggerconfig.ToLokiConfig(appCfg).Enabled {
        log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
            lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
            return lokiCore
        }))
    }

    c.internals = initInternals(appCfg, log)

    c.cfgAccounting = cfgAccounting{
        scriptHash: contractsconfig.Balance(appCfg),
    }
@@ -604,6 +586,9 @@ func initCfg(appCfg *config.Config) *cfg {

    user.IDFromKey(&c.ownerIDFromKey, key.PrivateKey.PublicKey)

    c.metricsCollector = metrics.NewNodeMetrics()
    netState.metrics = c.metricsCollector

    c.onShutdown(c.clientCache.CloseAll)    // clean up connections
    c.onShutdown(c.bgClientCache.CloseAll)  // clean up connections
    c.onShutdown(c.putClientCache.CloseAll) // clean up connections
@@ -645,15 +630,14 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
    }

    return shared{
        key: key,
        binPublicKey: key.PublicKey().Bytes(),
        localAddr: netAddr,
        respSvc: response.NewService(netState),
        clientCache: cache.NewSDKClientCache(cacheOpts),
        bgClientCache: cache.NewSDKClientCache(cacheOpts),
        putClientCache: cache.NewSDKClientCache(cacheOpts),
        persistate: persistate,
        metricsCollector: metrics.NewNodeMetrics(),
        key: key,
        binPublicKey: key.PublicKey().Bytes(),
        localAddr: netAddr,
        respSvc: response.NewService(netState),
        clientCache: cache.NewSDKClientCache(cacheOpts),
        bgClientCache: cache.NewSDKClientCache(cacheOpts),
        putClientCache: cache.NewSDKClientCache(cacheOpts),
        persistate: persistate,
    }
}

@@ -818,7 +802,6 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
        fstree.WithPerm(sRead.perm),
        fstree.WithDepth(sRead.depth),
        fstree.WithNoSync(sRead.noSync),
        fstree.WithLogger(c.log),
    }
    if c.metricsCollector != nil {
        fstreeOpts = append(fstreeOpts,

@@ -850,8 +833,6 @@ func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID {
    blobstoreOpts := []blobstor.Option{
        blobstor.WithCompressObjects(shCfg.compress),
        blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType),
        blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility),
        blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold),
        blobstor.WithStorages(ss),
        blobstor.WithLogger(c.log),
    }

@@ -888,7 +869,7 @@ func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID {
        shard.WithRemoverBatchSize(shCfg.gcCfg.removerBatchSize),
        shard.WithGCRemoverSleepInterval(shCfg.gcCfg.removerSleepInterval),
        shard.WithExpiredCollectorBatchSize(shCfg.gcCfg.expiredCollectorBatchSize),
        shard.WithExpiredCollectorWorkerCount(shCfg.gcCfg.expiredCollectorWorkerCount),
        shard.WithExpiredCollectorWorkersCount(shCfg.gcCfg.expiredCollectorWorkersCount),
        shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
            pool, err := ants.NewPool(sz)
            fatalOnErr(err)
@@ -1062,6 +1043,7 @@ func (c *cfg) signalWatcher(ctx context.Context) {
            c.reloadConfig(ctx)
        case syscall.SIGTERM, syscall.SIGINT:
            c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
            // TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING)

            c.shutdown()

@@ -1083,12 +1065,6 @@ func (c *cfg) signalWatcher(ctx context.Context) {
func (c *cfg) reloadConfig(ctx context.Context) {
    c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)

    if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) {
        c.log.Info(logs.FrostFSNodeSIGHUPSkip)
        return
    }
    defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY)

    err := c.readConfig(c.appCfg)
    if err != nil {
        c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))

@@ -1169,14 +1145,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
}

func (c *cfg) shutdown() {
    old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
    if old == control.HealthStatus_SHUTTING_DOWN {
        c.log.Info(logs.FrostFSNodeShutdownSkip)
        return
    }
    if old == control.HealthStatus_STARTING {
        c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady)
    }
    c.setHealthStatus(control.HealthStatus_SHUTTING_DOWN)

    c.ctxCancel()
    c.done <- struct{}{}
@@ -22,7 +22,7 @@ func TestApiclientSection(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        require.Equal(t, 15*time.Second, apiclientconfig.DialTimeout(c))
        require.Equal(t, 20*time.Second, apiclientconfig.StreamTimeout(c))
        require.Equal(t, 30*time.Second, apiclientconfig.ReconnectTimeout(c))

@@ -223,15 +223,3 @@ func parseSizeInBytes(sizeStr string) uint64 {
    size := cast.ToFloat64(sizeStr)
    return safeMul(size, multiplier)
}

// FloatOrDefault reads a configuration value
// from c by name and casts it to float64.
//
// Returns defaultValue if the value can not be casted.
func FloatOrDefault(c *Config, name string, defaultValue float64) float64 {
    v, err := cast.ToFloat64E(c.Value(name))
    if err != nil {
        return defaultValue
    }
    return v
}
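Editor's note on the hunk above: FloatOrDefault (present on master, absent on support/v0) is a thin "cast with fallback" helper. A standalone sketch of the same pattern, calling github.com/spf13/cast directly; floatOrDefault and the sample map are illustrative and are not the node's Config type.

// sketch.go — minimal illustration, not part of the repository.
package main

import (
    "fmt"

    "github.com/spf13/cast"
)

// floatOrDefault mimics FloatOrDefault: return defaultValue whenever the raw
// value cannot be cast to float64 (wrong type, nil, missing key, ...).
func floatOrDefault(raw any, defaultValue float64) float64 {
    v, err := cast.ToFloat64E(raw)
    if err != nil {
        return defaultValue
    }
    return v
}

func main() {
    values := map[string]any{
        "compression_estimate_compressibility_threshold": "0.7",
        "broken": struct{}{},
    }

    fmt.Println(floatOrDefault(values["compression_estimate_compressibility_threshold"], 0.1)) // 0.7
    fmt.Println(floatOrDefault(values["broken"], 0.1))  // uncastable value falls back to 0.1
    fmt.Println(floatOrDefault(values["missing"], 0.1)) // missing key (nil) also falls back to 0.1
}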
@@ -38,6 +38,7 @@ func New(configFile, configDir, envPrefix string) *Config {
        configViper.WithConfigFile(configFile),
        configViper.WithConfigDir(configDir),
        configViper.WithEnvPrefix(envPrefix))

    if err != nil {
        panic(err)
    }

@@ -15,8 +15,8 @@ func TestConfigDir(t *testing.T) {
    cfgFileName0 := path.Join(dir, "cfg_00.json")
    cfgFileName1 := path.Join(dir, "cfg_01.yml")

    require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777))
    require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777))
    require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0777))
    require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0777))

    c := New("", dir, "")
    require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))

@@ -35,7 +35,7 @@ func TestContractsSection(t *testing.T) {
    expProxy, err := util.Uint160DecodeStringLE("ad7c6b55b737b696e5c82c85445040964a03e97f")
    require.NoError(t, err)

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        balance := contractsconfig.Balance(c)
        container := contractsconfig.Container(c)
        netmap := contractsconfig.Netmap(c)

@@ -24,7 +24,7 @@ func TestControlSection(t *testing.T) {
    pubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11")
    pubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6")

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        require.Equal(t, pubs, controlconfig.AuthorizedKeys(c))
        require.Equal(t, "localhost:8090", controlconfig.GRPC(c).Endpoint())
    }
@@ -42,7 +42,7 @@ func TestEngineSection(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        num := 0

        require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))

@@ -74,24 +74,22 @@ func TestEngineSection(t *testing.T) {
            require.Equal(t, "tmp/0/cache", wc.Path())
            require.EqualValues(t, 16384, wc.SmallObjectSize())
            require.EqualValues(t, 134217728, wc.MaxObjectSize())
            require.EqualValues(t, 30, wc.WorkerCount())
            require.EqualValues(t, 30, wc.WorkersNumber())
            require.EqualValues(t, 3221225472, wc.SizeLimit())

            require.Equal(t, "tmp/0/meta", meta.Path())
            require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
            require.Equal(t, fs.FileMode(0644), meta.BoltDB().Perm())
            require.Equal(t, 100, meta.BoltDB().MaxBatchSize())
            require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay())

            require.Equal(t, true, sc.Compress())
            require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes())
            require.Equal(t, true, sc.EstimateCompressibility())
            require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold())
            require.EqualValues(t, 102400, sc.SmallSizeLimit())

            require.Equal(t, 2, len(ss))
            blz := blobovniczaconfig.From((*config.Config)(ss[0]))
            require.Equal(t, "tmp/0/blob/blobovnicza", ss[0].Path())
            require.EqualValues(t, 0o644, blz.BoltDB().Perm())
            require.EqualValues(t, 0644, blz.BoltDB().Perm())
            require.EqualValues(t, 4194304, blz.Size())
            require.EqualValues(t, 1, blz.ShallowDepth())
            require.EqualValues(t, 4, blz.ShallowWidth())

@@ -99,7 +97,7 @@ func TestEngineSection(t *testing.T) {
            require.EqualValues(t, 10, blz.LeafWidth())

            require.Equal(t, "tmp/0/blob", ss[1].Path())
            require.EqualValues(t, 0o644, ss[1].Perm())
            require.EqualValues(t, 0644, ss[1].Perm())

            fst := fstreeconfig.From((*config.Config)(ss[1]))
            require.EqualValues(t, 5, fst.Depth())

@@ -108,13 +106,13 @@ func TestEngineSection(t *testing.T) {
            require.EqualValues(t, 150, gc.RemoverBatchSize())
            require.Equal(t, 2*time.Minute, gc.RemoverSleepInterval())
            require.Equal(t, 1500, gc.ExpiredCollectorBatchSize())
            require.Equal(t, 15, gc.ExpiredCollectorWorkerCount())
            require.Equal(t, 15, gc.ExpiredCollectorWorkersCount())

            require.Equal(t, false, sc.RefillMetabase())
            require.Equal(t, mode.ReadOnly, sc.Mode())
        case 1:
            require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
            require.Equal(t, fs.FileMode(0o644), pl.Perm())
            require.Equal(t, fs.FileMode(0644), pl.Perm())
            require.True(t, pl.NoSync())
            require.Equal(t, 5*time.Millisecond, pl.MaxBatchDelay())
            require.Equal(t, 100, pl.MaxBatchSize())

@@ -125,11 +123,11 @@ func TestEngineSection(t *testing.T) {
            require.Equal(t, "tmp/1/cache", wc.Path())
            require.EqualValues(t, 16384, wc.SmallObjectSize())
            require.EqualValues(t, 134217728, wc.MaxObjectSize())
            require.EqualValues(t, 30, wc.WorkerCount())
            require.EqualValues(t, 30, wc.WorkersNumber())
            require.EqualValues(t, 4294967296, wc.SizeLimit())

            require.Equal(t, "tmp/1/meta", meta.Path())
            require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
            require.Equal(t, fs.FileMode(0644), meta.BoltDB().Perm())
            require.Equal(t, 200, meta.BoltDB().MaxBatchSize())
            require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay())

@@ -148,7 +146,7 @@ func TestEngineSection(t *testing.T) {
            require.EqualValues(t, 10, blz.LeafWidth())

            require.Equal(t, "tmp/1/blob", ss[1].Path())
            require.EqualValues(t, 0o644, ss[1].Perm())
            require.EqualValues(t, 0644, ss[1].Perm())

            fst := fstreeconfig.From((*config.Config)(ss[1]))
            require.EqualValues(t, 5, fst.Depth())

@@ -157,7 +155,7 @@ func TestEngineSection(t *testing.T) {
            require.EqualValues(t, 200, gc.RemoverBatchSize())
            require.Equal(t, 5*time.Minute, gc.RemoverSleepInterval())
            require.Equal(t, gcconfig.ExpiredCollectorBatchSizeDefault, gc.ExpiredCollectorBatchSize())
            require.Equal(t, gcconfig.ExpiredCollectorWorkersCountDefault, gc.ExpiredCollectorWorkerCount())
            require.Equal(t, gcconfig.ExpiredCollectorWorkersCountDefault, gc.ExpiredCollectorWorkersCount())

            require.Equal(t, true, sc.RefillMetabase())
            require.Equal(t, mode.ReadWrite, sc.Mode())
@@ -9,7 +9,7 @@ import (
type Config config.Config

// PermDefault are default permission bits for BlobStor data.
const PermDefault = 0o660
const PermDefault = 0660

func From(x *config.Config) *Config {
    return (*Config)(x)

@@ -13,7 +13,7 @@ type Config config.Config

const (
    // PermDefault is a default permission bits for metabase file.
    PermDefault = 0o660
    PermDefault = 0660
)

// Perm returns the value of "perm" config parameter as a fs.FileMode.

@@ -16,11 +16,8 @@ import (
// which provides access to Shard configurations.
type Config config.Config

const (
    // SmallSizeLimitDefault is a default limit of small objects payload in bytes.
    SmallSizeLimitDefault = 1 << 20
    EstimateCompressibilityThresholdDefault = 0.1
)
// SmallSizeLimitDefault is a default limit of small objects payload in bytes.
const SmallSizeLimitDefault = 1 << 20

// From wraps config section into Config.
func From(c *config.Config) *Config {

@@ -46,30 +43,6 @@ func (x *Config) UncompressableContentTypes() []string {
        "compression_exclude_content_types")
}

// EstimateCompressibility returns the value of "estimate_compressibility" config parameter.
//
// Returns false if the value is not a valid bool.
func (x *Config) EstimateCompressibility() bool {
    return config.BoolSafe(
        (*config.Config)(x),
        "compression_estimate_compressibility",
    )
}

// EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter.
//
// Returns EstimateCompressibilityThresholdDefault if the value is not defined, not valid float or not in range [0.0; 1.0].
func (x *Config) EstimateCompressibilityThreshold() float64 {
    v := config.FloatOrDefault(
        (*config.Config)(x),
        "compression_estimate_compressibility_threshold",
        EstimateCompressibilityThresholdDefault)
    if v < 0.0 || v > 1.0 {
        return EstimateCompressibilityThresholdDefault
    }
    return v
}

// SmallSizeLimit returns the value of "small_object_size" config parameter.
//
// Returns SmallSizeLimitDefault if the value is not a positive number.
@@ -63,14 +63,14 @@ func (x *Config) RemoverSleepInterval() time.Duration {
    return RemoverSleepIntervalDefault
}

// ExpiredCollectorWorkerCount returns the value of "expired_collector_worker_count"
// ExpiredCollectorWorkersCount returns the value of "expired_collector_workers_count"
// config parameter.
//
// Returns ExpiredCollectorWorkersCountDefault if the value is not a positive number.
func (x *Config) ExpiredCollectorWorkerCount() int {
func (x *Config) ExpiredCollectorWorkersCount() int {
    s := config.IntSafe(
        (*config.Config)(x),
        "expired_collector_worker_count",
        "expired_collector_workers_count",
    )

    if s > 0 {

@@ -13,7 +13,7 @@ type Config config.Config

const (
    // PermDefault is a default permission bits for metabase file.
    PermDefault = 0o660
    PermDefault = 0660
)

// From wraps config section into Config.

@@ -106,13 +106,13 @@ func (x *Config) MaxObjectSize() uint64 {
    return MaxSizeDefault
}

// WorkerCount returns the value of "flush_worker_count" config parameter.
// WorkersNumber returns the value of "workers_number" config parameter.
//
// Returns WorkersNumberDefault if the value is not a positive number.
func (x *Config) WorkerCount() int {
func (x *Config) WorkersNumber() int {
    c := config.IntSafe(
        (*config.Config)(x),
        "flush_worker_count",
        "workers_number",
    )

    if c > 0 {
@@ -17,7 +17,7 @@ func TestGRPCSection(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        num := 0

        IterateEndpoints(c, func(sc *Config) {

@@ -1,21 +1,12 @@
package loggerconfig

import (
    "os"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
    "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore/loki"
)

const (
    // LevelDefault is a default logger level.
    LevelDefault = "info"
    subsection = "logger"
    lokiSubsection = "loki"
    AddressDefault = "localhost:3100"
    BatchEntriesNumberDefault = 100
    BatchWaitDefault = time.Second
    LevelDefault = "info"
)

// Level returns the value of "level" config parameter

@@ -24,7 +15,7 @@ const (
// Returns LevelDefault if the value is not a non-empty string.
func Level(c *config.Config) string {
    v := config.StringSafe(
        c.Sub(subsection),
        c.Sub("logger"),
        "level",
    )
    if v != "" {

@@ -33,44 +24,3 @@ func Level(c *config.Config) string {

    return LevelDefault
}

// ToLokiConfig extracts loki config.
func ToLokiConfig(c *config.Config) loki.Config {
    hostname, _ := os.Hostname()
    return loki.Config{
        Enabled: config.BoolSafe(c.Sub(subsection).Sub(lokiSubsection), "enabled"),
        BatchWait: getBatchWait(c),
        BatchEntriesNumber: getBatchEntriesNumber(c),
        Endpoint: getEndpoint(c),
        Labels: map[string]string{
            "hostname": hostname,
        },
    }
}

func getBatchWait(c *config.Config) time.Duration {
    v := config.DurationSafe(c.Sub(subsection).Sub(lokiSubsection), "max_batch_delay")
    if v > 0 {
        return v
    }

    return BatchWaitDefault
}

func getBatchEntriesNumber(c *config.Config) int {
    v := config.IntSafe(c.Sub(subsection).Sub(lokiSubsection), "max_batch_size")
    if v > 0 {
        return int(v)
    }

    return BatchEntriesNumberDefault
}

func getEndpoint(c *config.Config) string {
    v := config.StringSafe(c.Sub(subsection).Sub(lokiSubsection), "endpoint")
    if v != "" {
        return v
    }

    return AddressDefault
}
@@ -17,7 +17,7 @@ func TestLoggerSection_Level(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        v := loggerconfig.Level(c)
        require.Equal(t, "debug", v)
    }

@@ -22,7 +22,7 @@ func TestMetricsSection(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        to := metricsconfig.ShutdownTimeout(c)
        addr := metricsconfig.Address(c)

@@ -23,12 +23,14 @@ func TestMorphSection(t *testing.T) {

    const path = "../../../../config/example/node"

    rpcs := []client.Endpoint{
        {"wss://rpc1.morph.frostfs.info:40341/ws", 1},
        {"wss://rpc2.morph.frostfs.info:40341/ws", 2},
    }
    var (
        rpcs = []client.Endpoint{
            {"wss://rpc1.morph.frostfs.info:40341/ws", 1},
            {"wss://rpc2.morph.frostfs.info:40341/ws", 2},
        }
    )

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        require.Equal(t, rpcs, morphconfig.RPCEndpoint(c))
        require.Equal(t, 30*time.Second, morphconfig.DialTimeout(c))
        require.Equal(t, 15*time.Second, morphconfig.CacheTTL(c))

@@ -56,7 +56,7 @@ func TestNodeSection(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        key := Key(c)
        addrs := BootstrapAddresses(c)
        attributes := Attributes(c)
@@ -28,11 +28,11 @@ func Put(c *config.Config) PutConfig {
    }
}

// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
// PoolSizeRemote returns the value of "pool_size_remote" config parameter.
//
// Returns PutPoolSizeDefault if the value is not a positive number.
func (g PutConfig) PoolSizeRemote() int {
    v := config.Int(g.cfg, "remote_pool_size")
    v := config.Int(g.cfg, "pool_size_remote")
    if v > 0 {
        return int(v)
    }

@@ -40,11 +40,11 @@ func (g PutConfig) PoolSizeRemote() int {
    return PutPoolSizeDefault
}

// PoolSizeLocal returns the value of "local_pool_size" config parameter.
// PoolSizeLocal returns the value of "pool_size_local" config parameter.
//
// Returns PutPoolSizeDefault if the value is not a positive number.
func (g PutConfig) PoolSizeLocal() int {
    v := config.Int(g.cfg, "local_pool_size")
    v := config.Int(g.cfg, "pool_size_local")
    if v > 0 {
        return int(v)
    }

@@ -21,7 +21,7 @@ func TestObjectSection(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
        require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
        require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))

@@ -19,7 +19,7 @@ func TestPolicerSection(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        require.Equal(t, 15*time.Second, policerconfig.HeadTimeout(c))
    }

@@ -25,7 +25,7 @@ func TestProfilerSection(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        to := profilerconfig.ShutdownTimeout(c)
        addr := profilerconfig.Address(c)

@@ -20,7 +20,7 @@ func TestReplicatorSection(t *testing.T) {

    const path = "../../../../config/example/node"

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        require.Equal(t, 15*time.Second, replicatorconfig.PutTimeout(c))
        require.Equal(t, 10, replicatorconfig.PoolSize(c))
    }

@@ -35,7 +35,7 @@ func TestTreeSection(t *testing.T) {
    require.NoError(t, err)
    expectedKeys = append(expectedKeys, key)

    fileConfigTest := func(c *config.Config) {
    var fileConfigTest = func(c *config.Config) {
        treeSec := treeconfig.Tree(c)

        require.True(t, treeSec.Enabled())
@@ -63,6 +63,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
    // use RPC node as source of Container contract items (with caching)
    cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL)
    cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL)
    cachedContainerLister := newCachedContainerLister(client, c.cfgMorph.cacheTTL)

    subscribeToContainerCreation(c, func(e event.Event) {
        ev := e.(containerEvent.PutSuccess)

@@ -73,6 +74,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
        // creation success are most commonly tracked by polling GET op.
        cnr, err := cnrSrc.Get(ev.ID)
        if err == nil {
            cachedContainerLister.update(cnr.Value.Owner(), ev.ID, true)
            cachedContainerStorage.containerCache.set(ev.ID, cnr, nil)
        } else {
            // unlike removal, we expect successful receive of the container

@@ -90,6 +92,16 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c

    subscribeToContainerRemoval(c, func(e event.Event) {
        ev := e.(containerEvent.DeleteSuccess)

        // read owner of the removed container in order to update the listing cache.
        // It's strange to read already removed container, but we can successfully hit
        // the cache.
        // TODO: use owner directly from the event after neofs-contract#256 will become resolved
        cnr, err := cachedContainerStorage.Get(ev.ID)
        if err == nil {
            cachedContainerLister.update(cnr.Value.Owner(), ev.ID, false)
        }

        cachedContainerStorage.handleRemoval(ev.ID)
        c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
            zap.Stringer("id", ev.ID),

@@ -99,7 +111,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
    c.cfgObject.eaclSource = cachedEACLStorage
    c.cfgObject.cnrSource = cachedContainerStorage

    cnrRdr.lister = client
    cnrRdr.lister = cachedContainerLister
    cnrRdr.eacl = c.cfgObject.eaclSource
    cnrRdr.src = c.cfgObject.cnrSource

@@ -14,8 +14,6 @@ import (
    "google.golang.org/grpc"
)

const serviceNameControl = "control"

type treeSynchronizer struct {
    treeSvc *tree.Service
}

@@ -51,7 +49,6 @@ func initControlService(c *cfg) {
        controlSvc.WithTreeService(treeSynchronizer{
            c.treeService,
        }),
        controlSvc.WithAPEChainSource(c.cfgObject.apeChainSource),
    )

    lis, err := net.Listen("tcp", endpoint)

@@ -69,10 +66,7 @@ func initControlService(c *cfg) {
    control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc)

    c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) {
        runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) {
            c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
                zap.String("service", serviceNameControl),
                zap.String("endpoint", endpoint))
        runAndLog(ctx, c, "control", false, func(context.Context, *cfg) {
            fatalOnErr(c.cfgControlService.server.Serve(lis))
        })
    }))

@@ -84,20 +78,10 @@ func (c *cfg) NetmapStatus() control.NetmapStatus {

func (c *cfg) setHealthStatus(st control.HealthStatus) {
    c.healthStatus.Store(int32(st))
    c.metricsCollector.State().SetHealth(int32(st))
}

func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) {
    if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped {
        c.metricsCollector.State().SetHealth(int32(newSt))
    if c.metricsCollector != nil {
        c.metricsCollector.State().SetHealth(int32(st))
    }
    return
}

func (c *cfg) swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) {
    old = control.HealthStatus(c.healthStatus.Swap(int32(st)))
    c.metricsCollector.State().SetHealth(int32(st))
    return
}

func (c *cfg) HealthStatus() control.HealthStatus {
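Editor's note on the hunk above: master guards reconfiguration and shutdown with compare-and-swap transitions on the health status, which support/v0 lacks. A standalone sketch of that pattern using sync/atomic; the status constants and the reconfigure body are illustrative, not the node's control API.

// sketch.go — minimal illustration, not part of the repository.
package main

import (
    "fmt"
    "sync/atomic"
)

const (
    statusReady int32 = iota
    statusReconfiguring
    statusShuttingDown
)

type node struct {
    health atomic.Int32
}

// reloadConfig only runs when the node is READY; a concurrent reload or an
// in-flight shutdown makes the CAS fail and the reload is skipped.
func (n *node) reloadConfig() {
    if !n.health.CompareAndSwap(statusReady, statusReconfiguring) {
        fmt.Println("SIGHUP skipped: node is not ready")
        return
    }
    defer n.health.CompareAndSwap(statusReconfiguring, statusReady)

    fmt.Println("re-reading configuration")
}

func main() {
    var n node
    n.health.Store(statusReady)

    n.reloadConfig() // runs

    n.health.Store(statusShuttingDown)
    n.reloadConfig() // skipped
}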
@@ -110,8 +110,7 @@ func serveGRPC(c *cfg) {
        c.wg.Done()
    }()

    c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
        zap.String("service", "gRPC"),
    c.log.Info(logs.FrostFSNodeStartListeningGRPCEndpoint,
        zap.Stringer("endpoint", lis.Addr()),
    )

@@ -6,9 +6,7 @@ import (
    "net/http"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
    "go.uber.org/zap"
)

type httpComponent struct {

@@ -44,9 +42,6 @@ func (cmp *httpComponent) init(c *cfg) {
        cmp.name,
        func(ctx context.Context) {
            runAndLog(ctx, c, cmp.name, false, func(context.Context, *cfg) {
                c.log.Info(logs.FrostFSNodeStartListeningEndpoint,
                    zap.String("service", cmp.name),
                    zap.String("endpoint", cmp.address))
                fatalOnErr(srv.Serve())
            })
        },