forked from TrueCloudLab/frostfs-node
Compare commits
59 commits
bugfix/cou ... master
SHA1
d19ab43500
5bcf81d1cc
c2effcc61c
2285cfc36f
e74d05c03f
48862e0e63
89892d9754
7ac0852364
d28a5d2d7a
87ac3c5279
d5ee6d3039
433aab12bb
81f4cdbb91
3cd7d23f10
012af5cc38
eb5336d5ff
bc8d79ddf9
29708b78d7
b9284604d9
65a4320c75
9a260c2e64
6f798b9c4b
e515dd4582
8b6ec57c61
ed13387c0e
5afea62ec0
c0a2f20eee
2d064d0bd8
ef38420623
f7caef355b
fbdfd503e4
67798bb50e
5b653aa65f
e314f328c4
6c96cc2af6
74db735265
3304afa9d1
b42bcdc6fa
b0c5def2d9
90f3669399
07ce40e119
41038b2ec0
d83879d4b8
f6582081a4
00b1cecfb7
63466d71b2
d53732f663
3012286452
714ff784fa
d2a59b2de8
acd6eb1815
42bf03e5cc
5992ee901a
dfb00083d0
1134760271
02bb7159a5
94302235d0
cc5360a578
4190fba86d
160 changed files with 1998 additions and 1438 deletions
@@ -1,4 +1,4 @@
-FROM golang:1.22 AS builder
+FROM golang:1.23 AS builder
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository

@@ -1,4 +1,4 @@
-FROM golang:1.22
+FROM golang:1.23

 WORKDIR /tmp

@@ -1,4 +1,4 @@
-FROM golang:1.22 AS builder
+FROM golang:1.23 AS builder
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository

@@ -1,4 +1,4 @@
-FROM golang:1.22 AS builder
+FROM golang:1.23 AS builder
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository

@@ -1,4 +1,4 @@
-FROM golang:1.22 AS builder
+FROM golang:1.23 AS builder
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository
@@ -87,5 +87,7 @@ linters:
     - perfsprint
     - testifylint
     - protogetter
+    - intrange
+    - tenv
   disable-all: true
   fast: false
@@ -1,11 +0,0 @@
-pipeline:
-  # Kludge for non-root containers under WoodPecker
-  fix-ownership:
-    image: alpine:latest
-    commands: chown -R 1234:1234 .
-
-  pre-commit:
-    image: git.frostfs.info/truecloudlab/frostfs-ci:v0.36
-    commands:
-      - export HOME="$(getent passwd $(id -u) | cut '-d:' -f6)"
-      - pre-commit run --hook-stage manual
Makefile (23 changes)
@@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

 GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.60.3
+LINT_VERSION ?= 1.61.0
 TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
 PROTOC_VERSION ?= 25.0
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)

@@ -27,12 +27,6 @@ DIRS = $(BIN) $(RELEASE)
 CMDS = $(notdir $(basename $(wildcard cmd/frostfs-*)))
 BINS = $(addprefix $(BIN)/, $(CMDS))

-# .deb package versioning
-OS_RELEASE = $(shell lsb_release -cs)
-PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
-    sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
-    sed "s/-/~/")-${OS_RELEASE}
-
 OUTPUT_LINT_DIR ?= $(abspath $(BIN))/linters
 LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
 TMP_DIR := .cache

@@ -58,7 +52,7 @@ LOCODE_DB_PATH=$(abspath ./.cache/locode_db)
 LOCODE_DB_VERSION=v0.4.0

 .PHONY: help all images dep clean fmts fumpt imports test lint docker/lint
-    prepare-release debpackage pre-commit unpre-commit
+    prepare-release pre-commit unpre-commit

 # To build a specific binary, use it's name prefix with bin/ as a target
 # For example `make bin/frostfs-node` will build only storage node binary

@@ -263,19 +257,6 @@ clean:
     rm -rf $(BIN)
     rm -rf $(RELEASE)

-# Package for Debian
-debpackage:
-    dch -b --package frostfs-node \
-        --controlmaint \
-        --newversion $(PKG_VERSION) \
-        --distribution $(OS_RELEASE) \
-        "Please see CHANGELOG.md for code changes for $(VERSION)"
-    dpkg-buildpackage --no-sign -b
-
-# Cleanup deb package build directories
-debclean:
-    dh clean
-
 # Download locode database
 locode-download:
     mkdir -p $(TMP_DIR)
@@ -128,7 +128,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
     tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets")

     var i innerring.GlagoliticLetter
-    for i = 0; i < innerring.GlagoliticLetter(credSize); i++ {
+    for i = range innerring.GlagoliticLetter(credSize) {
         tmpl.Glagolitics = append(tmpl.Glagolitics, i.String())
     }
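Several hunks in this comparison (here, in the alphabet test, and in the OID decoding helper further down) replace classic counter loops with the range-over-integer form available since Go 1.22, which the toolchain bump to golang:1.23 makes safe to require. A minimal, self-contained sketch of the equivalence; the names below are illustrative and not taken from the repository:

package main

import "fmt"

func main() {
	const credSize = 3

	// Classic counter loop (pre-Go 1.22 style).
	for i := 0; i < credSize; i++ {
		fmt.Println("classic:", i)
	}

	// Equivalent range-over-int loop (Go 1.22+): i takes the values 0..credSize-1.
	for i := range credSize {
		fmt.Println("range:", i)
	}
}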
@@ -139,13 +139,12 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
 func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) {
     bw.Reset()
     emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id)
-    emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id)
     res, err := inv.Run(bw.Bytes())
     if err != nil {
         return nil, fmt.Errorf("can't get container info: %w", err)
     }
-    if len(res.Stack) != 2 {
-        return nil, fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse)
+    if len(res.Stack) != 1 {
+        return nil, fmt.Errorf("%w: expected 1 items on stack", errInvalidContainerResponse)
     }

     cnt := new(Container)

@@ -154,14 +153,6 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
         return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
     }

-    ea := new(EACL)
-    err = ea.FromStackItem(res.Stack[1])
-    if err != nil {
-        return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
-    }
-    if len(ea.Value) != 0 {
-        cnt.EACL = ea
-    }
     return cnt, nil
 }

@@ -258,10 +249,6 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
 func putContainer(bw *io.BufBinWriter, ch util.Uint160, cnt Container) {
     emit.AppCall(bw.BinWriter, ch, "put", callflag.All,
         cnt.Value, cnt.Signature, cnt.PublicKey, cnt.Token)
-    if ea := cnt.EACL; ea != nil {
-        emit.AppCall(bw.BinWriter, ch, "setEACL", callflag.All,
-            ea.Value, ea.Signature, ea.PublicKey, ea.Token)
-    }
 }

 func isContainerRestored(cmd *cobra.Command, wCtx *helper.InitializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) {

@@ -322,15 +309,6 @@ type Container struct {
     Signature []byte `json:"signature"`
     PublicKey []byte `json:"public_key"`
     Token []byte `json:"token"`
-    EACL *EACL `json:"eacl"`
-}
-
-// EACL represents extended ACL struct in contract storage.
-type EACL struct {
-    Value []byte `json:"value"`
-    Signature []byte `json:"signature"`
-    PublicKey []byte `json:"public_key"`
-    Token []byte `json:"token"`
 }

 // ToStackItem implements stackitem.Convertible.

@@ -377,50 +355,6 @@ func (c *Container) FromStackItem(item stackitem.Item) error {
     return nil
 }

-// ToStackItem implements stackitem.Convertible.
-func (c *EACL) ToStackItem() (stackitem.Item, error) {
-    return stackitem.NewStruct([]stackitem.Item{
-        stackitem.NewByteArray(c.Value),
-        stackitem.NewByteArray(c.Signature),
-        stackitem.NewByteArray(c.PublicKey),
-        stackitem.NewByteArray(c.Token),
-    }), nil
-}
-
-// FromStackItem implements stackitem.Convertible.
-func (c *EACL) FromStackItem(item stackitem.Item) error {
-    arr, ok := item.Value().([]stackitem.Item)
-    if !ok || len(arr) != 4 {
-        return errors.New("invalid stack item type")
-    }
-
-    value, err := arr[0].TryBytes()
-    if err != nil {
-        return errors.New("invalid eACL value")
-    }
-
-    sig, err := arr[1].TryBytes()
-    if err != nil {
-        return errors.New("invalid eACL signature")
-    }
-
-    pub, err := arr[2].TryBytes()
-    if err != nil {
-        return errors.New("invalid eACL public key")
-    }
-
-    tok, err := arr[3].TryBytes()
-    if err != nil {
-        return errors.New("invalid eACL token")
-    }
-
-    c.Value = value
-    c.Signature = sig
-    c.PublicKey = pub
-    c.Token = tok
-    return nil
-}
-
 // getCIDFilterFunc returns filtering function for container IDs.
 // Raw byte slices are used because it works with structures returned
 // from contract.
@@ -63,7 +63,7 @@ func TestGenerateAlphabet(t *testing.T) {
         buf.Reset()
         v.Set(commonflags.AlphabetWalletsFlag, walletDir)
         require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10)))
-        for i := uint64(0); i < size; i++ {
+        for i := range uint64(size) {
            buf.WriteString(strconv.FormatUint(i, 10) + "\r")
         }
@@ -166,5 +166,6 @@ func DeployNNS(c *InitializeContext, method string) error {
         return fmt.Errorf("can't send deploy transaction: %w", err)
     }

+    c.Command.Println("NNS hash:", invokeHash.StringLE())
     return c.AwaitTx()
 }
@@ -47,6 +47,19 @@ func initDelRecordsCmd() {
     _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
 }

+func initDelRecordCmd() {
+    Cmd.AddCommand(delRecordCmd)
+    delRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+    delRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+    delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+    delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
+    delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
+
+    _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
+    _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)
+    _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordDataFlag)
+}
+
 func addRecord(cmd *cobra.Command, _ []string) {
     c, actor, _ := getRPCClient(cmd)
     name, _ := cmd.Flags().GetString(nnsNameFlag)
@@ -115,6 +128,22 @@ func delRecords(cmd *cobra.Command, _ []string) {
     cmd.Println("Records removed successfully")
 }

+func delRecord(cmd *cobra.Command, _ []string) {
+    c, actor, _ := getRPCClient(cmd)
+    name, _ := cmd.Flags().GetString(nnsNameFlag)
+    data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
+    recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
+    typ, err := getRecordType(recordType)
+    commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err)
+    h, vub, err := c.DeleteRecord(name, typ, data)
+    commonCmd.ExitOnErr(cmd, "unable to delete record: %w", err)
+
+    cmd.Println("Waiting for transaction to persist...")
+    _, err = actor.Wait(h, vub, err)
+    commonCmd.ExitOnErr(cmd, "delete records error: %w", err)
+    cmd.Println("Record removed successfully")
+}
+
 func getRecordType(recordType string) (*big.Int, error) {
     switch strings.ToUpper(recordType) {
     case "A":

@@ -95,6 +95,15 @@ var (
         },
         Run: delRecords,
     }
+    delRecordCmd = &cobra.Command{
+        Use:   "delete-record",
+        Short: "Removes domain record with the specified type and data",
+        PreRun: func(cmd *cobra.Command, _ []string) {
+            _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+            _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+        },
+        Run: delRecord,
+    }
 )

 func init() {

@@ -106,4 +115,5 @@ func init() {
     initAddRecordCmd()
     initGetRecordsCmd()
     initDelRecordsCmd()
+    initDelRecordCmd()
 }
@@ -659,9 +659,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes

     for {
         n, ok = rdr.Read(buf)
-        for i := range n {
-            list = append(list, buf[i])
-        }
+        list = append(list, buf[:n]...)
         if !ok {
             break
         }
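The hunk above also drops a per-element copy in favor of appending the read prefix in one call. A hedged sketch of the same pattern with plain byte slices; the buffer names here are illustrative only:

package main

import "fmt"

func main() {
	buf := []byte("abcdef")
	n := 4 // pretend a reader filled only the first n bytes
	var list []byte

	// Appending buf[:n]... copies those n bytes in a single call,
	// equivalent to looping over indices 0..n-1 and appending one by one.
	list = append(list, buf[:n]...)
	fmt.Println(string(list)) // "abcd"
}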
@@ -58,6 +58,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
         GRPCDialOptions: []grpc.DialOption{
             grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
             grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
+            grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
         },
     }
     if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
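Both gRPC client constructions touched in this comparison (the SDK client above and the tree service client below) gain grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), which makes RPCs queue until the connection is ready instead of failing fast while the channel is still connecting. A small hedged sketch of that dial-option combination, assuming a recent google.golang.org/grpc; the target address and credentials are placeholders:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dial() (*grpc.ClientConn, error) {
	// With WaitForReady(true) applied as a default call option, RPCs wait for the
	// underlying connection to become READY rather than returning Unavailable.
	return grpc.NewClient("localhost:8080",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
	)
}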
@@ -31,7 +31,6 @@ const (
 )

 const (
-    defaultNamespace = ""
     namespaceTarget = "namespace"
     containerTarget = "container"
     userTarget = "user"
@@ -195,7 +195,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
     prmHead.SetRawFlag(true) // to get an error instead of whole object

     eg, egCtx := errgroup.WithContext(cmd.Context())
-    for idx := range len(members) {
+    for idx := range members {
         partObjID := members[idx]

         eg.Go(func() error {
@@ -114,13 +114,15 @@ func initConfig() {
     } else {
         // Find home directory.
         home, err := homedir.Dir()
-        commonCmd.ExitOnErr(rootCmd, "", err)
-        // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml"
-        viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli"))
-        viper.SetConfigName("config")
-        viper.SetConfigType("yaml")
+        if err != nil {
+            common.PrintVerbose(rootCmd, "Get homedir: %s", err)
+        } else {
+            // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml"
+            viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli"))
+            viper.SetConfigName("config")
+            viper.SetConfigType("yaml")
+        }
     }

     viper.SetEnvPrefix(envPrefix)
     viper.AutomaticEnv() // read in environment variables that match
@@ -30,8 +30,6 @@ func initAddCmd() {
     ff := addCmd.Flags()
     ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")
     ff.Uint64(parentIDFlagKey, 0, "Parent node ID")
-
-    _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
 }

 func add(cmd *cobra.Command, _ []string) {

@@ -36,7 +36,6 @@ func initAddByPathCmd() {
     ff.String(pathFlagKey, "", "Path to a node")
     ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2")

-    _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
     _ = cobra.MarkFlagRequired(ff, pathFlagKey)
 }
@@ -2,6 +2,7 @@ package tree

 import (
     "context"
+    "fmt"
     "strings"

     "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
|
||||||
// after making Tree API public.
|
// after making Tree API public.
|
||||||
func _client() (tree.TreeServiceClient, error) {
|
func _client() (tree.TreeServiceClient, error) {
|
||||||
var netAddr network.Address
|
var netAddr network.Address
|
||||||
err := netAddr.FromString(viper.GetString(commonflags.RPC))
|
|
||||||
|
rpcEndpoint := viper.GetString(commonflags.RPC)
|
||||||
|
if rpcEndpoint == "" {
|
||||||
|
return nil, fmt.Errorf("%s is not defined", commonflags.RPC)
|
||||||
|
}
|
||||||
|
|
||||||
|
err := netAddr.FromString(rpcEndpoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -34,6 +41,7 @@ func _client() (tree.TreeServiceClient, error) {
|
||||||
metrics.NewStreamClientInterceptor(),
|
metrics.NewStreamClientInterceptor(),
|
||||||
tracing.NewStreamClientInterceptor(),
|
tracing.NewStreamClientInterceptor(),
|
||||||
),
|
),
|
||||||
|
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
|
||||||
}
|
}
|
||||||
|
|
||||||
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
|
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
|
||||||
|
|
|
@@ -36,8 +36,6 @@ func initGetByPathCmd() {
     ff.String(pathFlagKey, "", "Path to a node")

     ff.Bool(latestOnlyFlagKey, false, "Look only for the latest version of a node")
-
-    _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
 }

 func getByPath(cmd *cobra.Command, _ []string) {

@@ -30,8 +30,6 @@ func initGetOpLogCmd() {
     ff := getOpLogCmd.Flags()
     ff.Uint64(heightFlagKey, 0, "Height to start with")
     ff.Uint64(countFlagKey, 10, "Logged operations count")
-
-    _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
 }

 func getOpLog(cmd *cobra.Command, _ []string) {

@@ -20,8 +20,6 @@ var healthcheckCmd = &cobra.Command{

 func initHealthcheckCmd() {
     commonflags.Init(healthcheckCmd)
-    ff := healthcheckCmd.Flags()
-    _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
 }

 func healthcheck(cmd *cobra.Command, _ []string) {

@@ -26,8 +26,6 @@ func initListCmd() {
     ff := listCmd.Flags()
     ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
     _ = listCmd.MarkFlagRequired(commonflags.CIDFlag)
-
-    _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
 }

 func list(cmd *cobra.Command, _ []string) {

@@ -33,8 +33,6 @@ func initMoveCmd() {

     _ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey)
     _ = getSubtreeCmd.MarkFlagRequired(parentIDFlagKey)
-
-    _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
 }

 func move(cmd *cobra.Command, _ []string) {

@@ -29,8 +29,6 @@ func initRemoveCmd() {
     ff.Uint64(nodeIDFlagKey, 0, "Node ID.")

     _ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey)
-
-    _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
 }

 func remove(cmd *cobra.Command, _ []string) {

@@ -34,8 +34,6 @@ func initGetSubtreeCmd() {

     _ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
     _ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)
-
-    _ = cobra.MarkFlagRequired(ff, commonflags.RPC)
 }

 func getSubTree(cmd *cobra.Command, _ []string) {
@@ -48,6 +48,8 @@ func defaultConfiguration(cfg *viper.Viper) {
     cfg.SetDefault("node.kludge_compatibility_mode", false)

     cfg.SetDefault("audit.enabled", false)
+
+    setMultinetDefaults(cfg)
 }

 func setControlDefaults(cfg *viper.Viper) {

@@ -131,3 +133,11 @@ func setMorphDefaults(cfg *viper.Viper) {
     cfg.SetDefault("morph.validators", []string{})
     cfg.SetDefault("morph.switch_interval", 2*time.Minute)
 }
+
+func setMultinetDefaults(cfg *viper.Viper) {
+    cfg.SetDefault("multinet.enabled", false)
+    cfg.SetDefault("multinet.balancer", "")
+    cfg.SetDefault("multinet.restrict", false)
+    cfg.SetDefault("multinet.fallback_delay", "0s")
+    cfg.SetDefault("multinet.subnets", "")
+}
@@ -38,7 +38,7 @@ func (r *ContainerVolumeRecord) String() string {

 func (r *LockedRecord) String() string {
     return fmt.Sprintf(
-        "Locker OID %s %c Locked [%d]OID {...}",
+        "Object OID %s %c Lockers [%d]OID {...}",
         common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
         tview.Borders.Vertical,
         len(r.ids),
@@ -11,7 +11,7 @@ func DecodeOIDs(data []byte) ([]oid.ID, error) {
     size := r.ReadVarUint()
     oids := make([]oid.ID, size)

-    for i := uint64(0); i < size; i++ {
+    for i := range size {
         if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
             return nil, err
         }
@@ -13,9 +13,7 @@ import (
 )

 func initAccountingService(ctx context.Context, c *cfg) {
-    if c.cfgMorph.client == nil {
-        initMorphComponents(ctx, c)
-    }
+    c.initMorphComponents(ctx)

     balanceMorphWrapper, err := balance.NewFromMorph(c.cfgMorph.client, c.cfgAccounting.scriptHash, 0)
     fatalOnErr(err)
@@ -26,12 +26,14 @@ import (
     fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
     loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger"
     morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/multinet"
     nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
     objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
     replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
     tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
     "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
     "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+    internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
     frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"

@@ -56,6 +58,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
     objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
     getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
+    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone"
     tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source"
     "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
|
||||||
|
|
||||||
ObjectCfg struct {
|
ObjectCfg struct {
|
||||||
tombstoneLifetime uint64
|
tombstoneLifetime uint64
|
||||||
|
priorityMetrics []placement.Metric
|
||||||
}
|
}
|
||||||
|
|
||||||
EngineCfg struct {
|
EngineCfg struct {
|
||||||
|
@ -230,6 +234,15 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
|
||||||
// Object
|
// Object
|
||||||
|
|
||||||
a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c)
|
a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c)
|
||||||
|
var pm []placement.Metric
|
||||||
|
for _, raw := range objectconfig.Get(c).Priority() {
|
||||||
|
m, err := placement.ParseMetric(raw)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
pm = append(pm, m)
|
||||||
|
}
|
||||||
|
a.ObjectCfg.priorityMetrics = pm
|
||||||
|
|
||||||
// Storage Engine
|
// Storage Engine
|
||||||
|
|
||||||
|
@ -436,6 +449,8 @@ type shared struct {
|
||||||
metricsCollector *metrics.NodeMetrics
|
metricsCollector *metrics.NodeMetrics
|
||||||
|
|
||||||
metricsSvc *objectService.MetricCollector
|
metricsSvc *objectService.MetricCollector
|
||||||
|
|
||||||
|
dialerSource *internalNet.DialerSource
|
||||||
}
|
}
|
||||||
|
|
||||||
// dynamicConfiguration stores parameters of the
|
// dynamicConfiguration stores parameters of the
|
||||||
|
@ -571,6 +586,9 @@ func (c *cfgGRPC) dropConnection(endpoint string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
type cfgMorph struct {
|
type cfgMorph struct {
|
||||||
|
initialized bool
|
||||||
|
guard sync.Mutex
|
||||||
|
|
||||||
client *client.Client
|
client *client.Client
|
||||||
|
|
||||||
notaryEnabled bool
|
notaryEnabled bool
|
||||||
|
@ -760,12 +778,18 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
|
||||||
persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path())
|
persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path())
|
||||||
fatalOnErr(err)
|
fatalOnErr(err)
|
||||||
|
|
||||||
|
nodeMetrics := metrics.NewNodeMetrics()
|
||||||
|
|
||||||
|
ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg, nodeMetrics.MultinetMetrics()))
|
||||||
|
fatalOnErr(err)
|
||||||
|
|
||||||
cacheOpts := cache.ClientCacheOpts{
|
cacheOpts := cache.ClientCacheOpts{
|
||||||
DialTimeout: apiclientconfig.DialTimeout(appCfg),
|
DialTimeout: apiclientconfig.DialTimeout(appCfg),
|
||||||
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
|
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
|
||||||
Key: &key.PrivateKey,
|
Key: &key.PrivateKey,
|
||||||
AllowExternal: apiclientconfig.AllowExternal(appCfg),
|
AllowExternal: apiclientconfig.AllowExternal(appCfg),
|
||||||
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
|
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
|
||||||
|
DialerSource: ds,
|
||||||
}
|
}
|
||||||
|
|
||||||
return shared{
|
return shared{
|
||||||
|
@ -777,10 +801,29 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
|
||||||
bgClientCache: cache.NewSDKClientCache(cacheOpts),
|
bgClientCache: cache.NewSDKClientCache(cacheOpts),
|
||||||
putClientCache: cache.NewSDKClientCache(cacheOpts),
|
putClientCache: cache.NewSDKClientCache(cacheOpts),
|
||||||
persistate: persistate,
|
persistate: persistate,
|
||||||
metricsCollector: metrics.NewNodeMetrics(),
|
metricsCollector: nodeMetrics,
|
||||||
|
dialerSource: ds,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) internalNet.Config {
|
||||||
|
result := internalNet.Config{
|
||||||
|
Enabled: multinet.Enabled(appCfg),
|
||||||
|
Balancer: multinet.Balancer(appCfg),
|
||||||
|
Restrict: multinet.Restrict(appCfg),
|
||||||
|
FallbackDelay: multinet.FallbackDelay(appCfg),
|
||||||
|
Metrics: m,
|
||||||
|
}
|
||||||
|
sn := multinet.Subnets(appCfg)
|
||||||
|
for _, s := range sn {
|
||||||
|
result.Subnets = append(result.Subnets, internalNet.Subnet{
|
||||||
|
Prefix: s.Mask,
|
||||||
|
SourceIPs: s.SourceIPs,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap {
|
func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap {
|
||||||
netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
|
netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
|
||||||
fatalOnErr(err)
|
fatalOnErr(err)
|
||||||
|
@ -1147,17 +1190,15 @@ func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
|
||||||
return pool
|
return pool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cfg) LocalNodeInfo() (*netmapV2.NodeInfo, error) {
|
func (c *cfg) LocalNodeInfo() *netmap.NodeInfo {
|
||||||
var res netmapV2.NodeInfo
|
var res netmap.NodeInfo
|
||||||
|
|
||||||
ni, ok := c.cfgNetmap.state.getNodeInfo()
|
ni, ok := c.cfgNetmap.state.getNodeInfo()
|
||||||
if ok {
|
if ok {
|
||||||
ni.WriteToV2(&res)
|
res = ni
|
||||||
} else {
|
} else {
|
||||||
c.cfgNodeInfo.localInfo.WriteToV2(&res)
|
res = c.cfgNodeInfo.localInfo
|
||||||
}
|
}
|
||||||
|
return &res
|
||||||
return &res, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// setContractNodeInfo rewrites local node info from the FrostFS network map.
|
// setContractNodeInfo rewrites local node info from the FrostFS network map.
|
||||||
|
@@ -1336,6 +1377,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
         }
     }

+    if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil {
+        c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
+        return
+    }
+
     c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }
|
||||||
|
|
||||||
func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
|
func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
|
||||||
return container.NewInfoProvider(func() (container.Source, error) {
|
return container.NewInfoProvider(func() (container.Source, error) {
|
||||||
// threadsafe: called on init or on sighup when morph initialized
|
c.initMorphComponents(ctx)
|
||||||
if c.cfgMorph.client == nil {
|
|
||||||
initMorphComponents(ctx, c)
|
|
||||||
}
|
|
||||||
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
|
cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
|
@@ -1,7 +1,6 @@
 package config_test

 import (
-    "os"
     "strings"
     "testing"

@@ -38,8 +37,7 @@ func TestConfigEnv(t *testing.T) {

     envName := strings.ToUpper(
         strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator))
-    err := os.Setenv(envName, value)
-    require.NoError(t, err)
+    t.Setenv(envName, value)

     c := configtest.EmptyConfig()
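This hunk and the config test helpers further down swap manual os.Setenv bookkeeping for testing.T.Setenv, which restores the previous value automatically when the test finishes. A minimal hedged sketch of the idiom; the variable name is illustrative only:

package example

import (
	"os"
	"testing"
)

func TestEnvOverride(t *testing.T) {
	// t.Setenv sets the variable for this test only and registers a cleanup that
	// restores the old value; it cannot be combined with t.Parallel.
	t.Setenv("EXAMPLE_SECTION_VALUE", "42")

	if got := os.Getenv("EXAMPLE_SECTION_VALUE"); got != "42" {
		t.Fatalf("unexpected value: %q", got)
	}
}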
cmd/frostfs-node/config/multinet/config.go (new file, 62 lines)

@@ -0,0 +1,62 @@
+package multinet
+
+import (
+    "strconv"
+    "time"
+
+    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+)
+
+const (
+    subsection = "multinet"
+
+    FallbackDelayDefault = 300 * time.Millisecond
+)
+
+// Enabled returns the value of "enabled" config parameter from "multinet" section.
+func Enabled(c *config.Config) bool {
+    return config.BoolSafe(c.Sub(subsection), "enabled")
+}
+
+type Subnet struct {
+    Mask string
+    SourceIPs []string
+}
+
+// Subnets returns the value of "subnets" config parameter from "multinet" section.
+func Subnets(c *config.Config) []Subnet {
+    var result []Subnet
+    sub := c.Sub(subsection).Sub("subnets")
+    for i := 0; ; i++ {
+        s := sub.Sub(strconv.FormatInt(int64(i), 10))
+        mask := config.StringSafe(s, "mask")
+        if mask == "" {
+            break
+        }
+        sourceIPs := config.StringSliceSafe(s, "source_ips")
+        result = append(result, Subnet{
+            Mask: mask,
+            SourceIPs: sourceIPs,
+        })
+    }
+    return result
+}
+
+// Balancer returns the value of "balancer" config parameter from "multinet" section.
+func Balancer(c *config.Config) string {
+    return config.StringSafe(c.Sub(subsection), "balancer")
+}
+
+// Restrict returns the value of "restrict" config parameter from "multinet" section.
+func Restrict(c *config.Config) bool {
+    return config.BoolSafe(c.Sub(subsection), "restrict")
+}
+
+// FallbackDelay returns the value of "fallback_delay" config parameter from "multinet" section.
+func FallbackDelay(c *config.Config) time.Duration {
+    fd := config.DurationSafe(c.Sub(subsection), "fallback_delay")
+    if fd != 0 { // negative value means no fallback
+        return fd
+    }
+    return FallbackDelayDefault
+}
cmd/frostfs-node/config/multinet/config_test.go (new file, 52 lines)

@@ -0,0 +1,52 @@
+package multinet
+
+import (
+    "testing"
+    "time"
+
+    "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+    configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+    "github.com/stretchr/testify/require"
+)
+
+func TestMultinetSection(t *testing.T) {
+    t.Run("defaults", func(t *testing.T) {
+        empty := configtest.EmptyConfig()
+        require.Equal(t, false, Enabled(empty))
+        require.Equal(t, ([]Subnet)(nil), Subnets(empty))
+        require.Equal(t, "", Balancer(empty))
+        require.Equal(t, false, Restrict(empty))
+        require.Equal(t, FallbackDelayDefault, FallbackDelay(empty))
+    })
+
+    const path = "../../../../config/example/node"
+
+    fileConfigTest := func(c *config.Config) {
+        require.Equal(t, true, Enabled(c))
+        require.Equal(t, []Subnet{
+            {
+                Mask: "192.168.219.174/24",
+                SourceIPs: []string{
+                    "192.168.218.185",
+                    "192.168.219.185",
+                },
+            },
+            {
+                Mask: "10.78.70.74/24",
+                SourceIPs: []string{
+                    "10.78.70.185",
+                    "10.78.71.185",
+                },
+            },
+        }, Subnets(c))
+        require.Equal(t, "roundrobin", Balancer(c))
+        require.Equal(t, false, Restrict(c))
+        require.Equal(t, 350*time.Millisecond, FallbackDelay(c))
+    }
+
+    configtest.ForEachFileType(path, fileConfigTest)
+
+    t.Run("ENV", func(t *testing.T) {
+        configtest.ForEnvFileType(t, path, fileConfigTest)
+    })
+}
@@ -10,10 +10,17 @@ type PutConfig struct {
     cfg *config.Config
 }

+// GetConfig is a wrapper over "get" config section which provides access
+// to object get pipeline configuration of object service.
+type GetConfig struct {
+    cfg *config.Config
+}
+
 const (
     subsection = "object"

     putSubsection = "put"
+    getSubsection = "get"

     // PutPoolSizeDefault is a default value of routine pool size to
     // process object.Put requests in object service.

@@ -56,3 +63,16 @@ func (g PutConfig) PoolSizeLocal() int {
 func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
     return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
 }
+
+// Get returns structure that provides access to "get" subsection of
+// "object" section.
+func Get(c *config.Config) GetConfig {
+    return GetConfig{
+        c.Sub(subsection).Sub(getSubsection),
+    }
+}
+
+// Priority returns the value of "priority" config parameter.
+func (g GetConfig) Priority() []string {
+    return config.StringSliceSafe(g.cfg, "priority")
+}
@@ -52,7 +52,7 @@ func Address(c *config.Config) string {
     return AddressDefault
 }

-// BlockRates returns the value of "block_rate" config parameter
+// BlockRate returns the value of "block_rate" config parameter
 // from "pprof" section.
 func BlockRate(c *config.Config) int {
     s := c.Sub(subsection)
@@ -11,8 +11,6 @@ import (
 )

 func fromFile(path string) *config.Config {
-    os.Clearenv() // ENVs have priority over config files, so we do this in tests
-
     return config.New(path, "", "")
 }

@@ -40,15 +38,6 @@ func ForEachFileType(pref string, f func(*config.Config)) {

 // ForEnvFileType creates config from `<pref>.env` file.
 func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) {
-    envs := os.Environ()
-    t.Cleanup(func() {
-        os.Clearenv()
-        for _, env := range envs {
-            keyValue := strings.Split(env, "=")
-            os.Setenv(keyValue[0], keyValue[1])
-        }
-    })
-
     f(fromEnvFile(t, pref+".env"))
 }

@@ -73,7 +62,6 @@ func loadEnv(t testing.TB, path string) {

         v = strings.Trim(v, `"`)

-        err = os.Setenv(k, v)
-        require.NoError(t, err, "can't set environment variable")
+        t.Setenv(k, v)
     }
 }
@@ -8,6 +8,7 @@ import (
     containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
     morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
     "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
     containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
     frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
     cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"

@@ -42,7 +43,7 @@ func initContainerService(_ context.Context, c *cfg) {

     cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg)
     if cacheSize > 0 {
-        frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL)
+        frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
     }

     c.shared.frostfsidClient = frostfsIDSubjectProvider
@@ -1,6 +1,7 @@
 package main

 import (
+    "strings"
     "time"

     "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"

@@ -9,57 +10,101 @@ import (
     "github.com/nspcc-dev/neo-go/pkg/util"
 )

+type subjectWithError struct {
+    subject *client.Subject
+    err error
+}
+
+type subjectExtWithError struct {
+    subject *client.SubjectExtended
+    err error
+}
+
 type morphFrostfsIDCache struct {
     subjProvider frostfsidcore.SubjectProvider

-    subjCache *expirable.LRU[util.Uint160, *client.Subject]
+    subjCache *expirable.LRU[util.Uint160, subjectWithError]

-    subjExtCache *expirable.LRU[util.Uint160, *client.SubjectExtended]
+    subjExtCache *expirable.LRU[util.Uint160, subjectExtWithError]
+
+    metrics cacheMetrics
 }

-func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration) frostfsidcore.SubjectProvider {
+func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration, metrics cacheMetrics) frostfsidcore.SubjectProvider {
     return &morphFrostfsIDCache{
         subjProvider: subjProvider,

-        subjCache: expirable.NewLRU(size, func(util.Uint160, *client.Subject) {}, ttl),
+        subjCache: expirable.NewLRU(size, func(util.Uint160, subjectWithError) {}, ttl),

-        subjExtCache: expirable.NewLRU(size, func(util.Uint160, *client.SubjectExtended) {}, ttl),
+        subjExtCache: expirable.NewLRU(size, func(util.Uint160, subjectExtWithError) {}, ttl),
+
+        metrics: metrics,
     }
 }

 func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) {
+    hit := false
+    startedAt := time.Now()
+    defer func() {
+        m.metrics.AddMethodDuration("GetSubject", time.Since(startedAt), hit)
+    }()
+
     result, found := m.subjCache.Get(addr)
     if found {
-        return result, nil
+        hit = true
+        return result.subject, result.err
     }

-    result, err := m.subjProvider.GetSubject(addr)
+    subj, err := m.subjProvider.GetSubject(addr)
     if err != nil {
+        if m.isCacheableError(err) {
+            m.subjCache.Add(addr, subjectWithError{
+                err: err,
+            })
+        }
         return nil, err
     }

-    m.subjCache.Add(addr, result)
-    return result, nil
+    m.subjCache.Add(addr, subjectWithError{subject: subj})
+    return subj, nil
 }

 func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
-    subjExt, found := m.subjExtCache.Get(addr)
+    hit := false
+    startedAt := time.Now()
+    defer func() {
+        m.metrics.AddMethodDuration("GetSubjectExtended", time.Since(startedAt), hit)
+    }()
+
+    result, found := m.subjExtCache.Get(addr)
     if found {
-        return subjExt, nil
+        hit = true
+        return result.subject, result.err
     }

-    var err error
-    subjExt, err = m.subjProvider.GetSubjectExtended(addr)
+    subjExt, err := m.subjProvider.GetSubjectExtended(addr)
     if err != nil {
+        if m.isCacheableError(err) {
+            m.subjExtCache.Add(addr, subjectExtWithError{
+                err: err,
+            })
+            m.subjCache.Add(addr, subjectWithError{
+                err: err,
+            })
+        }
         return nil, err
     }

-    m.subjExtCache.Add(addr, subjExt)
-    m.subjCache.Add(addr, subjectFromSubjectExtended(subjExt))
+    m.subjExtCache.Add(addr, subjectExtWithError{subject: subjExt})
+    m.subjCache.Add(addr, subjectWithError{subject: subjectFromSubjectExtended(subjExt)})

     return subjExt, nil
 }

+func (m *morphFrostfsIDCache) isCacheableError(err error) bool {
+    return strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage)
+}
+
 func subjectFromSubjectExtended(subjExt *client.SubjectExtended) *client.Subject {
     return &client.Subject{
         PrimaryKey: subjExt.PrimaryKey,
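The cache above now stores lookup errors alongside successful results, so repeated lookups of a missing subject are answered from the cache until the TTL expires instead of hitting the morph client every time. A hedged, standalone sketch of the same negative-caching idea built on hashicorp's expirable LRU; the entry type, the backend stub, and errNotFound are illustrative, not the repository's own code:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

var errNotFound = errors.New("subject not found")

// entry carries either a value or the error the backend returned.
type entry struct {
	value string
	err   error
}

func main() {
	// Entries, including cached errors, expire after the TTL.
	cache := expirable.NewLRU[string, entry](128, nil, time.Minute)

	lookup := func(key string) (string, error) {
		if e, ok := cache.Get(key); ok {
			return e.value, e.err // hit: may be a cached negative result
		}
		// Miss: pretend the backend reports that the subject does not exist.
		err := errNotFound
		cache.Add(key, entry{err: err}) // cache the negative result too
		return "", err
	}

	_, err1 := lookup("alice")
	_, err2 := lookup("alice") // served from cache, no second backend call
	fmt.Println(err1, err2)
}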
@@ -28,52 +28,18 @@ const (
     notaryDepositRetriesAmount = 300
 )

-func initMorphComponents(ctx context.Context, c *cfg) {
-    addresses := morphconfig.RPCEndpoint(c.appCfg)
-
-    // Morph client stable-sorts endpoints by priority. Shuffle here to randomize
-    // order of endpoints with the same priority.
-    rand.Shuffle(len(addresses), func(i, j int) {
-        addresses[i], addresses[j] = addresses[j], addresses[i]
-    })
-
-    cli, err := client.New(ctx,
-        c.key,
-        client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)),
-        client.WithLogger(c.log),
-        client.WithMetrics(c.metricsCollector.MorphClientMetrics()),
-        client.WithEndpoints(addresses...),
-        client.WithConnLostCallback(func() {
-            c.internalErr <- errors.New("morph connection has been lost")
-        }),
-        client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
-        client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()),
-    )
-    if err != nil {
-        c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
-            zap.Any("endpoints", addresses),
-            zap.String("error", err.Error()),
-        )
-
-        fatalOnErr(err)
+func (c *cfg) initMorphComponents(ctx context.Context) {
+    c.cfgMorph.guard.Lock()
+    defer c.cfgMorph.guard.Unlock()
+    if c.cfgMorph.initialized {
+        return
     }
-
-    c.onShutdown(func() {
-        c.log.Info(logs.FrostFSNodeClosingMorphComponents)
-        cli.Close()
-    })
-
-    if err := cli.SetGroupSignerScope(); err != nil {
-        c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
-    }
-
-    c.cfgMorph.client = cli
-    c.cfgMorph.notaryEnabled = cli.ProbeNotary()
+    initMorphClient(ctx, c)

     lookupScriptHashesInNNS(c) // smart contract auto negotiation

     if c.cfgMorph.notaryEnabled {
-        err = c.cfgMorph.client.EnableNotarySupport(
+        err := c.cfgMorph.client.EnableNotarySupport(
             client.WithProxyContract(
                 c.cfgMorph.proxyScriptHash,
             ),
@ -109,6 +75,51 @@ func initMorphComponents(ctx context.Context, c *cfg) {
|
||||||
|
|
||||||
c.netMapSource = netmapSource
|
c.netMapSource = netmapSource
|
||||||
c.cfgNetmap.wrapper = wrap
|
c.cfgNetmap.wrapper = wrap
|
||||||
|
c.cfgMorph.initialized = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func initMorphClient(ctx context.Context, c *cfg) {
|
||||||
|
addresses := morphconfig.RPCEndpoint(c.appCfg)
|
||||||
|
|
||||||
|
// Morph client stable-sorts endpoints by priority. Shuffle here to randomize
|
||||||
|
// order of endpoints with the same priority.
|
||||||
|
rand.Shuffle(len(addresses), func(i, j int) {
|
||||||
|
addresses[i], addresses[j] = addresses[j], addresses[i]
|
||||||
|
})
|
||||||
|
|
||||||
|
cli, err := client.New(ctx,
|
||||||
|
c.key,
|
||||||
|
client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)),
|
||||||
|
client.WithLogger(c.log),
|
||||||
|
client.WithMetrics(c.metricsCollector.MorphClientMetrics()),
|
||||||
|
client.WithEndpoints(addresses...),
|
||||||
|
client.WithConnLostCallback(func() {
|
||||||
|
c.internalErr <- errors.New("morph connection has been lost")
|
||||||
|
}),
|
||||||
|
client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
|
||||||
|
client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()),
|
||||||
|
client.WithDialerSource(c.dialerSource),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
|
||||||
|
zap.Any("endpoints", addresses),
|
||||||
|
zap.String("error", err.Error()),
|
||||||
|
)
|
||||||
|
|
||||||
|
fatalOnErr(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.onShutdown(func() {
|
||||||
|
c.log.Info(logs.FrostFSNodeClosingMorphComponents)
|
||||||
|
cli.Close()
|
||||||
|
})
|
||||||
|
|
||||||
|
if err := cli.SetGroupSignerScope(); err != nil {
|
||||||
|
c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
c.cfgMorph.client = cli
|
||||||
|
c.cfgMorph.notaryEnabled = cli.ProbeNotary()
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
|
func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
|
||||||
|
|
|
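
The refactor turns `initMorphComponents` into a method guarded by `c.cfgMorph.guard` and an `initialized` flag, so different subsystems can request initialization without racing or re-creating the client. A minimal, standalone sketch of that guard pattern with placeholder types (not the node's actual structs):

```go
package main

import (
	"fmt"
	"sync"
)

// component mimics the guard/initialized pattern above: any caller may ask
// for initialization, but the expensive body runs at most once.
type component struct {
	guard       sync.Mutex
	initialized bool
	client      string
}

func (c *component) init() {
	c.guard.Lock()
	defer c.guard.Unlock()
	if c.initialized {
		return
	}
	c.client = "connected" // expensive setup (RPC client creation) would go here
	c.initialized = true
}

func main() {
	var c component
	c.init()
	c.init() // second call is a no-op
	fmt.Println(c.client)
}
```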
@@ -143,9 +143,7 @@ func initNetmapService(ctx context.Context, c *cfg) {
 	parseAttributes(c)
 	c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline)
 
-	if c.cfgMorph.client == nil {
-		initMorphComponents(ctx, c)
-	}
+	c.initMorphComponents(ctx)
 
 	initNetmapState(c)
@@ -117,7 +117,7 @@ func (i *delNetInfo) TombstoneLifetime() (uint64, error) {
 	return i.cfg.cfgObject.tombstoneLifetime.Load(), nil
 }
 
-// returns node owner ID calculated from configured private key.
+// LocalNodeID returns node owner ID calculated from configured private key.
 //
 // Implements method needed for Object.Delete service.
 func (i *delNetInfo) LocalNodeID() user.ID {
@@ -178,7 +178,8 @@ func initObjectService(c *cfg) {
 	sSearchV2 := createSearchSvcV2(sSearch, keyStorage)
 
-	sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource)
+	sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource,
+		c.ObjectCfg.priorityMetrics)
 
 	*c.cfgObject.getSvc = *sGet // need smth better
 
@@ -389,6 +390,7 @@ func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage)
 func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator,
 	coreConstructor *cache.ClientCache,
 	containerSource containercore.Source,
+	priorityMetrics []placement.Metric,
 ) *getsvc.Service {
 	ls := c.cfgObject.cfgLocalStorage.localStorage
 
@@ -398,6 +400,8 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra
 		ls,
 		traverseGen.WithTraverseOptions(
 			placement.SuccessAfter(1),
+			placement.WithPriorityMetrics(priorityMetrics),
+			placement.WithNodeState(c),
 		),
 		coreConstructor,
 		containerSource,
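
The new `get.priority` setting (see the config and documentation changes later in this diff) feeds `placement.WithPriorityMetrics`, so GET traversal can prefer nodes that "look like" the local one for a given attribute such as ClusterName. One plausible reading of attribute-based prioritization, shown as a standalone sketch rather than the placement package's actual algorithm:

```go
package main

import (
	"fmt"
	"sort"
)

// nodeInfo is a stand-in for a netmap node with string attributes.
type nodeInfo struct {
	Address    string
	Attributes map[string]string
}

// sortByAttribute moves nodes whose attribute value matches the local node's
// value to the front, keeping the original order otherwise. Illustration only.
func sortByAttribute(nodes []nodeInfo, attr, localValue string) {
	sort.SliceStable(nodes, func(i, j int) bool {
		mi := nodes[i].Attributes[attr] == localValue
		mj := nodes[j].Attributes[attr] == localValue
		return mi && !mj
	})
}

func main() {
	nodes := []nodeInfo{
		{Address: "n1", Attributes: map[string]string{"ClusterName": "B"}},
		{Address: "n2", Attributes: map[string]string{"ClusterName": "A"}},
	}
	sortByAttribute(nodes, "ClusterName", "A")
	fmt.Println(nodes[0].Address) // n2: same cluster as the local node
}
```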
@@ -54,7 +54,6 @@ func initTreeService(c *cfg) {
 			cli: c.shared.cnrClient,
 		}),
 		tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
-		tree.WithEACLSource(c.cfgObject.eaclSource),
 		tree.WithNetmapSource(c.netMapSource),
 		tree.WithPrivateKey(&c.key.PrivateKey),
 		tree.WithLogger(c.log),
@@ -68,6 +67,7 @@ func initTreeService(c *cfg) {
 		tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()),
 		tree.WithAPEMorphRuleStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage()),
 		tree.WithNetmapState(c.cfgNetmap.state),
+		tree.WithDialerSource(c.dialerSource),
 	)
 
 	c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
@@ -80,3 +80,12 @@ FROSTFS_IR_PPROF_MUTEX_RATE=10000
 FROSTFS_IR_PROMETHEUS_ENABLED=true
 FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090
 FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s
+
+FROSTFS_MULTINET_ENABLED=true
+FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
+FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
+FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
+FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
+FROSTFS_MULTINET_BALANCER=roundrobin
+FROSTFS_MULTINET_RESTRICT=false
+FROSTFS_MULTINET_FALLBACK_DELAY=350ms
@@ -123,3 +123,18 @@ prometheus:
 
 systemdnotify:
   enabled: true
+
+multinet:
+  enabled: true
+  subnets:
+    - mask: 192.168.219.174/24
+      source_ips:
+        - 192.168.218.185
+        - 192.168.219.185
+    - mask: 10.78.70.74/24
+      source_ips:
+        - 10.78.70.185
+        - 10.78.71.185
+  balancer: roundrobin
+  restrict: false
+  fallback_delay: 350ms
@@ -87,6 +87,7 @@ FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
 FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
 FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
+FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE"
 
 # Storage engine section
 FROSTFS_STORAGE_SHARD_POOL_SIZE=15
@@ -206,3 +207,13 @@ FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
 
 # AUDIT section
 FROSTFS_AUDIT_ENABLED=true
+
+# MULTINET section
+FROSTFS_MULTINET_ENABLED=true
+FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
+FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
+FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
+FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
+FROSTFS_MULTINET_BALANCER=roundrobin
+FROSTFS_MULTINET_RESTRICT=false
+FROSTFS_MULTINET_FALLBACK_DELAY=350ms
@@ -131,6 +131,9 @@
       "remote_pool_size": 100,
       "local_pool_size": 200,
       "skip_session_token_issuer_verification": true
+    },
+    "get": {
+      "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"]
     }
   },
   "storage": {
@@ -264,5 +267,27 @@
   },
   "audit": {
     "enabled": true
+  },
+  "multinet": {
+    "enabled": true,
+    "subnets": [
+      {
+        "mask": "192.168.219.174/24",
+        "source_ips": [
+          "192.168.218.185",
+          "192.168.219.185"
+        ]
+      },
+      {
+        "mask": "10.78.70.74/24",
+        "source_ips":[
+          "10.78.70.185",
+          "10.78.71.185"
+        ]
+      }
+    ],
+    "balancer": "roundrobin",
+    "restrict": false,
+    "fallback_delay": "350ms"
   }
 }
@@ -114,6 +114,10 @@ object:
     remote_pool_size: 100 # number of async workers for remote PUT operations
     local_pool_size: 200 # number of async workers for local PUT operations
     skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
+  get:
+    priority: # list of metrics of nodes for prioritization
+      - $attribute:ClusterName
+      - $attribute:UN-LOCODE
 
 storage:
   # note: shard configuration can be omitted for relay node (see `node.relay`)
@@ -240,3 +244,18 @@ runtime:
 
 audit:
   enabled: true
+
+multinet:
+  enabled: true
+  subnets:
+    - mask: 192.168.219.174/24
+      source_ips:
+        - 192.168.218.185
+        - 192.168.219.185
+    - mask: 10.78.70.74/24
+      source_ips:
+        - 10.78.70.185
+        - 10.78.71.185
+  balancer: roundrobin
+  restrict: false
+  fallback_delay: 350ms
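
The same multinet settings appear in three forms above (env, JSON, YAML). As a rough illustration of how the YAML subtree maps onto a plain struct, here is a hedged sketch using a generic YAML decoder; the type and field names are illustrative, and the storage node itself uses its own configuration layer rather than this code:

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v3" // assumption: any YAML decoder will do for this sketch
)

// multinetSubnet and multinetSection mirror the example config above.
type multinetSubnet struct {
	Mask      string   `yaml:"mask"`
	SourceIPs []string `yaml:"source_ips"`
}

type multinetSection struct {
	Enabled       bool             `yaml:"enabled"`
	Subnets       []multinetSubnet `yaml:"subnets"`
	Balancer      string           `yaml:"balancer"`
	Restrict      bool             `yaml:"restrict"`
	FallbackDelay string           `yaml:"fallback_delay"`
}

func main() {
	raw := []byte(`
enabled: true
subnets:
  - mask: 192.168.219.174/24
    source_ips:
      - 192.168.218.185
      - 192.168.219.185
balancer: roundrobin
restrict: false
fallback_delay: 350ms
`)
	var cfg multinetSection
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// Durations arrive as strings here and are parsed explicitly.
	delay, err := time.ParseDuration(cfg.FallbackDelay)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Enabled, len(cfg.Subnets), cfg.Balancer, delay)
}
```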
All Debian packaging files (marked `vendored` in the listing) are removed in this change set:

- debian/changelog (5 lines): a single "Initial package build" entry for frostfs-node (0.0.1) by TrueCloudLab <tech@frostfs.info>, Tue, 25 Oct 2022 21:10:49 +0300.
- debian/clean (2 lines): cleaned `man/` and `debian/*.bash-completion`.
- debian/control (39 lines): source package `frostfs-node` (Section: misc, Maintainer: TrueCloudLab <tech@frostfs.info>, Build-Depends: debhelper-compat (= 13), dh-sequence-bash-completion, devscripts, Standards-Version: 4.5.1, Homepage: https://fs.neo.org/, Vcs at https://git.frostfs.info/TrueCloudLab/frostfs-node) with binary packages `frostfs-storage`, `frostfs-ir` (Depends: frostfs-locode-db) and `frostfs-cli`, each described as part of FrostFS, a decentralized distributed object storage integrated with the NEO blockchain.
- debian/copyright (23 lines): GPL-3 notice; Copyright 2022-2023 TrueCloudLab and 2018-2022 NeoSPCC (@nspcc-dev), contributors of the FrostFS/NeoFS projects (see CREDITS.md).
- debian/frostfs-cli.docs (4), .install (3), .manpages (1): shipped CONTRIBUTING.md, CREDITS.md, README.md and cmd/frostfs-adm/docs; installed bin/frostfs-adm, bin/frostfs-cli and bin/frostfs-lens into usr/bin; man pages from `man/*`.
- debian/frostfs-ir.dirs (2), .docs (3), .install (1): created /etc/frostfs/ir and /var/lib/frostfs/ir; shipped CONTRIBUTING.md, CREDITS.md, README.md; installed bin/frostfs-ir into usr/bin.
- debian/frostfs-ir.postinst (51), .postrm (40), .preinst (34), .prerm (37): standard debhelper maintainer scripts that created the `frostfs-ir` system user, set root:frostfs-ir ownership and 0750/0640 permissions on /etc/frostfs/ir and its config.yml/control.yml, chowned the user's home directory, and removed /var/lib/frostfs/ir/* on purge.
- debian/frostfs-ir.service (17): systemd unit "FrostFS InnerRing node", Type=notify, ExecStart=/usr/bin/frostfs-ir --config /etc/frostfs/ir/config.yml, User/Group=frostfs-ir, WorkingDirectory=/var/lib/frostfs/ir, Restart=always, RestartSec=5, PrivateTmp=true, WantedBy=multi-user.target.
- debian/frostfs-storage.dirs (3), .docs (4), .install (1): created /etc/frostfs/storage, /srv/frostfs and /var/lib/frostfs/storage; shipped docs/storage-node-configuration.md plus CONTRIBUTING.md, CREDITS.md, README.md; installed bin/frostfs-node into usr/bin.
- debian/frostfs-storage.postinst (55), .postrm (40), .preinst (34), .prerm (37): the same maintainer scripts for the `frostfs-storage` user, additionally chowning /srv/frostfs, and removing /var/lib/frostfs/storage/* on purge.
- debian/frostfs-storage.service (17): systemd unit "FrostFS Storage node", ExecStart=/usr/bin/frostfs-node --config /etc/frostfs/storage/config.yml, User/Group=frostfs-storage, WorkingDirectory=/srv/frostfs, otherwise identical to the InnerRing unit.
- debian/rules (40): debhelper rules (`dh $@ --with bash-completion`, DEB_BUILD_OPTIONS=nostrip) that generated man pages and bash/fish/zsh completions via `frostfs-adm`/`frostfs-cli gendoc` and `completion`, installed the example ir/node configs as config.yml/control.yml, and registered the frostfs-ir/frostfs-storage systemd units with --no-enable --no-start.
- debian/source/format (1): `3.0 (quilt)`.
@@ -1,46 +0,0 @@ (deleted file: Debian package build guide)
-# Building Debian package on host
-
-## Prerequisites
-
-For now, we're assuming building for Debian 11 (stable) x86_64.
-
-Go version 18.4 or later should already be installed, i.e. this runs
-successfully:
-
-* `make all`
-
-## Installing packaging dependencies
-
-```shell
-$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts
-```
-
-Warining: number of package installed is pretty large considering dependecies.
-
-## Package building
-
-```shell
-$ make debpackage
-```
-
-## Leftovers cleaning
-
-```shell
-$ make debclean
-```
-or
-```shell
-$ dh clean
-```
-
-# Package versioning
-
-By default, package version is based on product version and may also contain git
-tags and hashes.
-
-Package version could be overwritten by setting `PKG_VERSION` variable before
-build, Debian package versioning rules should be respected.
-
-```shell
-$ PKG_VERSION=0.32.0 make debpackge
-```
@@ -43,11 +43,6 @@ Write new revision number into the root `VERSION` file:
 $ echo ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} > VERSION
 ```
 
-Update version in Debian package changelog file
-```
-$ cat debian/changelog
-```
-
 Update the supported version of `TrueCloudLab/frostfs-contract` module in root
 `README.md` if needed.
@@ -26,7 +26,7 @@ There are some custom types used for brevity:
 | `storage` | [Storage engine configuration](#storage-section) |
 | `runtime` | [Runtime configuration](#runtime-section) |
 | `audit` | [Audit configuration](#audit-section) |
+| `multinet` | [Multinet configuration](#multinet-section) |
 
 # `control` section
 ```yaml
@@ -407,13 +407,17 @@ Contains object-service related parameters.
 object:
   put:
     remote_pool_size: 100
+  get:
+    priority:
+      - $attribute:ClusterName
 ```
 
 | Parameter | Type | Default value | Description |
-|-----------------------------|-------|---------------|--------------------------------------------------------------------------------------------------|
+|-----------------------------|------------|---------------|--------------------------------------------------------------------------------------------------------|
 | `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
 | `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
 | `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
+| `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET and SEARCH requests. |
 
 # `runtime` section
 Contains runtime parameters.
@@ -436,5 +440,34 @@ audit:
 ```
 
 | Parameter | Type | Default value | Description |
-|---------------------|--------|---------------|---------------------------------------------------|
-| `soft_memory_limit` | `bool` | false | If `true` then audit event logs will be recorded. |
+|-----------|--------|---------------|---------------------------------------------------|
+| `enabled` | `bool` | false | If `true` then audit event logs will be recorded. |
+
+# `multinet` section
+Contains multinet parameters.
+
+```yaml
+multinet:
+  enabled: true
+  subnets:
+    - mask: 192.168.219.174/24
+      source_ips:
+        - 192.168.218.185
+        - 192.168.219.185
+    - mask: 10.78.70.74/24
+      source_ips:
+        - 10.78.70.185
+        - 10.78.71.185
+  balancer: roundrobin
+  restrict: false
+  fallback_delay: 350ms
+```
+
+| Parameter | Type | Default value | Description |
+| ---------------- | ---------- | ------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| `enabled` | `bool` | false | If `true` then source-based routing is enabled. |
+| `subnets` | `subnet` | empty | Resulting subnets. |
+| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
+| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
+| `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. |
go.mod
9
go.mod
|
@ -5,12 +5,13 @@ go 1.22
|
||||||
require (
|
require (
|
||||||
code.gitea.io/sdk/gitea v0.17.1
|
code.gitea.io/sdk/gitea v0.17.1
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3
|
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c
|
git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
|
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
|
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
|
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa
|
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509
|
||||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1
|
git.frostfs.info/TrueCloudLab/hrw v1.2.1
|
||||||
|
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
|
||||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
|
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
|
||||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
|
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
|
||||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
|
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
|
||||||
|
@ -18,6 +19,7 @@ require (
|
||||||
github.com/cheggaaa/pb v1.0.29
|
github.com/cheggaaa/pb v1.0.29
|
||||||
github.com/chzyer/readline v1.5.1
|
github.com/chzyer/readline v1.5.1
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||||
|
github.com/felixge/fgprof v0.9.5
|
||||||
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
|
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
|
||||||
github.com/gdamore/tcell/v2 v2.7.4
|
github.com/gdamore/tcell/v2 v2.7.4
|
||||||
github.com/go-pkgz/expirable-cache/v3 v3.0.0
|
github.com/go-pkgz/expirable-cache/v3 v3.0.0
|
||||||
|
@ -76,6 +78,7 @@ require (
|
||||||
github.com/go-logr/logr v1.4.2 // indirect
|
github.com/go-logr/logr v1.4.2 // indirect
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/golang/snappy v0.0.4 // indirect
|
github.com/golang/snappy v0.0.4 // indirect
|
||||||
|
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
|
||||||
github.com/gorilla/websocket v1.5.1 // indirect
|
github.com/gorilla/websocket v1.5.1 // indirect
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect
|
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect
|
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect
|
||||||
|
@ -132,4 +135,4 @@ require (
|
||||||
rsc.io/tmplfunc v0.0.3 // indirect
|
rsc.io/tmplfunc v0.0.3 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928
|
replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07
|
||||||
|
|
BIN
go.sum
BIN
go.sum
Binary file not shown.
|
@@ -14,8 +14,6 @@ const (
 	InterruptPlacementIterationByContext = "interrupt placement iteration by context"
 
 	Notification = "notification"
-
-	SkipDeprecatedNotification = "skip deprecated notification"
 )
 
 const (
@@ -41,8 +39,6 @@ const (
 	InnerringCantUpdatePersistentState = "can't update persistent state"
 	InnerringCloserError = "closer error"
 	InnerringReadConfigFromBlockchain = "read config from blockchain"
-	NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications"
-	NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification"
 	PolicerCouldNotGetContainer = "could not get container"
 	PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
 	PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
@@ -61,7 +57,6 @@ const (
 	ReplicatorCouldNotReplicateObject = "could not replicate object"
 	ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
 	TreeRedirectingTreeServiceQuery = "redirecting tree service query"
-	TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL"
 	TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
 	TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
 	TreeSynchronizeTree = "synchronize tree"
@@ -107,7 +102,6 @@ const (
 	GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existed"
 	GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object"
 	GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object"
-	GetUnableToGetECObjectContainer = "unable to get container for erasure-coded object"
 	GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object"
 	GetAssemblingSplittedObjectCompleted = "assembling splitted object completed"
 	GetAssemblingECObjectCompleted = "assembling erasure-coded object completed"
@@ -271,9 +265,7 @@ const (
 	ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
 	ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
 	ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
-	WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache"
 	WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
-	WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database"
 	WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
 	BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
 	BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
@@ -308,9 +300,6 @@ const (
 	ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
 	ContainerDeleteContainerCheckFailed = "delete container check failed"
 	ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
-	ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL"
-	ContainerSetEACLCheckFailed = "set EACL check failed"
-	ContainerCouldNotApproveSetEACL = "could not approve set EACL"
 	FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
 	FrostFSCantRelaySetConfigEvent = "can't relay set config event"
 	FrostFSFrostfsWorkerPool = "frostfs worker pool"
@@ -416,11 +405,6 @@ const (
 	FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
 	FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
 	FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
-	FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers"
-	FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container"
-	FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object"
-	FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications"
-	FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification"
 	FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
 	FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
 	FrostFSNodeFailedInitTracing = "failed init tracing"
@@ -464,7 +448,6 @@ const (
 	FSTreeCantUnmarshalObject = "can't unmarshal an object"
 	FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor"
 	FSTreeCantUpdateID = "can't update object storage ID"
-	FSTreeCantDecodeDBObjectAddress = "can't decode object address from the DB"
 	PutSingleRedirectFailure = "failed to redirect PutSingle request"
 	StorageIDRetrievalFailure = "can't get storage ID from metabase"
 	ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
@@ -540,4 +523,5 @@ const (
 	WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
 	BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
 	WritecacheCantGetObject = "can't get an object from fstree"
+	FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
 )
@@ -22,6 +22,7 @@ const (
 	grpcServerSubsystem = "grpc_server"
 	policerSubsystem = "policer"
 	commonCacheSubsystem = "common_cache"
+	multinetSubsystem = "multinet"
 
 	successLabel = "success"
 	shardIDLabel = "shard_id"
@@ -41,6 +42,7 @@ const (
 	endpointLabel = "endpoint"
 	hitLabel = "hit"
 	cacheLabel = "cache"
+	sourceIPLabel = "source_ip"
 
 	readWriteMode = "READ_WRITE"
 	readOnlyMode = "READ_ONLY"
@@ -17,6 +17,7 @@ type InnerRingServiceMetrics struct {
 	eventDuration *prometheus.HistogramVec
 	morphCacheMetrics *morphCacheMetrics
 	logMetrics logger.LogMetrics
+	multinet *multinetMetrics
 	// nolint: unused
 	appInfo *ApplicationInfo
 }
@@ -51,6 +52,7 @@ func NewInnerRingMetrics() *InnerRingServiceMetrics {
 		morphCacheMetrics: newMorphCacheMetrics(innerRingNamespace),
 		appInfo: NewApplicationInfo(misc.Version),
 		logMetrics: logger.NewLogMetrics(innerRingNamespace),
+		multinet: newMultinetMetrics(innerRingNamespace),
 	}
 }
 
@@ -78,3 +80,7 @@ func (m *InnerRingServiceMetrics) MorphCacheMetrics() MorphCacheMetrics {
 func (m *InnerRingServiceMetrics) LogMetrics() logger.LogMetrics {
 	return m.logMetrics
 }
+
+func (m *InnerRingServiceMetrics) Multinet() MultinetMetrics {
+	return m.multinet
+}
internal/metrics/multinet.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+package metrics
+
+import (
+	"strconv"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type multinetMetrics struct {
+	dials *prometheus.GaugeVec
+}
+
+type MultinetMetrics interface {
+	Dial(sourceIP string, success bool)
+}
+
+func newMultinetMetrics(ns string) *multinetMetrics {
+	return &multinetMetrics{
+		dials: metrics.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: ns,
+				Subsystem: multinetSubsystem,
+				Name:      "dial_count",
+				Help:      "Dials count performed by multinet",
+			}, []string{sourceIPLabel, successLabel}),
+	}
+}
+
+func (m *multinetMetrics) Dial(sourceIP string, success bool) {
+	m.dials.With(prometheus.Labels{
+		sourceIPLabel: sourceIP,
+		successLabel:  strconv.FormatBool(success),
+	}).Inc()
+}
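
A short usage sketch for the new gauge, written as a test that would live next to the file above inside the `metrics` package, since the constructor is unexported. Whether the observability helper tolerates constructing the vector again in a test process is not shown in this diff, so treat this purely as an illustration:

```go
package metrics

import "testing"

// TestMultinetDialMetric records one successful and one failed dial for two
// source IPs; the namespace string is arbitrary.
func TestMultinetDialMetric(t *testing.T) {
	m := newMultinetMetrics("frostfs_node_example")
	m.Dial("192.168.218.185", true)
	m.Dial("10.78.70.185", false)
}
```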
@@ -25,6 +25,7 @@ type NodeMetrics struct {
 	morphClient *morphClientMetrics
 	morphCache *morphCacheMetrics
 	log logger.LogMetrics
+	multinet *multinetMetrics
 	// nolint: unused
 	appInfo *ApplicationInfo
 }
@@ -53,6 +54,7 @@ func NewNodeMetrics() *NodeMetrics {
 		morphCache: newMorphCacheMetrics(namespace),
 		log: logger.NewLogMetrics(namespace),
 		appInfo: NewApplicationInfo(misc.Version),
+		multinet: newMultinetMetrics(namespace),
 	}
 }
 
@@ -120,3 +122,7 @@ func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics {
 func (m *NodeMetrics) LogMetrics() logger.LogMetrics {
 	return m.log
 }
+
+func (m *NodeMetrics) MultinetMetrics() MultinetMetrics {
+	return m.multinet
+}
internal/net/config.go (new file, 69 lines)
@@ -0,0 +1,69 @@
+package net
+
+import (
+	"errors"
+	"fmt"
+	"net/netip"
+	"slices"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+	"git.frostfs.info/TrueCloudLab/multinet"
+)
+
+var errEmptySourceIPList = errors.New("empty source IP list")
+
+type Subnet struct {
+	Prefix    string
+	SourceIPs []string
+}
+
+type Config struct {
+	Enabled       bool
+	Subnets       []Subnet
+	Balancer      string
+	Restrict      bool
+	FallbackDelay time.Duration
+	Metrics       metrics.MultinetMetrics
+}
+
+func (c Config) toMultinetConfig() (multinet.Config, error) {
+	var subnets []multinet.Subnet
+	for _, s := range c.Subnets {
+		var ms multinet.Subnet
+		p, err := netip.ParsePrefix(s.Prefix)
+		if err != nil {
+			return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
+		}
+		ms.Prefix = p
+		for _, ip := range s.SourceIPs {
+			addr, err := netip.ParseAddr(ip)
+			if err != nil {
+				return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
+			}
+			ms.SourceIPs = append(ms.SourceIPs, addr)
+		}
+		if len(ms.SourceIPs) == 0 {
+			return multinet.Config{}, errEmptySourceIPList
+		}
+		subnets = append(subnets, ms)
+	}
+	return multinet.Config{
+		Subnets:       subnets,
+		Balancer:      multinet.BalancerType(c.Balancer),
+		Restrict:      c.Restrict,
+		FallbackDelay: c.FallbackDelay,
+		Dialer:        newDefaulDialer(),
+		EventHandler:  newEventHandler(c.Metrics),
+	}, nil
+}
+
+func (c Config) equals(other Config) bool {
+	return c.Enabled == other.Enabled &&
+		slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
+			return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
+		}) &&
+		c.Balancer == other.Balancer &&
+		c.Restrict == other.Restrict &&
+		c.FallbackDelay == other.FallbackDelay
+}
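To make the shape of this configuration concrete, here is a standalone sketch of a filled-in value; the struct is a trimmed copy (the Metrics field is omitted), and the prefix, source IP, balancer name, and delay are example values only. Note that toMultinetConfig rejects a subnet with an empty SourceIPs list with errEmptySourceIPList.

package main

import (
	"fmt"
	"time"
)

// Subnet and Config mirror the exported fields of internal/net, copied so the sketch is self-contained.
type Subnet struct {
	Prefix    string
	SourceIPs []string
}

type Config struct {
	Enabled       bool
	Subnets       []Subnet
	Balancer      string
	Restrict      bool
	FallbackDelay time.Duration
}

func main() {
	// Example: dial destinations inside 192.168.1.0/24 from source address 192.168.1.10.
	cfg := Config{
		Enabled: true,
		Subnets: []Subnet{
			{Prefix: "192.168.1.0/24", SourceIPs: []string{"192.168.1.10"}},
		},
		Balancer:      "roundrobin", // balancer name is an assumption
		Restrict:      false,
		FallbackDelay: 350 * time.Millisecond,
	}
	fmt.Printf("%+v\n", cfg)
}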
internal/net/dial_target.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
+
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package net
+
+import (
+	"net/url"
+	"strings"
+)
+
+// parseDialTarget returns the network and address to pass to dialer.
+func parseDialTarget(target string) (string, string) {
+	net := "tcp"
+	m1 := strings.Index(target, ":")
+	m2 := strings.Index(target, ":/")
+	// handle unix:addr which will fail with url.Parse
+	if m1 >= 0 && m2 < 0 {
+		if n := target[0:m1]; n == "unix" {
+			return n, target[m1+1:]
+		}
+	}
+	if m2 >= 0 {
+		t, err := url.Parse(target)
+		if err != nil {
+			return net, target
+		}
+		scheme := t.Scheme
+		addr := t.Path
+		if scheme == "unix" {
+			if addr == "" {
+				addr = t.Host
+			}
+			return scheme, addr
+		}
+	}
+	return net, target
+}
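Since this helper is unexported, its behavior is easiest to see in a table test placed in the same package. The sketch below is not part of the diff; the socket paths are examples, and the expected outputs are the ones implied by the code above (plain host:port stays on tcp, both unix forms yield the unix network and the path).

package net

import "testing"

// Sketch of a table test for parseDialTarget.
func TestParseDialTargetSketch(t *testing.T) {
	cases := []struct{ in, network, addr string }{
		{"127.0.0.1:8080", "tcp", "127.0.0.1:8080"},                // plain host:port -> tcp
		{"unix:/tmp/frostfs.sock", "unix", "/tmp/frostfs.sock"},    // unix:path form
		{"unix:///tmp/frostfs.sock", "unix", "/tmp/frostfs.sock"},  // unix URL form
	}
	for _, c := range cases {
		network, addr := parseDialTarget(c.in)
		if network != c.network || addr != c.addr {
			t.Errorf("parseDialTarget(%q) = (%q, %q), want (%q, %q)", c.in, network, addr, c.network, c.addr)
		}
	}
}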
internal/net/dialer.go (new file, 39 lines)
@@ -0,0 +1,39 @@
+package net
+
+import (
+	"context"
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+type Dialer interface {
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) {
+	return d.DialContext(ctx, "tcp", address)
+}
+
+func newDefaulDialer() net.Dialer {
+	// From `grpc.WithContextDialer` comment:
+	//
+	// Note: All supported releases of Go (as of December 2023) override the OS
+	// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+	// with OS defaults for keepalive time and interval, use a net.Dialer that sets
+	// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
	// option to true from the Control field. For a concrete example of how to do
+	// this, see internal.NetDialerWithTCPKeepalive().
+	//
+	// https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
+	return net.Dialer{
+		KeepAlive: time.Duration(-1),
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
internal/net/dialer_source.go (new file, 83 lines)
@@ -0,0 +1,83 @@
+package net
+
+import (
+	"context"
+	"net"
+	"sync"
+
+	"git.frostfs.info/TrueCloudLab/multinet"
+)
+
+type DialerSource struct {
+	guard sync.RWMutex
+
+	c Config
+
+	md multinet.Dialer
+}
+
+func NewDialerSource(c Config) (*DialerSource, error) {
+	result := &DialerSource{}
+	if err := result.build(c); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+func (s *DialerSource) build(c Config) error {
+	if c.Enabled {
+		mc, err := c.toMultinetConfig()
+		if err != nil {
+			return err
+		}
+		md, err := multinet.NewDialer(mc)
+		if err != nil {
+			return err
+		}
+		s.md = md
+		s.c = c
+		return nil
+	}
+	s.md = nil
+	s.c = c
+	return nil
+}
+
+// GrpcContextDialer returns grpc.WithContextDialer func.
+// Returns nil if multinet disabled.
+func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
+	s.guard.RLock()
+	defer s.guard.RUnlock()
+
+	if s.c.Enabled {
+		return func(ctx context.Context, address string) (net.Conn, error) {
+			network, address := parseDialTarget(address)
+			return s.md.DialContext(ctx, network, address)
+		}
+	}
+	return nil
+}
+
+// NetContextDialer returns net.DialContext dial function.
+// Returns nil if multinet disabled.
+func (s *DialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) {
+	s.guard.RLock()
+	defer s.guard.RUnlock()
+
+	if s.c.Enabled {
+		return func(ctx context.Context, network, address string) (net.Conn, error) {
+			return s.md.DialContext(ctx, network, address)
+		}
+	}
+	return nil
+}
+
+func (s *DialerSource) Update(c Config) error {
+	s.guard.Lock()
+	defer s.guard.Unlock()

+	if s.c.equals(c) {
+		return nil
+	}
+	return s.build(c)
+}
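For orientation, a minimal sketch of how a caller inside this module could wire GrpcContextDialer into a gRPC client; the target address and the disabled config are made-up, and error handling is trimmed. Because GrpcContextDialer returns nil when multinet is disabled, the dial option is only appended when a dialer is present.

package main

import (
	"log"

	internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Illustrative config: multinet disabled, so GrpcContextDialer returns nil.
	ds, err := internalNet.NewDialerSource(internalNet.Config{Enabled: false})
	if err != nil {
		log.Fatal(err)
	}

	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// Only install the custom dialer when multinet is enabled.
	if dialer := ds.GrpcContextDialer(); dialer != nil {
		opts = append(opts, grpc.WithContextDialer(dialer))
	}

	conn, err := grpc.NewClient("storage.example:8080", opts...)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}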
internal/net/event_handler.go (new file, 29 lines)
@@ -0,0 +1,29 @@
+package net
+
+import (
+	"net"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+	"git.frostfs.info/TrueCloudLab/multinet"
+)
+
+var _ multinet.EventHandler = (*metricsEventHandler)(nil)
+
+type metricsEventHandler struct {
+	m metrics.MultinetMetrics
+}
+
+func (m *metricsEventHandler) DialPerformed(sourceIP net.Addr, _ string, _ string, err error) {
+	sourceIPString := "undefined"
+	if sourceIP != nil {
+		sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
+	}
+	m.m.Dial(sourceIPString, err == nil)
+}
+
+func newEventHandler(m metrics.MultinetMetrics) multinet.EventHandler {
+	if m == nil {
+		return nil
+	}
+	return &metricsEventHandler{m: m}
+}
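The source_ip label value recorded here is the network name and the bound local address joined with "://", or the literal "undefined" when no source address was used. A tiny standalone sketch of that formatting (the address is an example):

package main

import (
	"fmt"
	"net"
)

func main() {
	src := &net.TCPAddr{IP: net.ParseIP("192.168.1.10")}
	fmt.Println(src.Network() + "://" + src.String()) // tcp://192.168.1.10:0

	var none net.Addr // no source address bound
	if none == nil {
		fmt.Println("undefined")
	}
}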
@@ -2,6 +2,6 @@ package netmap
 
 // AnnouncedKeys is an interface of utility for working with the announced public keys of the storage nodes.
 type AnnouncedKeys interface {
-	// Checks if the key was announced by a local node.
+	// IsLocalKey checks if the key was announced by a local node.
 	IsLocalKey(key []byte) bool
 }
@@ -463,6 +463,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
 		name:             morphPrefix,
 		from:             fromSideChainBlock,
 		morphCacheMetric: s.irMetrics.MorphCacheMetrics(),
+		multinetMetrics:  s.irMetrics.Multinet(),
 	}

 	// create morph client
@@ -9,6 +9,7 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+	internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
@@ -116,6 +117,7 @@ type (
 		sgn              *transaction.Signer
 		from             uint32 // block height
 		morphCacheMetric metrics.MorphCacheMetrics
+		multinetMetrics  metrics.MultinetMetrics
 	}
 )

@@ -486,6 +488,12 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
 		return nil, fmt.Errorf("%s chain client endpoints not provided", p.name)
 	}
 
+	nc := parseMultinetConfig(p.cfg, p.multinetMetrics)
+	ds, err := internalNet.NewDialerSource(nc)
+	if err != nil {
+		return nil, fmt.Errorf("dialer source: %w", err)
+	}
+
 	return client.New(
 		ctx,
 		p.key,
@@ -498,6 +506,7 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
 		}),
 		client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")),
 		client.WithMorphCacheMetrics(p.morphCacheMetric),
+		client.WithDialerSource(ds),
 	)
 }

@@ -542,6 +551,28 @@ func parseWalletAddressesFromStrings(wallets []string) ([]util.Uint160, error) {
 	return extraWallets, nil
 }
 
+func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNet.Config {
+	nc := internalNet.Config{
+		Enabled:       cfg.GetBool("multinet.enabled"),
+		Balancer:      cfg.GetString("multinet.balancer"),
+		Restrict:      cfg.GetBool("multinet.restrict"),
+		FallbackDelay: cfg.GetDuration("multinet.fallback_delay"),
+		Metrics:       m,
+	}
+	for i := 0; ; i++ {
+		mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i))
+		if mask == "" {
+			break
+		}
+		sourceIPs := cfg.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i))
+		nc.Subnets = append(nc.Subnets, internalNet.Subnet{
+			Prefix:    mask,
+			SourceIPs: sourceIPs,
+		})
+	}
+	return nc
+}
+
 func (s *Server) initConfigFromBlockchain() error {
 	// get current epoch
 	epoch, err := s.netmapClient.Epoch()
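For reference, a minimal sketch of the configuration keys this parser reads, expressed with viper directly; the key names come from the function above, while the values and the YAML layout shown in the comment are assumptions.

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// Keys mirror those read by parseMultinetConfig; values are illustrative.
	v.Set("multinet.enabled", true)
	v.Set("multinet.balancer", "roundrobin") // balancer name is an assumption
	v.Set("multinet.restrict", false)
	v.Set("multinet.fallback_delay", "350ms")
	v.Set("multinet.subnets.0.mask", "192.168.1.0/24")
	v.Set("multinet.subnets.0.source_ips", []string{"192.168.1.10"})

	// Assumed equivalent YAML fragment:
	//
	// multinet:
	//   enabled: true
	//   balancer: roundrobin
	//   restrict: false
	//   fallback_delay: 350ms
	//   subnets:
	//     - mask: 192.168.1.0/24
	//       source_ips:
	//         - 192.168.1.10

	fmt.Println(v.GetBool("multinet.enabled"), v.GetDuration("multinet.fallback_delay"))
}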
@@ -95,7 +95,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
 	t.Parallel()
 	var emission uint64 = 100_000
 	var index int = 5
-	var parsedWallets []util.Uint160 = []util.Uint160{}
+	var parsedWallets []util.Uint160
 
 	alphabetContracts := innerring.NewAlphabetContracts()
 	for i := range index + 1 {
@@ -167,7 +167,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
 	t.Parallel()
 	var emission uint64 = 100_000
 	var index int = 5
-	var parsedWallets []util.Uint160 = []util.Uint160{}
+	var parsedWallets []util.Uint160
 
 	alphabetContracts := innerring.NewAlphabetContracts()
 	for i := range index + 1 {
@@ -176,7 +176,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
 
 	morphClient := &testMorphClient{}
 
-	nodes := []netmap.NodeInfo{}
+	var nodes []netmap.NodeInfo
 	network := &netmap.NetMap{}
 	network.SetNodes(nodes)
 
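These test changes replace empty slice literals with nil slice declarations, the form linters generally prefer; the two behave the same for len, range, and append, as this standalone sketch (not from the diff) shows.

package main

import "fmt"

func main() {
	var a []int  // nil slice: preferred when no elements are needed yet
	b := []int{} // empty, non-nil slice

	fmt.Println(len(a), len(b))             // 0 0
	fmt.Println(a == nil, b == nil)         // true false
	fmt.Println(append(a, 1), append(b, 1)) // [1] [1]
}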
@@ -48,7 +48,3 @@ func (cp *Processor) handleDelete(ev event.Event) {
 			zap.Int("capacity", cp.pool.Cap()))
 	}
 }
-
-func (cp *Processor) handleSetEACL(_ event.Event) {
-	cp.log.Warn(logs.SkipDeprecatedNotification, zap.String("type", "set EACL"))
-}
@@ -157,11 +157,6 @@ func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
 	p.SetParser(containerEvent.ParseDeleteNotary)
 	pp = append(pp, p)
 
-	// set EACL
-	p.SetRequestType(containerEvent.SetEACLNotaryEvent)
-	p.SetParser(containerEvent.ParseSetEACLNotary)
-	pp = append(pp, p)
-
 	return pp
 }
@@ -190,10 +185,5 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
 	h.SetHandler(cp.handleDelete)
 	hh = append(hh, h)
 
-	// set eACL
-	h.SetRequestType(containerEvent.SetEACLNotaryEvent)
-	h.SetHandler(cp.handleSetEACL)
-	hh = append(hh, h)
-
 	return hh
 }
@@ -8,38 +8,38 @@ import (
 // Record is an interface of read-only
 // FrostFS LOCODE database single entry.
 type Record interface {
-	// Must return ISO 3166-1 alpha-2
+	// CountryCode must return ISO 3166-1 alpha-2
 	// country code.
 	//
 	// Must not return nil.
 	CountryCode() *locodedb.CountryCode
 
-	// Must return English short country name
+	// CountryName must return English short country name
 	// officially used by the ISO 3166
 	// Maintenance Agency (ISO 3166/MA).
 	CountryName() string
 
-	// Must return UN/LOCODE 3-character code
+	// LocationCode must return UN/LOCODE 3-character code
 	// for the location (numerals 2-9 may also
 	// be used).
 	//
 	// Must not return nil.
 	LocationCode() *locodedb.LocationCode
 
-	// Must return name of the location which
+	// LocationName must return name of the location which
 	// have been allocated a UN/LOCODE without
 	// diacritic sign.
 	LocationName() string
 
-	// Must return ISO 1-3 character alphabetic
+	// SubDivCode Must return ISO 1-3 character alphabetic
 	// and/or numeric code for the administrative
 	// division of the country concerned.
 	SubDivCode() string
 
-	// Must return subdivision name.
+	// SubDivName must return subdivision name.
 	SubDivName() string
 
-	// Must return existing continent where is
+	// Continent must return existing continent where is
 	// the location.
 	//
 	// Must not return nil.
@@ -49,7 +49,7 @@ type Record interface {
 // DB is an interface of read-only
 // FrostFS LOCODE database.
 type DB interface {
-	// Must find the record that corresponds to
+	// Get must find the record that corresponds to
 	// LOCODE and provides the Record interface.
 	//
 	// Must return an error if Record is nil.
@@ -43,7 +43,7 @@ type (
 	// of information about the node and its finalization for adding
 	// to the network map.
 	NodeValidator interface {
-		// Must verify and optionally update NodeInfo structure.
+		// VerifyAndUpdate must verify and optionally update NodeInfo structure.
 		//
 		// Must return an error if NodeInfo input is invalid.
 		// Must return an error if it is not possible to correctly
@@ -61,7 +61,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 	require.NoError(t, b.Init())
 
 	storageIDs := make(map[oid.Address][]byte)
-	for i := 0; i < 100; i++ {
+	for range 100 {
 		obj := blobstortest.NewObject(64 * 1024) // 64KB object
 		data, err := obj.Marshal()
 		require.NoError(t, err)
@@ -168,7 +168,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 
 	storageIDs := make(map[oid.Address][]byte)
 	toDelete := make(map[oid.Address][]byte)
-	for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+	for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
 		obj := blobstortest.NewObject(64 * 1024)
 		data, err := obj.Marshal()
 		require.NoError(t, err)
@@ -236,7 +236,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 	require.NoError(t, b.Init())
 
 	storageIDs := make(map[oid.Address][]byte)
-	for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+	for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
 		obj := blobstortest.NewObject(64 * 1024)
 		data, err := obj.Marshal()
 		require.NoError(t, err)
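These test updates switch classic counter loops to the range-over-int form available since Go 1.22. A minimal standalone example of the equivalent spellings (not from the diff):

package main

import "fmt"

func main() {
	// Classic counter loop.
	for i := 0; i < 3; i++ {
		fmt.Println("classic", i)
	}
	// Equivalent Go 1.22+ range-over-int loop.
	for i := range 3 {
		fmt.Println("range", i)
	}
	// Drop the variable entirely when the index is unused.
	for range 3 {
		fmt.Println("no index needed")
	}
}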
Some files were not shown because too many files have changed in this diff.