Compare commits

2 commits

1ec5377241
m
Some checks failed
DCO action / DCO (pull_request) Failing after 32s
Vulncheck / Vulncheck (pull_request) Successful in 1m10s
Pre-commit hooks / Pre-commit (pull_request) Successful in 1m31s
Tests and linters / Lint (pull_request) Failing after 1m59s
Tests and linters / Staticcheck (pull_request) Failing after 1m55s
Tests and linters / Tests (pull_request) Failing after 1m57s
Build / Build Components (pull_request) Successful in 2m13s
Tests and linters / Tests with -race (pull_request) Failing after 3m25s
Tests and linters / gopls check (pull_request) Successful in 3m44s
Tests and linters / Run gofumpt (pull_request) Successful in 4m37s
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-03-20 16:15:13 +03:00
eb8b0a6999
WIP: metabase: Find attribute without full unmarshal
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-03-20 16:15:12 +03:00
93 changed files with 3852 additions and 1618 deletions

.ci/Jenkinsfile vendored (87 lines removed)

@@ -1,87 +0,0 @@
def golang = ['1.23', '1.24']
def golangDefault = "golang:${golang.last()}"
async {
for (version in golang) {
def go = version
task("test/go${go}") {
container("golang:${go}") {
sh 'make test'
}
}
task("build/go${go}") {
container("golang:${go}") {
for (app in ['cli', 'node', 'ir', 'adm', 'lens']) {
sh """
make bin/frostfs-${app}
bin/frostfs-${app} --version
"""
}
}
}
}
task('test/race') {
container(golangDefault) {
sh 'make test GOFLAGS="-count=1 -race"'
}
}
task('lint') {
container(golangDefault) {
sh 'make lint-install lint'
}
}
task('staticcheck') {
container(golangDefault) {
sh 'make staticcheck-install staticcheck-run'
}
}
task('gopls') {
container(golangDefault) {
sh 'make gopls-install gopls-run'
}
}
task('gofumpt') {
container(golangDefault) {
sh '''
make fumpt-install
make fumpt
git diff --exit-code --quiet
'''
}
}
task('vulncheck') {
container(golangDefault) {
sh '''
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...
'''
}
}
task('pre-commit') {
dockerfile("""
FROM ${golangDefault}
RUN apt update && \
apt install -y --no-install-recommends pre-commit
""") {
withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
sh 'pre-commit run --color=always --hook-stage=manual --all-files'
}
}
}
task('dco') {
container('git.frostfs.info/truecloudlab/commit-check:master') {
sh 'FROM=pull_request_target commit-check'
}
}
}

@@ -1,6 +1,5 @@
#!/usr/bin/make -f
SHELL = bash
.SHELLFLAGS = -euo pipefail -c
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@@ -116,7 +115,7 @@ protoc:
# Install protoc
protoc-install:
@rm -rf $(PROTOBUF_DIR)
@mkdir -p $(PROTOBUF_DIR)
@mkdir $(PROTOBUF_DIR)
@echo "⇒ Installing protoc... "
@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@@ -170,7 +169,7 @@ imports:
# Install gofumpt
fumpt-install:
@rm -rf $(GOFUMPT_DIR)
@mkdir -p $(GOFUMPT_DIR)
@mkdir $(GOFUMPT_DIR)
@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
# Run gofumpt
@@ -187,37 +186,14 @@ test:
@echo "⇒ Running go test"
@GOFLAGS="$(GOFLAGS)" go test ./...
# Install Gerrit commit-msg hook
review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
review-install:
@git config remote.review.url \
|| git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
@mkdir -p $(GIT_HOOK_DIR)/
@curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
@chmod +x $(GIT_HOOK_DIR)/commit-msg
@echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
@chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
# Create a PR in Gerrit
review: BRANCH ?= master
review:
@git push review HEAD:refs/for/$(BRANCH) \
--push-option r=e.stratonikov@yadro.com \
--push-option r=d.stepanov@yadro.com \
--push-option r=an.nikiforov@yadro.com \
--push-option r=a.arifullin@yadro.com \
--push-option r=ekaterina.lebedeva@yadro.com \
--push-option r=a.savchuk@yadro.com \
--push-option r=a.chuprov@yadro.com
# Run pre-commit
pre-commit-run:
@pre-commit run -a --hook-stage manual
# Install linters
lint-install: $(BIN)
lint-install:
@rm -rf $(OUTPUT_LINT_DIR)
@mkdir -p $(OUTPUT_LINT_DIR)
@mkdir $(OUTPUT_LINT_DIR)
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@ -236,7 +212,7 @@ lint:
# Install staticcheck
staticcheck-install:
@rm -rf $(STATICCHECK_DIR)
@mkdir -p $(STATICCHECK_DIR)
@mkdir $(STATICCHECK_DIR)
@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
# Run staticcheck
@ -249,7 +225,7 @@ staticcheck-run:
# Install gopls
gopls-install:
@rm -rf $(GOPLS_DIR)
@mkdir -p $(GOPLS_DIR)
@mkdir $(GOPLS_DIR)
@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
# Run gopls

@@ -6,7 +6,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -14,7 +13,9 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -186,9 +187,19 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (*
}
func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
inv := invoker.New(c, nil)
reader := nns2.NewReader(inv, nnsHash)
return reader.IsAvailable(name)
switch c.(type) {
case *rpcclient.Client:
inv := invoker.New(c, nil)
reader := nns2.NewReader(inv, nnsHash)
return reader.IsAvailable(name)
default:
b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
if err != nil {
return false, fmt.Errorf("`isAvailable`: invalid response: %w", err)
}
return b, nil
}
}
func CheckNotaryEnabled(c Client) error {

@@ -40,8 +40,6 @@ type ClientContext struct {
CommitteeAct *actor.Actor // committee actor with the Global witness scope
ReadOnlyInvoker *invoker.Invoker // R/O contract invoker, does not contain any signer
SentTxs []HashVUBPair
AwaitDisabled bool
}
func NewRemoteClient(v *viper.Viper) (Client, error) {
@@ -122,7 +120,7 @@ func (c *ClientContext) SendTx(tx *transaction.Transaction, cmd *cobra.Command,
}
func (c *ClientContext) AwaitTx(cmd *cobra.Command) error {
if len(c.SentTxs) == 0 || c.AwaitDisabled {
if len(c.SentTxs) == 0 {
return nil
}

@@ -39,7 +39,6 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
return err
}
initCtx.AwaitDisabled = true
cmd.Println("Stage 4.1: Transfer GAS to proxy contract.")
if err := transferGASToProxy(initCtx); err != nil {
return err
@@ -56,10 +55,5 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
}
cmd.Println("Stage 7: set addresses in NNS.")
if err := setNNS(initCtx); err != nil {
return err
}
initCtx.AwaitDisabled = false
return initCtx.AwaitTx()
return setNNS(initCtx)
}

@@ -1,6 +1,7 @@
package initialize
import (
"errors"
"fmt"
"math/big"
@@ -10,8 +11,11 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -26,8 +30,7 @@
)
func registerCandidateRange(c *helper.InitializeContext, start, end int) error {
reader := neo.NewReader(c.ReadOnlyInvoker)
regPrice, err := reader.GetRegisterPrice()
regPrice, err := getCandidateRegisterPrice(c)
if err != nil {
return fmt.Errorf("can't fetch registration price: %w", err)
}
@@ -113,7 +116,7 @@ func registerCandidates(c *helper.InitializeContext) error {
func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
neoHash := neo.Hash
ok, err := transferNEOFinished(c)
ok, err := transferNEOFinished(c, neoHash)
if ok || err != nil {
return err
}
@@ -136,8 +139,33 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error {
return c.AwaitTx()
}
func transferNEOFinished(c *helper.InitializeContext) (bool, error) {
r := neo.NewReader(c.ReadOnlyInvoker)
func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) {
r := nep17.NewReader(c.ReadOnlyInvoker, neoHash)
bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash())
return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err
}
var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) {
switch c.Client.(type) {
case *rpcclient.Client:
inv := invoker.New(c.Client, nil)
reader := neo.NewReader(inv)
return reader.GetRegisterPrice()
default:
neoHash := neo.Hash
res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
if err != nil {
return 0, err
}
if len(res.Stack) == 0 {
return 0, errGetPriceInvalid
}
bi, err := res.Stack[0].TryInteger()
if err != nil || !bi.IsInt64() {
return 0, errGetPriceInvalid
}
return bi.Int64(), nil
}
}

@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
@@ -40,6 +41,7 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
rootCmd.AddCommand(storagecfg.RootCmd)
rootCmd.AddCommand(metabase.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))

@@ -0,0 +1,135 @@
package storagecfg
const configTemplate = `logger:
  level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
node:
  wallet:
    path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
    address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
    password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
  addresses: # list of addresses announced by Storage node in the Network map
    - {{ .AnnouncedAddress }}
  attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
  relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map
grpc:
  num: 1 # total number of listener endpoints
  0:
    endpoint: {{ .Endpoint }} # endpoint for gRPC server
    tls:{{if .TLSCert}}
      enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
      certificate: {{ .TLSCert }} # path to TLS certificate
      key: {{ .TLSKey }} # path to TLS key
    {{- else }}
      enabled: false # disable TLS for a gRPC connection
    {{- end}}
control:
  authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
    {{- range .AuthorizedKeys }}
    - {{.}}{{end}}
  grpc:
    endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service
morph:
  dial_timeout: 20s # timeout for side chain NEO RPC client connection
  cache_ttl: 15s # use TTL cache for side chain GET operations
  rpc_endpoint: # side chain N3 RPC endpoints
    {{- range .MorphRPC }}
    - address: wss://{{.}}/ws{{end}}
{{if not .Relay }}
storage:
  shard:
    default: # section with the default shard parameters
      metabase:
        perm: 0644 # permissions for metabase files(directories: +x for current user and group)
      blobstor:
        perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
        depth: 2 # max depth of object tree storage in FS
        small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
        compress: true # turn on/off Zstandard compression (level 3) of stored objects
        compression_exclude_content_types:
          - audio/*
          - video/*
        blobovnicza:
          size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
          depth: 1 # max depth of object tree storage in key-value DB
          width: 4 # max width of object tree storage in key-value DB
          opened_cache_capacity: 50 # maximum number of opened database files
          opened_cache_ttl: 5m # ttl for opened database file
          opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
      gc:
        remover_batch_size: 200 # number of objects to be removed by the garbage collector
        remover_sleep_interval: 5m # frequency of the garbage collector invocation
    0:
      mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"
      metabase:
        path: {{ .MetabasePath }} # path to the metabase
      blobstor:
        path: {{ .BlobstorPath }} # path to the blobstor
{{end}}`
const (
neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221"
balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1"
balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
)
var n3config = map[string]struct {
MorphRPC []string
RPC []string
NeoFSContract string
BalanceContract string
}{
"testnet": {
MorphRPC: []string{
"rpc01.morph.testnet.fs.neo.org:51331",
"rpc02.morph.testnet.fs.neo.org:51331",
"rpc03.morph.testnet.fs.neo.org:51331",
"rpc04.morph.testnet.fs.neo.org:51331",
"rpc05.morph.testnet.fs.neo.org:51331",
"rpc06.morph.testnet.fs.neo.org:51331",
"rpc07.morph.testnet.fs.neo.org:51331",
},
RPC: []string{
"rpc01.testnet.n3.nspcc.ru:21331",
"rpc02.testnet.n3.nspcc.ru:21331",
"rpc03.testnet.n3.nspcc.ru:21331",
"rpc04.testnet.n3.nspcc.ru:21331",
"rpc05.testnet.n3.nspcc.ru:21331",
"rpc06.testnet.n3.nspcc.ru:21331",
"rpc07.testnet.n3.nspcc.ru:21331",
},
NeoFSContract: neofsTestnetAddress,
BalanceContract: balanceTestnetAddress,
},
"mainnet": {
MorphRPC: []string{
"rpc1.morph.fs.neo.org:40341",
"rpc2.morph.fs.neo.org:40341",
"rpc3.morph.fs.neo.org:40341",
"rpc4.morph.fs.neo.org:40341",
"rpc5.morph.fs.neo.org:40341",
"rpc6.morph.fs.neo.org:40341",
"rpc7.morph.fs.neo.org:40341",
},
RPC: []string{
"rpc1.n3.nspcc.ru:10331",
"rpc2.n3.nspcc.ru:10331",
"rpc3.n3.nspcc.ru:10331",
"rpc4.n3.nspcc.ru:10331",
"rpc5.n3.nspcc.ru:10331",
"rpc6.n3.nspcc.ru:10331",
"rpc7.n3.nspcc.ru:10331",
},
NeoFSContract: neofsMainnetAddress,
BalanceContract: balanceMainnetAddress,
},
}

@@ -0,0 +1,432 @@
package storagecfg
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
"text/template"
"time"
netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"github.com/chzyer/readline"
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
)
const (
walletFlag = "wallet"
accountFlag = "account"
)
const (
defaultControlEndpoint = "localhost:8090"
defaultDataEndpoint = "localhost"
)
// RootCmd is a root command of config section.
var RootCmd = &cobra.Command{
Use: "storage-config [-w wallet] [-a acccount] [<path-to-config>]",
Short: "Section for storage node configuration commands",
Run: storageConfig,
}
func init() {
fs := RootCmd.Flags()
fs.StringP(walletFlag, "w", "", "Path to wallet")
fs.StringP(accountFlag, "a", "", "Wallet account")
}
type config struct {
AnnouncedAddress string
AuthorizedKeys []string
ControlEndpoint string
Endpoint string
TLSCert string
TLSKey string
MorphRPC []string
Attribute struct {
Locode string
}
Wallet struct {
Path string
Account string
Password string
}
Relay bool
BlobstorPath string
MetabasePath string
}
func storageConfig(cmd *cobra.Command, args []string) {
outPath := getOutputPath(args)
historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
readline.SetHistoryPath(historyPath)
var c config
c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
if c.Wallet.Path == "" {
c.Wallet.Path = getPath("Path to the storage node wallet: ")
}
w, err := wallet.NewWalletFromFile(c.Wallet.Path)
fatalOnErr(err)
fillWalletAccount(cmd, &c, w)
accH, err := flags.ParseAddress(c.Wallet.Account)
fatalOnErr(err)
acc := w.GetAccount(accH)
if acc == nil {
fatalOnErr(errors.New("can't find account in wallet"))
}
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
fatalOnErr(err)
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
fatalOnErr(err)
c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))
network := readNetwork(cmd)
c.MorphRPC = n3config[network].MorphRPC
depositGas(cmd, acc, network)
c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")
endpoint := getDefaultEndpoint(cmd, &c)
c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
if c.Endpoint == "" {
c.Endpoint = endpoint
}
c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
if c.ControlEndpoint == "" {
c.ControlEndpoint = defaultControlEndpoint
}
c.TLSCert = getPath("TLS Certificate (optional): ")
if c.TLSCert != "" {
c.TLSKey = getPath("TLS Key: ")
}
c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
if !c.Relay {
p := getPath("Path to the storage directory (all available storage will be used): ")
c.BlobstorPath = filepath.Join(p, "blob")
c.MetabasePath = filepath.Join(p, "meta")
}
out := applyTemplate(c)
fatalOnErr(os.WriteFile(outPath, out, 0o644))
cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
}
func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
var addr, port string
for {
c.AnnouncedAddress = getString("Publicly announced address: ")
validator := netutil.Address{}
err := validator.FromString(c.AnnouncedAddress)
if err != nil {
cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
continue
}
uriAddr, err := url.Parse(validator.URIAddr())
if err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
addr = uriAddr.Hostname()
port = uriAddr.Port()
ip, err := net.ResolveIPAddr("ip", addr)
if err != nil {
cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
continue
}
if !ip.IP.IsGlobalUnicast() {
cmd.Println("IP must be global unicast.")
continue
}
cmd.Printf("Resolved IP address: %s\n", ip.String())
_, err = strconv.ParseUint(port, 10, 16)
if err != nil {
cmd.Println("Port must be an integer.")
continue
}
break
}
return net.JoinHostPort(defaultDataEndpoint, port)
}
func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
if c.Wallet.Account == "" {
addr := address.Uint160ToString(w.GetChangeAddress())
c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
if c.Wallet.Account == "" {
c.Wallet.Account = addr
}
}
}
func readNetwork(cmd *cobra.Command) string {
var network string
for {
network = getString("Choose network [mainnet]/testnet: ")
switch network {
case "":
network = "mainnet"
case "testnet", "mainnet":
default:
cmd.Println(`Network must be either "mainnet" or "testnet"`)
continue
}
break
}
return network
}
func getOutputPath(args []string) string {
if len(args) != 0 {
return args[0]
}
outPath := getPath("File to write config at [./config.yml]: ")
if outPath == "" {
outPath = "./config.yml"
}
return outPath
}
func getWalletAccount(w *wallet.Wallet, prompt string) string {
addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
for i := range w.Accounts {
addrs[i] = readline.PcItem(w.Accounts[i].Address)
}
readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
defer readline.SetAutoComplete(nil)
s, err := readline.Line(prompt)
fatalOnErr(err)
return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
}
func getString(prompt string) string {
s, err := readline.Line(prompt)
fatalOnErr(err)
if s != "" {
_ = readline.AddHistory(s)
}
return s
}
type filenameCompleter struct{}
func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
prefix := string(line[:pos])
dir := filepath.Dir(prefix)
de, err := os.ReadDir(dir)
if err != nil {
return nil, 0
}
for i := range de {
name := filepath.Join(dir, de[i].Name())
if strings.HasPrefix(name, prefix) {
tail := []rune(strings.TrimPrefix(name, prefix))
if de[i].IsDir() {
tail = append(tail, filepath.Separator)
}
newLine = append(newLine, tail)
}
}
if pos != 0 {
return newLine, pos - len([]rune(dir))
}
return newLine, 0
}
func getPath(prompt string) string {
readline.SetAutoComplete(filenameCompleter{})
defer readline.SetAutoComplete(nil)
p, err := readline.Line(prompt)
fatalOnErr(err)
if p == "" {
return p
}
_ = readline.AddHistory(p)
abs, err := filepath.Abs(p)
if err != nil {
fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
}
return abs
}
func getConfirmation(def bool, prompt string) bool {
for {
s, err := readline.Line(prompt)
fatalOnErr(err)
switch strings.ToLower(s) {
case "y", "yes":
return true
case "n", "no":
return false
default:
if len(s) == 0 {
return def
}
}
}
}
func applyTemplate(c config) []byte {
tmpl, err := template.New("config").Parse(configTemplate)
fatalOnErr(err)
b := bytes.NewBuffer(nil)
fatalOnErr(tmpl.Execute(b, c))
return b.Bytes()
}
func fatalOnErr(err error) {
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
}
func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
sideClient := initClient(n3config[network].MorphRPC)
balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)
sideActor, err := actor.NewSimple(sideClient, acc)
if err != nil {
fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
}
sideGas := nep17.NewReader(sideActor, balanceHash)
accSH := acc.Contract.ScriptHash()
balance, err := sideGas.BalanceOf(accSH)
if err != nil {
fatalOnErr(fmt.Errorf("side chain balance: %w", err))
}
ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
fixedn.ToString(balance, 12)))
if !ok {
return
}
amountStr := getString("Enter amount in GAS: ")
amount, err := fixedn.FromString(amountStr, 8)
if err != nil {
fatalOnErr(fmt.Errorf("invalid amount: %w", err))
}
mainClient := initClient(n3config[network].RPC)
neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)
mainActor, err := actor.NewSimple(mainClient, acc)
if err != nil {
fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
}
mainGas := nep17.New(mainActor, gas.Hash)
txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
if err != nil {
fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
}
cmd.Print("Waiting for transactions to persist.")
tick := time.NewTicker(time.Second / 2)
defer tick.Stop()
timer := time.NewTimer(time.Second * 20)
defer timer.Stop()
at := trigger.Application
loop:
for {
select {
case <-tick.C:
_, err := mainClient.GetApplicationLog(txHash, &at)
if err == nil {
cmd.Print("\n")
break loop
}
cmd.Print(".")
case <-timer.C:
cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
if getConfirmation(false, "Continue configuration? yes/[no]: ") {
return
}
os.Exit(1)
}
}
}
func initClient(rpc []string) *rpcclient.Client {
var c *rpcclient.Client
var err error
shuffled := slices.Clone(rpc)
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
for _, endpoint := range shuffled {
c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
DialTimeout: time.Second * 2,
RequestTimeout: time.Second * 5,
})
if err != nil {
continue
}
if err = c.Init(); err != nil {
continue
}
return c
}
fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
panic("unreachable")
}

@@ -858,8 +858,6 @@ type PatchObjectPrm struct {
ReplaceAttribute bool
NewSplitHeader *objectSDK.SplitHeader
PayloadPatches []PayloadPatch
}
@@ -890,11 +888,7 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) {
return nil, fmt.Errorf("init payload reading: %w", err)
}
if patcher.PatchHeader(ctx, client.PatchHeaderPrm{
NewSplitHeader: prm.NewSplitHeader,
NewAttributes: prm.NewAttributes,
ReplaceAttributes: prm.ReplaceAttribute,
}) {
if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) {
for _, pp := range prm.PayloadPatches {
payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm)
if err != nil {

@@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
prmDial := client.PrmDial{
Endpoint: addr.URIAddr(),
GRPCDialOptions: []grpc.DialOption{
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
},

@@ -2,7 +2,6 @@ package object
import (
"fmt"
"os"
"strconv"
"strings"
@@ -10,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -22,7 +20,6 @@ const (
replaceAttrsFlagName = "replace-attrs"
rangeFlagName = "range"
payloadFlagName = "payload"
splitHeaderFlagName = "split-header"
)
var objectPatchCmd = &cobra.Command{
@@ -53,7 +50,6 @@ func initObjectPatchCmd() {
flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.")
flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length")
flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.")
flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header")
}
func patch(cmd *cobra.Command, _ []string) {
@@ -88,8 +84,6 @@ func patch(cmd *cobra.Command, _ []string) {
prm.NewAttributes = newAttrs
prm.ReplaceAttribute = replaceAttrs
prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd)
for i := range ranges {
prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{
Range: ranges[i],
@@ -153,22 +147,3 @@ func patchPayloadPaths(cmd *cobra.Command) []string {
v, _ := cmd.Flags().GetStringSlice(payloadFlagName)
return v
}
func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader {
path, _ := cmd.Flags().GetString(splitHeaderFlagName)
if path == "" {
return nil
}
data, err := os.ReadFile(path)
commonCmd.ExitOnErr(cmd, "read file error: %w", err)
splitHdrV2 := new(objectV2.SplitHeader)
err = splitHdrV2.Unmarshal(data)
if err != nil {
err = splitHdrV2.UnmarshalJSON(data)
commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err)
}
return objectSDK.NewSplitHeaderFromV2(splitHdrV2)
}

@@ -2,19 +2,17 @@ package tree
import (
"context"
"crypto/tls"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@@ -33,29 +31,22 @@ func _client() (tree.TreeServiceClient, error) {
return nil, err
}
host, isTLS, err := client.ParseURI(netAddr.URIAddr())
if err != nil {
return nil, err
}
creds := insecure.NewCredentials()
if isTLS {
creds = credentials.NewTLS(&tls.Config{})
}
opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
tracing.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
tracing.NewStreamClientInterceptor(),
),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
grpc.WithDisableServiceConfig(),
grpc.WithTransportCredentials(creds),
}
cc, err := grpc.NewClient(host, opts...)
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
return tree.NewTreeServiceClient(cc), err
}

@@ -9,7 +9,6 @@ import (
configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@@ -39,14 +38,13 @@ func reloadConfig() error {
}
cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
audit.Store(cfg.GetBool("audit.enabled"))
var logPrm logger.Prm
err = logPrm.SetLevelString(cfg.GetString("logger.level"))
if err != nil {
return err
}
log.Reload(logPrm)
logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
return nil
return logPrm.Reload()
}
func watchForSignal(ctx context.Context, cancel func()) {

@@ -31,6 +31,7 @@ const (
var (
wg = new(sync.WaitGroup)
intErr = make(chan error) // internal inner ring errors
logPrm = new(logger.Prm)
innerRing *innerring.Server
pprofCmp *pprofComponent
metricsCmp *httpComponent
@@ -69,7 +70,6 @@ func main() {
metrics := irMetrics.NewInnerRingMetrics()
var logPrm logger.Prm
err = logPrm.SetLevelString(
cfg.GetString("logger.level"),
)

@@ -108,7 +108,6 @@ type applicationConfiguration struct {
level string
destination string
timestamp bool
options []zap.Option
}
ObjectCfg struct {
@@ -233,14 +232,6 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.level = loggerconfig.Level(c)
a.LoggerCfg.destination = loggerconfig.Destination(c)
a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
var opts []zap.Option
if loggerconfig.ToLokiConfig(c).Enabled {
opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c))
return lokiCore
})}
}
a.LoggerCfg.options = opts
// Object
@@ -482,6 +473,7 @@ type shared struct {
// dynamicConfiguration stores parameters of the
// components that supports runtime reconfigurations.
type dynamicConfiguration struct {
logger *logger.Prm
pprof *httpComponent
metrics *httpComponent
}
@@ -722,11 +714,16 @@ func initCfg(appCfg *config.Config) *cfg {
netState.metrics = c.metricsCollector
logPrm, err := c.loggerPrm()
fatalOnErr(err)
logPrm := c.loggerPrm()
logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
log, err := logger.NewLogger(logPrm)
fatalOnErr(err)
if loggerconfig.ToLokiConfig(appCfg).Enabled {
log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg))
return lokiCore
}))
}
c.internals = initInternals(appCfg, log)
@@ -1079,23 +1076,26 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
return sh
}
func (c *cfg) loggerPrm() (logger.Prm, error) {
var prm logger.Prm
// (re)init read configuration
err := prm.SetLevelString(c.LoggerCfg.level)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
func (c *cfg) loggerPrm() *logger.Prm {
// check if it has been inited before
if c.dynamicConfiguration.logger == nil {
c.dynamicConfiguration.logger = new(logger.Prm)
}
err = prm.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
}
prm.PrependTimestamp = c.LoggerCfg.timestamp
prm.Options = c.LoggerCfg.options
return prm, nil
// (re)init read configuration
err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log level format: " + c.LoggerCfg.level)
}
err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
if err != nil {
// not expected since validation should be performed before
panic("incorrect log destination format: " + c.LoggerCfg.destination)
}
c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger
}
func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1335,7 +1335,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// all the components are expected to support
// Logger's dynamic reconfiguration approach
components := c.getComponents(ctx)
// Logger
logPrm := c.loggerPrm()
components := c.getComponents(ctx, logPrm)
// Object
c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@@ -1373,17 +1377,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
func (c *cfg) getComponents(ctx context.Context) []dCmp {
func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
var components []dCmp
components = append(components, dCmp{"logger", func() error {
prm, err := c.loggerPrm()
if err != nil {
return err
}
c.log.Reload(prm)
return nil
}})
components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(ctx, c)
return nil

@@ -168,10 +168,9 @@ func TestEngineSection(t *testing.T) {
LimitOps: toPtr(25000),
},
{
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25000),
Prohibited: true,
Tag: "policer",
Weight: toPtr(5),
LimitOps: toPtr(25000),
},
})
require.ElementsMatch(t, writeLimits.Tags,

@@ -84,7 +84,6 @@ type IOTagConfig struct {
Weight *float64
LimitOps *float64
ReservedOps *float64
Prohibited bool
}
func tags(c *config.Config) []IOTagConfig {
@@ -120,13 +119,6 @@ func tags(c *config.Config) []IOTagConfig {
tagConfig.ReservedOps = &r
}
v = c.Value(strconv.Itoa(i) + ".prohibited")
if v != nil {
r, err := cast.ToBoolE(v)
panicOnErr(err)
tagConfig.Prohibited = r
}
result = append(result, tagConfig)
}
}

@@ -31,11 +31,12 @@ func Limits(c *config.Config) []LimitConfig {
break
}
if sc.Value("max_ops") == nil {
maxOps := config.IntSafe(sc, "max_ops")
if maxOps == 0 {
panic("no max operations for method group")
}
limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
limits = append(limits, LimitConfig{methods, maxOps})
}
return limits

@@ -38,7 +38,7 @@ func TestRPCSection(t *testing.T) {
})
t.Run("no max operations", func(t *testing.T) {
const path = "testdata/no_max_ops"
const path = "testdata/node"
fileConfigTest := func(c *config.Config) {
require.Panics(t, func() { _ = Limits(c) })
@@ -50,28 +50,4 @@
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
t.Run("zero max operations", func(t *testing.T) {
const path = "testdata/zero_max_ops"
fileConfigTest := func(c *config.Config) {
limits := Limits(c)
require.Len(t, limits, 2)
limit0 := limits[0]
limit1 := limits[1]
require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
require.Equal(t, limit0.MaxOps, int64(0))
require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
require.Equal(t, limit1.MaxOps, int64(10000))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
})
}

@@ -1,4 +0,0 @@
FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
FROSTFS_RPC_LIMITS_0_MAX_OPS=0
FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
FROSTFS_RPC_LIMITS_1_MAX_OPS=10000

@@ -1,19 +0,0 @@
{
"rpc": {
"limits": [
{
"methods": [
"/neo.fs.v2.object.ObjectService/PutSingle",
"/neo.fs.v2.object.ObjectService/Put"
],
"max_ops": 0
},
{
"methods": [
"/neo.fs.v2.object.ObjectService/Get"
],
"max_ops": 10000
}
]
}
}

@@ -1,9 +0,0 @@
rpc:
limits:
- methods:
- /neo.fs.v2.object.ObjectService/PutSingle
- /neo.fs.v2.object.ObjectService/Put
max_ops: 0
- methods:
- /neo.fs.v2.object.ObjectService/Get
max_ops: 10000

@@ -16,6 +16,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
@@ -171,10 +172,12 @@ func initObjectService(c *cfg) {
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
apeSvc := createAPEService(c, &irFetcher, splitSvc)
apeSvc := createAPEService(c, splitSvc)
aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)
var commonSvc objectService.Common
commonSvc.Init(&c.internals, apeSvc)
commonSvc.Init(&c.internals, aclSvc)
respSvc := objectService.NewResponseService(
&commonSvc,
@@ -281,7 +284,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
})
}
func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
return &innerRingFetcherWithNotary{
sidechain: c.cfgMorph.client,
}
@@ -426,7 +429,17 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
)
}
func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
return v2.New(
apeSvc,
c.netMapSource,
irFetcher,
c.cfgObject.cnrSource,
v2.WithLogger(c.log),
)
}
func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
return objectAPE.NewService(
objectAPE.NewChecker(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
@@ -438,7 +451,6 @@ func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectServic
c.cfgObject.cnrSource,
c.binPublicKey,
),
objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
splitSvc,
)
}

@@ -43,9 +43,6 @@ func initQoSService(c *cfg) {
func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context {
rawTag, defined := qosTagging.IOTagFromContext(ctx)
if !defined {
if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String())
}
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
ioTag, err := qos.FromRawString(rawTag)
@@ -76,8 +73,20 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
case qos.IOTagInternal:
if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) {
return ctx
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, requestSignPublicKey) {
return ctx
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), requestSignPublicKey) {
return ctx
}
}
s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag)
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
@@ -86,23 +95,3 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic
return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
}
}
func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool {
for _, pk := range s.allowedInternalPubs {
if bytes.Equal(pk, publicKey) {
return true
}
}
nm, err := s.netmapSource.GetNetMap(ctx, 0)
if err != nil {
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err))
return false
}
for _, node := range nm.Nodes() {
if bytes.Equal(node.PublicKey(), publicKey) {
return true
}
}
return false
}

@@ -1,226 +0,0 @@
package main
import (
"context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
func TestQoSService_Client(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag client defined", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Request)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
}
func TestQoSService_Internal(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.Internal)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagInternal.String(), tag)
})
}
func TestQoSService_Critical(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagCritical.String(), tag)
})
t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.Critical)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagCritical.String(), tag)
})
}
func TestQoSService_NetmapGetError(t *testing.T) {
t.Parallel()
s, pk := testQoSServicePrepare(t)
s.netmapSource = &utilTesting.TestNetmapSource{}
t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) {
ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) {
ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know")
ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode)
tag, ok := tagging.IOTagFromContext(ctx)
require.True(t, ok)
require.Equal(t, qos.IOTagClient.String(), tag)
})
}
func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) {
nmSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
reqSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
allowedCritSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
allowedIntSigner, err := keys.NewPrivateKey()
require.NoError(t, err)
var node netmap.NodeInfo
node.SetPublicKey(nmSigner.PublicKey().Bytes())
nm := &netmap.NetMap{}
nm.SetEpoch(100)
nm.SetNodes([]netmap.NodeInfo{node})
return &cfgQoSService{
logger: test.NewLogger(t),
netmapSource: &utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
100: nm,
},
CurrentEpoch: 100,
},
allowedCriticalPubs: [][]byte{
allowedCritSigner.PublicKey().Bytes(),
},
allowedInternalPubs: [][]byte{
allowedIntSigner.PublicKey().Bytes(),
},
},
&testQoSServicePublicKeys{
NetmapNode: nmSigner.PublicKey().Bytes(),
Request: reqSigner.PublicKey().Bytes(),
Internal: allowedIntSigner.PublicKey().Bytes(),
Critical: allowedCritSigner.PublicKey().Bytes(),
}
}
type testQoSServicePublicKeys struct {
NetmapNode []byte
Request []byte
Internal []byte
Critical []byte
}

@@ -1,6 +1,7 @@
package main
import (
"os"
"path/filepath"
"testing"
@@ -21,4 +22,17 @@ func TestValidate(t *testing.T) {
require.NoError(t, err)
})
})
t.Run("mainnet", func(t *testing.T) {
os.Clearenv() // ENVs have priority over config files, so we do this in tests
p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
c := config.New(p, "", config.EnvPrefix)
require.NoError(t, validateConfig(c))
})
t.Run("testnet", func(t *testing.T) {
os.Clearenv() // ENVs have priority over config files, so we do this in tests
p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
c := config.New(p, "", config.EnvPrefix)
require.NoError(t, validateConfig(c))
})
}

@@ -180,7 +180,6 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000
FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200
FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0

@@ -252,8 +252,7 @@
{
"tag": "policer",
"weight": 5,
"limit_ops": 25000,
"prohibited": true
"limit_ops": 25000
}
]
},

@@ -148,7 +148,7 @@ storage:
flush_worker_count: 30 # number of write-cache flusher threads
metabase:
perm: 0o644 # permissions for metabase files(directories: +x for current user and group)
perm: 0644 # permissions for metabase files(directories: +x for current user and group)
max_batch_size: 200
max_batch_delay: 20ms
@@ -161,13 +161,13 @@ storage:
blobstor:
- size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
opened_cache_ttl: 5m # ttl for opened database file
opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
- perm: 0o644 # permissions for blobstor files(directories: +x for current user and group)
- perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 5 # max depth of object tree storage in FS
gc:
@@ -249,7 +249,6 @@ storage:
- tag: policer
weight: 5
limit_ops: 25000
prohibited: true
write:
max_running_ops: 1000
max_waiting_ops: 100
@@ -291,7 +290,7 @@ storage:
pilorama:
path: tmp/1/blob/pilorama.db
no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
perm: 0o644 # permission to use for the database file and intermediate directories
perm: 0644 # permission to use for the database file and intermediate directories
tracing:
enabled: true

config/mainnet/README.md (new file, 28 lines)

@@ -0,0 +1,28 @@
# N3 Mainnet Storage node configuration
Here is a template for a simple storage node configuration in N3 Mainnet.
Make sure to specify correct values instead of the `<...>` placeholders.
Do not change the `contracts` section. Run the latest frostfs-node release with
the fixed config: `frostfs-node -c config.yml`.
To use NeoFS in the Mainnet, you need to deposit assets to the NeoFS contract.
The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`).
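For example, a deposit can be made with `neo-go`. A sketch: `<your-address>` is a placeholder for your own N3 address, and `rpc1.n3.nspcc.ru:10331` is one of the public N3 Mainnet RPC nodes.
```
neo-go wallet nep17 transfer -w wallet.json -r https://rpc1.n3.nspcc.ru:10331 \
        --from <your-address> \
        --to NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk \
        --token GAS \
        --amount 1
```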
## Tips
Use the `grpcs://` scheme in the announced address if you enable TLS in the gRPC server.
```yaml
node:
addresses:
- grpcs://frostfs.my.org:8080
grpc:
num: 1
0:
endpoint: frostfs.my.org:8080
tls:
enabled: true
certificate: /path/to/cert
key: /path/to/key
```

config/mainnet/config.yml (new file, 70 lines)

@@ -0,0 +1,70 @@
node:
  wallet:
    path: <path/to/wallet>
    address: <address-in-wallet>
    password: <password>
  addresses:
    - <announced.address:port>
  attribute_0: UN-LOCODE:<XX YYY>
  attribute_1: Price:100000
  attribute_2: User-Agent:FrostFS\/0.9999
grpc:
  num: 1
  0:
    endpoint: <listen.local.address:port>
    tls:
      enabled: false
storage:
  shard_num: 1
  shard:
    0:
      metabase:
        path: /storage/path/metabase
        perm: 0600
      blobstor:
        - path: /storage/path/blobovnicza
          type: blobovnicza
          perm: 0600
          opened_cache_capacity: 32
          depth: 1
          width: 1
        - path: /storage/path/fstree
          type: fstree
          perm: 0600
          depth: 4
      writecache:
        enabled: false
      gc:
        remover_batch_size: 100
        remover_sleep_interval: 1m
logger:
  level: info
prometheus:
  enabled: true
  address: localhost:9090
  shutdown_timeout: 15s
object:
  put:
    remote_pool_size: 100
    local_pool_size: 100
morph:
  rpc_endpoint:
    - wss://rpc1.morph.frostfs.info:40341/ws
    - wss://rpc2.morph.frostfs.info:40341/ws
    - wss://rpc3.morph.frostfs.info:40341/ws
    - wss://rpc4.morph.frostfs.info:40341/ws
    - wss://rpc5.morph.frostfs.info:40341/ws
    - wss://rpc6.morph.frostfs.info:40341/ws
    - wss://rpc7.morph.frostfs.info:40341/ws
  dial_timeout: 20s
contracts:
  balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
  container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
  netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1

config/testnet/README.md (new file, 129 lines)

@@ -0,0 +1,129 @@
# N3 Testnet Storage node configuration
There is a prepared configuration for NeoFS Storage Node deployment in the
N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
docker image and run it with docker-compose.
## Build image
The prepared **frostfs-storage-testnet** image is available on Docker Hub.
However, if you need to rebuild it for some reason, run the
`make image-storage-testnet` command.
```
$ make image-storage-testnet
...
Successfully built ab0557117b02
Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
```
## Deploy node
To run a storage node in the N3 Testnet environment, you should deposit GAS assets,
update the docker-compose file, and start the node.
### Deposit
The Storage Node owner should deposit GAS to the NeoFS smart contract. This generates a
bit of sidechain GAS in the node's wallet; sidechain GAS is used to send the bootstrap tx.
First, obtain GAS in the N3 Testnet chain. You can do that with the
[faucet](https://neowish.ngd.network) service.
Then, make a deposit by transferring GAS to the NeoFS contract in N3 Testnet.
You can provide a script hash in the `data` argument of the transfer tx to make a
deposit to a specified account; otherwise, the deposit is made to the tx sender
(see the variant after the example below).
The NeoFS contract script hash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.
See a deposit example with `neo-go`.
```
neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
--token GAS \
--amount 1
```
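To target a specific account, the same transfer can carry the recipient's script hash in the
`data` argument. A minimal sketch, assuming your neo-go version accepts a trailing positional
data argument (check `neo-go wallet nep17 transfer --help` for the exact syntax):
```
neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
        --from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
        --to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
        --token GAS \
        --amount 1 \
        <deposit-account-script-hash>
```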
### Configure
Next, configure the `node_config.env` file. Change the endpoint values. Both
should contain your **public** IP.
```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
```
Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
attribute.
```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
```
You can validate the UN/LOCODE attribute against the
[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
with frostfs-cli.
```
$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
Country: Russia
Location: Saint Petersburg (ex Leningrad)
Continent: Europe
Subdivision: [SPE] Sankt-Peterburg
Coordinates: 59.53, 30.15
```
It is recommended to pass the node's key as a file. To do so, convert your wallet
WIF to a 32-byte hex string (via `frostfs-cli`, for example) and save it to a file.
```
// Print WIF in a 32-byte hex format
$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc
ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf
// Save 32-byte hex into a file
$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
```
Then, specify the path to this file in `docker-compose.yml`:
```yaml
volumes:
- frostfs_storage:/storage
- ./my_wallet.key:/node.key
```
NeoFS objects will be stored on your machine. By default, docker-compose
is configured to store objects in the named docker volume `frostfs_storage`. You can
instead specify a directory on the host filesystem to store objects.
```yaml
volumes:
- /home/username/frostfs/rc3/storage:/storage
- ./my_wallet.key:/node.key
```
### Start
Run the node with the `docker-compose up` command and stop it with `docker-compose down`.
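One possible session, assuming the container is named `frostfs-testnet` as in the Debug
section below:
```
$ docker-compose up -d
$ docker logs -f frostfs-testnet
$ docker-compose down
```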
### Debug
To print node logs, use `docker logs frostfs-testnet`. To print debug messages in the
log, set the log level to debug with this environment variable:
```yaml
environment:
- NEOFS_LOGGER_LEVEL=debug
```

52
config/testnet/config.yml Normal file
View file

@ -0,0 +1,52 @@
logger:
level: info
morph:
rpc_endpoint:
- wss://rpc01.morph.testnet.frostfs.info:51331/ws
- wss://rpc02.morph.testnet.frostfs.info:51331/ws
- wss://rpc03.morph.testnet.frostfs.info:51331/ws
- wss://rpc04.morph.testnet.frostfs.info:51331/ws
- wss://rpc05.morph.testnet.frostfs.info:51331/ws
- wss://rpc06.morph.testnet.frostfs.info:51331/ws
- wss://rpc07.morph.testnet.frostfs.info:51331/ws
dial_timeout: 20s
contracts:
balance: e0420c216003747626670d1424569c17c79015bf
container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
netmap: d4b331639799e2958d4bc5b711b469d79de94e01
node:
key: /node.key
attribute_0: Deployed:SelfHosted
attribute_1: User-Agent:FrostFS\/0.9999
prometheus:
enabled: true
address: localhost:9090
shutdown_timeout: 15s
storage:
shard_num: 1
shard:
0:
metabase:
path: /storage/metabase
perm: 0777
blobstor:
- path: /storage/path/blobovnicza
type: blobovnicza
perm: 0600
opened_cache_capacity: 32
depth: 1
width: 1
- path: /storage/path/fstree
type: fstree
perm: 0600
depth: 4
writecache:
enabled: false
gc:
remover_batch_size: 100
remover_sleep_interval: 1m

View file

@ -51,7 +51,10 @@ However, all mode changing operations are idempotent.
## Automatic mode changes
A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
A shard can automatically switch to `degraded-read-only` mode in 3 cases:
1. If the metabase was not available or couldn't be opened/initialized during shard startup.
2. If the shard error counter exceeds the threshold.
3. If the metabase couldn't be reopened during SIGHUP handling.
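After the underlying issue is fixed, the shard can be switched back manually. A minimal
sketch, assuming the `frostfs-cli control shards set-mode` subcommand and its `--endpoint`,
`--wallet`, `--id` and `--mode` flags (not shown in this excerpt):
```
frostfs-cli control shards set-mode \
    --endpoint <control-grpc-endpoint> \
    --wallet <path/to/wallet> \
    --id <shard-id> \
    --mode read-write
```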
# Detach shard

View file

@ -209,7 +209,7 @@ blobstor:
width: 4
- type: fstree
path: /path/to/blobstor/blobovnicza
perm: 0o644
perm: 0644
size: 4194304
depth: 1
width: 4
@ -269,7 +269,7 @@ gc:
```yaml
metabase:
path: /path/to/meta.db
perm: 0o644
perm: 0644
max_batch_size: 200
max_batch_delay: 20ms
```
@ -359,7 +359,6 @@ limits:
| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified either for all tags or for none. |
| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. |
| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. |
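Putting these parameters together, a minimal configuration sketch; the `read`/`write`
subsections match the sample shown earlier in this diff, and the tag list is assumed to
live under a `tags` key (the parent key is not visible in this excerpt):
```yaml
limits:
  read:
    max_running_ops: 10000
    max_waiting_ops: 1000
    tags:
      - tag: client
        weight: 10
        limit_ops: 10000
      - tag: policer
        weight: 5
        limit_ops: 25000
        reserved_ops: 1000
  write:
    max_running_ops: 1000
    max_waiting_ops: 100
```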
# `node` section

8
go.mod
View file

@ -6,10 +6,10 @@ require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b

16
go.sum
View file

@ -4,14 +4,14 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 h1:zM2l316J55h9p30snl6vHBI/h0xmnuqZjnxIjRDtJZw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 h1:CBreXSxGoYJAdZ1QdJPsDs1UCXGF5psinII0lxtohsc=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=

View file

@ -512,7 +512,7 @@ const (
FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
FailedToParseIncomingIOTag = "failed to parse incoming IO tag"
NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`"
FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
)

View file

@ -15,7 +15,7 @@ func newQoSMetrics() *QoSMetrics {
Namespace: namespace,
Subsystem: qosSubsystem,
Name: "operations_total",
Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
Help: "Count of pending, in progree, completed and failed due of resource exhausted error operations for each shard",
}, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
}
}

View file

@ -26,7 +26,7 @@ func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor
if err != nil {
tag = IOTagClient
}
if tag.IsLocal() {
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
@ -44,7 +44,7 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientIntercepto
if err != nil {
tag = IOTagClient
}
if tag.IsLocal() {
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())

View file

@ -1,219 +0,0 @@
package qos_test
import (
"context"
"errors"
"fmt"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-qos/limiting"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
)
const (
okKey = "ok"
)
var (
errTest = errors.New("mock")
errWrongTag = errors.New("wrong tag")
errNoTag = errors.New("failed to get tag from context")
errResExhausted *apistatus.ResourceExhausted
tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync}
)
type mockGRPCServerStream struct {
grpc.ServerStream
ctx context.Context
}
func (m *mockGRPCServerStream) Context() context.Context {
return m.ctx
}
type limiter struct {
acquired bool
released bool
}
func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) {
l.acquired = true
if key != okKey {
return nil, false
}
return func() { l.released = true }, true
}
func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim })
handler := func(ctx context.Context, req any) (any, error) {
return nil, errTest
}
_, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler)
return err
}
func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error {
interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim })
handler := func(srv any, stream grpc.ServerStream) error {
return errTest
}
err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{
FullMethod: methodName,
}, handler)
return err
}
func Test_MaxActiveRPCLimiter(t *testing.T) {
// UnaryServerInterceptor
t.Run("unary fail", func(t *testing.T) {
var lim limiter
err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "")
require.ErrorAs(t, err, &errResExhausted)
require.True(t, lim.acquired)
require.False(t, lim.released)
})
t.Run("unary pass critical", func(t *testing.T) {
var lim limiter
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
err := unaryMaxActiveRPCLimiter(ctx, &lim, "")
require.ErrorIs(t, err, errTest)
require.False(t, lim.acquired)
require.False(t, lim.released)
})
t.Run("unary pass", func(t *testing.T) {
var lim limiter
err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey)
require.ErrorIs(t, err, errTest)
require.True(t, lim.acquired)
require.True(t, lim.released)
})
// StreamServerInterceptor
t.Run("stream fail", func(t *testing.T) {
var lim limiter
err := streamMaxActiveRPCLimiter(context.Background(), &lim, "")
require.ErrorAs(t, err, &errResExhausted)
require.True(t, lim.acquired)
require.False(t, lim.released)
})
t.Run("stream pass critical", func(t *testing.T) {
var lim limiter
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String())
err := streamMaxActiveRPCLimiter(ctx, &lim, "")
require.ErrorIs(t, err, errTest)
require.False(t, lim.acquired)
require.False(t, lim.released)
})
t.Run("stream pass", func(t *testing.T) {
var lim limiter
err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey)
require.ErrorIs(t, err, errTest)
require.True(t, lim.acquired)
require.True(t, lim.released)
})
}
func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) {
interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor()
called := false
handler := func(ctx context.Context, req any) (any, error) {
called = true
if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() {
return nil, nil
}
return nil, errWrongTag
}
_, err := interceptor(context.Background(), nil, nil, handler)
require.NoError(t, err)
require.True(t, called)
}
func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) {
interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor()
// check context with no value
called := false
invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
called = true
if _, ok := tagging.IOTagFromContext(ctx); ok {
return fmt.Errorf("%v: expected no IO tags", errWrongTag)
}
return nil
}
require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil))
require.True(t, called)
// check context for internal tag
targetTag := qos.IOTagInternal.String()
invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
raw, ok := tagging.IOTagFromContext(ctx)
if !ok {
return errNoTag
}
if raw != targetTag {
return errWrongTag
}
return nil
}
for _, tag := range tags {
ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
}
// check context for client tag
ctx := tagging.ContextWithIOTag(context.Background(), "")
targetTag = qos.IOTagClient.String()
require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil))
}
func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) {
interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor()
// check context with no value
called := false
streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
called = true
if _, ok := tagging.IOTagFromContext(ctx); ok {
return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag)
}
return nil, nil
}
_, err := interceptor(context.Background(), nil, nil, "", streamer, nil)
require.True(t, called)
require.NoError(t, err)
// check context for internal tag
targetTag := qos.IOTagInternal.String()
streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
raw, ok := tagging.IOTagFromContext(ctx)
if !ok {
return nil, errNoTag
}
if raw != targetTag {
return nil, errWrongTag
}
return nil, nil
}
for _, tag := range tags {
ctx := tagging.ContextWithIOTag(context.Background(), tag.String())
_, err := interceptor(ctx, nil, nil, "", streamer, nil)
require.NoError(t, err)
}
// check context for client tag
ctx := tagging.ContextWithIOTag(context.Background(), "")
targetTag = qos.IOTagClient.String()
_, err = interceptor(ctx, nil, nil, "", streamer, nil)
require.NoError(t, err)
}

View file

@ -74,7 +74,7 @@ func createScheduler(config limits.OpConfig) (scheduler, error) {
func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
result := make(map[string]scheduling.TagInfo)
for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache} {
result[tag.String()] = scheduling.TagInfo{
Share: defaultShare,
}
@ -90,7 +90,6 @@ func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.T
if l.ReservedOps != nil && *l.ReservedOps != 0 {
v.ReservedIOPS = l.ReservedOps
}
v.Prohibited = l.Prohibited
result[l.Tag] = v
}
return result
@ -165,7 +164,8 @@ func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (R
rel, err := s.RequestArrival(ctx, tag)
stat.inProgress.Add(1)
if err != nil {
if isResourceExhaustedErr(err) {
if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
errors.Is(err, errSemaphoreLimitExceeded) {
stat.resourceExhausted.Add(1)
return nil, &apistatus.ResourceExhausted{}
}
@ -234,9 +234,3 @@ func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation s
metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh)
}
}
func isResourceExhaustedErr(err error) bool {
return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) ||
errors.Is(err, errSemaphoreLimitExceeded) ||
errors.Is(err, scheduling.ErrTagRequestsProhibited)
}

View file

@ -3,13 +3,12 @@ package qos
const unknownStatsTag = "unknown"
var statTags = map[string]struct{}{
IOTagBackground.String(): {},
IOTagClient.String(): {},
IOTagCritical.String(): {},
IOTagBackground.String(): {},
IOTagInternal.String(): {},
IOTagPolicer.String(): {},
IOTagTreeSync.String(): {},
IOTagWritecache.String(): {},
IOTagCritical.String(): {},
unknownStatsTag: {},
}

View file

@ -10,33 +10,30 @@ import (
type IOTag string
const (
IOTagBackground IOTag = "background"
IOTagClient IOTag = "client"
IOTagCritical IOTag = "critical"
IOTagInternal IOTag = "internal"
IOTagPolicer IOTag = "policer"
IOTagTreeSync IOTag = "treesync"
IOTagBackground IOTag = "background"
IOTagWritecache IOTag = "writecache"
IOTagPolicer IOTag = "policer"
IOTagCritical IOTag = "critical"
ioTagUnknown IOTag = ""
)
func FromRawString(s string) (IOTag, error) {
switch s {
case string(IOTagBackground):
return IOTagBackground, nil
case string(IOTagClient):
return IOTagClient, nil
case string(IOTagCritical):
return IOTagCritical, nil
case string(IOTagClient):
return IOTagClient, nil
case string(IOTagInternal):
return IOTagInternal, nil
case string(IOTagPolicer):
return IOTagPolicer, nil
case string(IOTagTreeSync):
return IOTagTreeSync, nil
case string(IOTagBackground):
return IOTagBackground, nil
case string(IOTagWritecache):
return IOTagWritecache, nil
case string(IOTagPolicer):
return IOTagPolicer, nil
default:
return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
}
@ -53,7 +50,3 @@ func IOTagFromContext(ctx context.Context) string {
}
return tag
}
func (t IOTag) IsLocal() bool {
return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
}

View file

@ -42,12 +42,11 @@ func validateOpConfig(c limits.OpConfig) error {
func validateTags(configTags []limits.IOTagConfig) error {
tags := map[IOTag]tagConfig{
IOTagBackground: {},
IOTagClient: {},
IOTagInternal: {},
IOTagPolicer: {},
IOTagTreeSync: {},
IOTagBackground: {},
IOTagWritecache: {},
IOTagPolicer: {},
}
for _, t := range configTags {
tag, err := FromRawString(t.Tag)

View file

@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -411,11 +410,11 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
&utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
},
CurrentEpoch: curEpoch,
currentEpoch: curEpoch,
},
),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@ -484,12 +483,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
&utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
CurrentEpoch: curEpoch,
currentEpoch: curEpoch,
},
),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@ -560,12 +559,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
},
),
WithNetmapSource(
&utilTesting.TestNetmapSource{
Netmaps: map[uint64]*netmap.NetMap{
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
CurrentEpoch: curEpoch,
currentEpoch: curEpoch,
},
),
WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))),
@ -597,3 +596,26 @@ func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container
func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) {
return nil, nil
}
type testNetmapSource struct {
netmaps map[uint64]*netmap.NetMap
currentEpoch uint64
}
func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
if diff >= s.currentEpoch {
return nil, fmt.Errorf("invalid diff")
}
return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff)
}
func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, fmt.Errorf("netmap not found")
}
func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) {
return s.currentEpoch, nil
}

View file

@ -2,11 +2,8 @@ package engine
import (
"context"
"fmt"
"path/filepath"
"runtime/debug"
"strings"
"sync"
"sync/atomic"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
@ -160,74 +157,26 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
var _ qos.Limiter = (*testQoSLimiter)(nil)
type testQoSLimiter struct {
t testing.TB
quard sync.Mutex
id int64
readStacks map[int64][]byte
writeStacks map[int64][]byte
t testing.TB
read atomic.Int64
write atomic.Int64
}
func (t *testQoSLimiter) SetMetrics(qos.Metrics) {}
func (t *testQoSLimiter) Close() {
t.quard.Lock()
defer t.quard.Unlock()
var sb strings.Builder
var seqN int
for _, stack := range t.readStacks {
seqN++
sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack)))
}
for _, stack := range t.writeStacks {
seqN++
sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack)))
}
require.True(t.t, seqN == 0, sb.String())
require.Equal(t.t, int64(0), t.read.Load(), "read requests count after limiter close must be 0")
require.Equal(t.t, int64(0), t.write.Load(), "write requests count after limiter close must be 0")
}
func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
t.quard.Lock()
defer t.quard.Unlock()
stack := debug.Stack()
t.id++
id := t.id
if t.readStacks == nil {
t.readStacks = make(map[int64][]byte)
}
t.readStacks[id] = stack
return func() {
t.quard.Lock()
defer t.quard.Unlock()
delete(t.readStacks, id)
}, nil
t.read.Add(1)
return func() { t.read.Add(-1) }, nil
}
func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
t.quard.Lock()
defer t.quard.Unlock()
stack := debug.Stack()
t.id++
id := t.id
if t.writeStacks == nil {
t.writeStacks = make(map[int64][]byte)
}
t.writeStacks[id] = stack
return func() {
t.quard.Lock()
defer t.quard.Unlock()
delete(t.writeStacks, id)
}, nil
t.write.Add(1)
return func() { t.write.Add(-1) }, nil
}
func (t *testQoSLimiter) SetParentID(string) {}

View file

@ -318,6 +318,8 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M
// HandleNewEpoch notifies every shard about NewEpoch event.
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
ev := shard.EventNewEpoch(epoch)
e.mtx.RLock()
defer e.mtx.RUnlock()
@ -325,7 +327,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) {
select {
case <-ctx.Done():
return
case sh.NotificationChannel() <- epoch:
case sh.NotificationChannel() <- ev:
default:
e.log.Debug(ctx, logs.ShardEventProcessingInProgress,
zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID()))

View file

@ -14,7 +14,10 @@ type bucketCache struct {
}
func newBucketCache() *bucketCache {
return &bucketCache{}
return &bucketCache{
expired: make(map[cid.ID]*bbolt.Bucket),
primary: make(map[cid.ID]*bbolt.Bucket),
}
}
func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
@ -53,7 +56,7 @@ func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
bucketName = objectToExpirationEpochBucketName(cnr, bucketName)
return tx.Bucket(bucketName)
}
return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr)
return getMappedBucket(bc.expired, tx, objectToExpirationEpochBucketName, cnr)
}
func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
@ -62,21 +65,17 @@ func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket {
bucketName = primaryBucketName(cnr, bucketName)
return tx.Bucket(bucketName)
}
return getMappedBucket(&bc.primary, tx, primaryBucketName, cnr)
return getMappedBucket(bc.primary, tx, primaryBucketName, cnr)
}
func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket {
value, ok := (*m)[cnr]
func getMappedBucket(m map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket {
value, ok := m[cnr]
if ok {
return value
}
if *m == nil {
*m = make(map[cid.ID]*bbolt.Bucket, 1)
}
bucketName := make([]byte, bucketKeySize)
bucketName = nameFunc(cnr, bucketName)
(*m)[cnr] = getBucket(&value, tx, bucketName)
m[cnr] = getBucket(&value, tx, bucketName)
return value
}

View file

@ -2,7 +2,6 @@ package meta
import (
"context"
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
@ -88,10 +87,16 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
}
func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch)
data, err := db.getWithCacheRaw(nil, tx, addr, key, checkStatus, raw, currEpoch)
if err != nil {
return nil, err
}
obj := objectSDK.New()
return obj, obj.Unmarshal(data)
}
func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) {
func (db *DB) getWithCacheRaw(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) ([]byte, error) {
if checkStatus {
st, err := objectStatusWithCache(bc, tx, addr, currEpoch)
if err != nil {
@ -109,13 +114,12 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key
key = objectKey(addr.Object(), key)
cnr := addr.Container()
obj := objectSDK.New()
bucketName := make([]byte, bucketKeySize)
// check in primary index
if b := getPrimaryBucket(bc, tx, cnr); b != nil {
if data := b.Get(key); len(data) != 0 {
return obj, obj.Unmarshal(data)
return data, nil
}
}
@ -127,17 +131,17 @@ func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key
// if not found then check in tombstone index
data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key)
if len(data) != 0 {
return obj, obj.Unmarshal(data)
return data, nil
}
// if not found then check in locker index
data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key)
if len(data) != 0 {
return obj, obj.Unmarshal(data)
return data, nil
}
// if not found then check if object is a virtual
return getVirtualObject(tx, cnr, key, raw)
return getVirtualObjectNoUnmarshal(tx, cnr, key, raw)
}
func getFromBucket(tx *bbolt.Tx, name, key []byte) []byte {
@ -150,6 +154,16 @@ func getFromBucket(tx *bbolt.Tx, name, key []byte) []byte {
}
func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSDK.Object, error) {
data, err := getVirtualObjectNoUnmarshal(tx, cnr, key, raw)
if err != nil {
return nil, err
}
obj := objectSDK.New()
return obj, obj.Unmarshal(data)
}
func getVirtualObjectNoUnmarshal(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) ([]byte, error) {
if raw {
return nil, getSplitInfoError(tx, cnr, key)
}
@ -187,20 +201,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
}
}
child := objectSDK.New()
err = child.Unmarshal(data)
if err != nil {
return nil, fmt.Errorf("unmarshal child with parent: %w", err)
}
par := child.Parent()
if par == nil { // this should never happen though
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
return par, nil
return parentFromChild(data)
}
func getSplitInfoError(tx *bbolt.Tx, cnr cid.ID, key []byte) error {

View file

@ -139,7 +139,8 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,
var containerID cid.ID
var offset []byte
bc := newBucketCache()
graveyardBkt := tx.Bucket(graveyardBucketName)
garbageBkt := tx.Bucket(garbageBucketName)
rawAddr := make([]byte, cidSize, addressKeySize)
@ -168,7 +169,7 @@ loop:
bkt := tx.Bucket(name)
if bkt != nil {
copy(rawAddr, cidRaw)
result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
result, count, cursor, threshold, currEpoch)
if err != nil {
return nil, nil, err
@ -203,10 +204,9 @@ loop:
// selectNFromBucket similar to selectAllFromBucket but uses cursor to find
// object to start selecting from. Ignores inhumed objects.
func selectNFromBucket(
bc *bucketCache,
bkt *bbolt.Bucket, // main bucket
func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
objType objectSDK.Type, // type of the objects stored in the main bucket
graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets
cidRaw []byte, // container ID prefix, optimization
cnt cid.ID, // container ID
to []objectcore.Info, // listing result
@ -219,6 +219,7 @@ func selectNFromBucket(
cursor = new(Cursor)
}
count := len(to)
c := bkt.Cursor()
k, v := c.First()
@ -230,7 +231,7 @@ func selectNFromBucket(
}
for ; k != nil; k, v = c.Next() {
if len(to) >= limit {
if count >= limit {
break
}
@ -240,8 +241,6 @@ func selectNFromBucket(
}
offset = k
graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
garbageBkt := getGarbageBucket(bc, bkt.Tx())
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
continue
}
@ -252,7 +251,7 @@ func selectNFromBucket(
}
expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
if hasExpEpoch && expEpoch < currEpoch && !objectLocked(bkt.Tx(), cnt, obj) {
continue
}
@ -274,6 +273,7 @@ func selectNFromBucket(
a.SetContainer(cnt)
a.SetObject(obj)
to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
count++
}
return to, offset, cursor, nil

View file

@ -59,7 +59,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
for range b.N {
res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
if !errors.Is(err, meta.ErrEndOfListing) {
if errors.Is(err, meta.ErrEndOfListing) {
b.Fatalf("error: %v", err)
}
prm.SetCursor(nil)

View file

@ -458,11 +458,22 @@ func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address,
return result, true
}
obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch)
src, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch)
if err != nil {
return result, false
}
var obj *objectSDK.Object
for i := range f {
if strings.HasPrefix(f[i].Header(), v2object.ReservedFilterPrefix) {
obj = objectSDK.New()
if err := obj.Unmarshal(src); err != nil {
return result, false
}
break
}
}
for i := range f {
var data []byte
switch f[i].Header() {
@ -492,12 +503,15 @@ func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address,
cs, _ := obj.PayloadChecksum()
data = cs.Value()
default: // user attribute
v, ok := attributeValue(obj, f[i].Header())
if ok {
if ech := obj.ECHeader(); ech != nil {
result.SetObject(ech.Parent())
res, err := attributeValueRaw(src, f[i].Header())
if err != nil {
return result, false
}
if res.attribute.found {
if res.isEC {
result.SetObject(res.parentID)
}
data = []byte(v)
data = []byte(res.attribute.value)
} else {
return result, f[i].Operation() == objectSDK.MatchNotPresent
}
@ -516,9 +530,9 @@ func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address,
return result, true
}
func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) ([]byte, bool, error) {
buf := make([]byte, addressKeySize)
obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch)
data, err := db.getWithCacheRaw(bc, tx, addr, buf, false, false, currEpoch)
if err != nil {
var ecInfoError *objectSDK.ECInfoError
if errors.As(err, &ecInfoError) {
@ -528,15 +542,15 @@ func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Ad
continue
}
addr.SetObject(objID)
obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch)
data, err = db.getWithCacheRaw(bc, tx, addr, buf, true, false, currEpoch)
if err == nil {
return obj, true, nil
return data, true, nil
}
}
}
return nil, false, err
}
return obj, false, nil
return data, false, nil
}
func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {

View file

@ -0,0 +1,261 @@
package meta
import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/VictoriaMetrics/easyproto"
)
var errMalformedObject = errors.New("malformed object")
type protoAttribute struct {
found bool
value string
}
type protoHeader struct {
attribute protoAttribute
ecHeader protoECHeader
}
type protoECHeader struct {
parentID protoOID
attribute protoAttribute
}
type protoOID oid.ID
type protoIter struct {
fc easyproto.FieldContext
data []byte
failed bool
eof bool
}
func newProtoIter(data []byte, ok bool) protoIter {
return protoIter{data: data, failed: !ok}
}
func (p *protoIter) err() error {
if p.failed {
return errMalformedObject
}
return nil
}
func (p *protoIter) finished() bool {
return p.failed || p.eof
}
func (p *protoIter) next() {
if p.failed {
return
}
if len(p.data) == 0 {
p.eof = true
return
}
data, err := p.fc.NextField(p.data)
if err != nil {
p.failed = true
return
}
p.data = data
}
func (p *protoOID) fill(fc easyproto.FieldContext) error {
iter := newProtoIter(fc.MessageData())
for iter.next(); !iter.finished(); iter.next() {
if iter.fc.FieldNum == 1 { // Wire number for `id` field.
rawID, ok := iter.fc.Bytes()
if !ok {
return errMalformedObject
}
return (*oid.ID)(p).Decode(rawID)
}
}
return iter.err()
}
func (p *protoAttribute) fill(fc easyproto.FieldContext, attribute string) error {
var key, value string
iter := newProtoIter(fc.MessageData())
for iter.next(); !iter.finished(); iter.next() {
var ok bool
switch iter.fc.FieldNum {
case 1: // Wire number for `key` field.
key, ok = iter.fc.String()
case 2: // Wire number for `value` field.
value, ok = iter.fc.String()
default:
continue
}
if !ok {
return errMalformedObject
}
}
if key == attribute {
p.found = true
p.value = value
}
return iter.err()
}
func (p *protoECHeader) fill(fc easyproto.FieldContext, attribute string) error {
iter := newProtoIter(fc.MessageData())
for iter.next(); !iter.finished(); iter.next() {
var err error
switch iter.fc.FieldNum {
case 1: // Wire number for `parent` field.
err = p.parentID.fill(iter.fc)
case 8: // Wire number for `parent_attributes` field.
err = p.attribute.fill(iter.fc, attribute)
}
if err != nil {
return err
}
}
return iter.err()
}
func (p *protoHeader) fill(fc easyproto.FieldContext, attribute string) error {
iter := newProtoIter(fc.MessageData())
for iter.next(); !iter.finished(); iter.next() {
var err error
switch iter.fc.FieldNum {
case 10: // Wire number for `attributes` field.
err = p.attribute.fill(iter.fc, attribute)
case 12: // Wire number for `ec` field.
err = p.ecHeader.fill(iter.fc, attribute)
}
if err != nil {
return err
}
}
return iter.err()
}
type attributeMatchResult struct {
attribute protoAttribute
isEC bool
parentID oid.ID
}
func attributeValueRaw(src []byte, attribute string) (attributeMatchResult, error) {
iter := newProtoIter(src, true)
for iter.next(); !iter.finished(); iter.next() {
if iter.fc.FieldNum == 3 { // Wire number for `header` field.
var p protoHeader
if err := p.fill(iter.fc, attribute); err != nil {
return attributeMatchResult{}, err
}
if p.ecHeader.attribute.found {
return attributeMatchResult{
attribute: p.ecHeader.attribute,
isEC: true,
parentID: oid.ID(p.ecHeader.parentID),
}, nil
} else {
return attributeMatchResult{attribute: p.attribute}, nil
}
}
}
return attributeMatchResult{}, iter.err()
}
func parentFromChild(src []byte) ([]byte, error) {
iter := newProtoIter(src, true)
for iter.next(); !iter.finished(); iter.next() {
if iter.fc.FieldNum == 3 {
return parentFromChildHeader(iter.fc)
}
}
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
func parentFromChildHeader(fc easyproto.FieldContext) ([]byte, error) {
iter := newProtoIter(fc.MessageData())
for iter.next(); !iter.finished(); iter.next() {
if iter.fc.FieldNum == 11 {
return parentFromSplitHeader(iter.fc)
}
}
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
func parentFromSplitHeader(fc easyproto.FieldContext) ([]byte, error) {
iter := newProtoIter(fc.MessageData())
var parentID, parentSig, parentHdr []byte
for iter.next(); !iter.finished(); iter.next() {
var ok bool
switch iter.fc.FieldNum {
case 1: // parent id
parentID, ok = iter.fc.MessageData()
case 3: // parent signature
parentSig, ok = iter.fc.MessageData()
case 4: // parent header
parentHdr, ok = iter.fc.MessageData()
default:
continue
}
if !ok {
return nil, errMalformedObject
}
}
if parentSig == nil || parentHdr == nil {
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
h := &header{
parentID: parentID,
parentSig: parentSig,
parentHdr: parentHdr,
}
return h.StableMarshal(make([]byte, h.StableSize())), nil
}
type (
header struct {
parentID bs
parentSig bs
parentHdr bs
}
)
func (o *header) StableSize() int {
var size int
size += proto.NestedStructureSize(1, &o.parentID)
size += proto.NestedStructureSize(2, &o.parentSig)
size += proto.NestedStructureSize(3, &o.parentHdr)
return size
}
func (o *header) StableMarshal(buf []byte) []byte {
var offset int
offset += proto.NestedStructureMarshal(1, buf[offset:], &o.parentID)
offset += proto.NestedStructureMarshal(2, buf[offset:], &o.parentSig)
offset += proto.NestedStructureMarshal(3, buf[offset:], &o.parentHdr)
return buf[:offset]
}
type bs []byte
func (b *bs) StableSize() int {
return len(*b)
}
func (b *bs) StableMarshal(dst []byte) []byte {
copy(dst, *b)
return dst[:len(*b)]
}

View file

@ -0,0 +1,101 @@
package meta
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"fmt"
"strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
func TestAttributeValueRaw_ZeroAlloc(t *testing.T) {
const attrCount = 4
attrs := make([]objectSDK.Attribute, attrCount)
for i := range attrs {
attrs[i].SetKey(strconv.Itoa(i))
attrs[i].SetValue(strconv.Itoa(i * 1000))
}
cnr := cidtest.ID()
ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: oidtest.ID(), Attributes: attrs[:attrCount/2]}, 0, 2, nil, 0)
obj := testutil.GenerateObjectWithCID(cnr)
obj.SetECHeader(ech)
obj.SetAttributes(attrs[attrCount/2:]...)
raw, err := obj.Marshal()
require.NoError(t, err)
require.Zero(t, testing.AllocsPerRun(100, func() {
res, err := attributeValueRaw(raw, "0")
if err != nil || !res.attribute.found {
t.Fatalf("err %v, found %t", err, res.attribute.found)
}
}))
}
func BenchmarkParentFromChild(b *testing.B) {
cnr := cidtest.ID()
parent := testutil.GenerateObjectWithCIDWithPayload(cnr, nil)
parent.SetPayloadSize(1234)
pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(b, err)
require.NoError(b, objectSDK.CalculateAndSetSignature(*pk, parent))
child := testutil.GenerateObjectWithCID(cnr)
child.SetParent(parent)
data, err := child.Marshal()
require.NoError(b, err)
o1, err := parentFromChildUnoptimized(data)
require.NoError(b, err)
o2, err := parentFromChild(data)
require.NoError(b, err)
require.Equal(b, o1, o2)
b.Run("unoptimized", func(b *testing.B) {
for range b.N {
_, err := parentFromChildUnoptimized(data)
if err != nil {
b.Fatal(err)
}
}
})
b.Run("proto access", func(b *testing.B) {
for range b.N {
_, err := parentFromChild(data)
if err != nil {
b.Fatal(err)
}
}
})
}
func parentFromChildUnoptimized(src []byte) ([]byte, error) {
child := objectSDK.New()
err := child.Unmarshal(src)
if err != nil {
return nil, fmt.Errorf("unmarshal child with parent: %w", err)
}
par := child.Parent()
if par == nil { // this should never happen though
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
return par.Marshal()
}

View file

@ -108,17 +108,19 @@ func (s *Shard) Init(ctx context.Context) error {
s.updateMetrics(ctx)
s.gc = &gc{
gcCfg: &s.gcCfg,
remover: s.removeGarbage,
stopChannel: make(chan struct{}),
newEpochChan: make(chan uint64),
newEpochHandlers: &newEpochHandlers{
cancelFunc: func() {},
handlers: []newEpochHandler{
s.collectExpiredLocks,
s.collectExpiredObjects,
s.collectExpiredTombstones,
s.collectExpiredMetrics,
gcCfg: &s.gcCfg,
remover: s.removeGarbage,
stopChannel: make(chan struct{}),
eventChan: make(chan Event),
mEventHandler: map[eventType]*eventHandlers{
eventNewEpoch: {
cancelFunc: func() {},
handlers: []eventHandler{
s.collectExpiredLocks,
s.collectExpiredObjects,
s.collectExpiredTombstones,
s.collectExpiredMetrics,
},
},
},
}

View file

@ -33,14 +33,41 @@ type TombstoneSource interface {
IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool
}
type newEpochHandler func(context.Context, uint64)
// Event represents class of external events.
type Event interface {
typ() eventType
}
type newEpochHandlers struct {
type eventType int
const (
_ eventType = iota
eventNewEpoch
)
type newEpoch struct {
epoch uint64
}
func (e newEpoch) typ() eventType {
return eventNewEpoch
}
// EventNewEpoch returns new epoch event.
func EventNewEpoch(e uint64) Event {
return newEpoch{
epoch: e,
}
}
type eventHandler func(context.Context, Event)
type eventHandlers struct {
prevGroup sync.WaitGroup
cancelFunc context.CancelFunc
handlers []newEpochHandler
handlers []eventHandler
}
type gcRunResult struct {
@ -82,10 +109,10 @@ type gc struct {
remover func(context.Context) gcRunResult
// newEpochChan is used only for listening for the new epoch event.
// eventChan is used only for listening for the new epoch event.
// It is ok to keep opened, we are listening for context done when writing in it.
newEpochChan chan uint64
newEpochHandlers *newEpochHandlers
eventChan chan Event
mEventHandler map[eventType]*eventHandlers
}
type gcCfg struct {
@ -115,7 +142,15 @@ func defaultGCCfg() gcCfg {
}
func (gc *gc) init(ctx context.Context) {
gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers))
sz := 0
for _, v := range gc.mEventHandler {
sz += len(v.handlers)
}
if sz > 0 {
gc.workerPool = gc.workerPoolInit(sz)
}
ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
gc.wg.Add(2)
go gc.tickRemover(ctx)
@ -133,7 +168,7 @@ func (gc *gc) listenEvents(ctx context.Context) {
case <-ctx.Done():
gc.log.Warn(ctx, logs.ShardStopEventListenerByContext)
return
case event, ok := <-gc.newEpochChan:
case event, ok := <-gc.eventChan:
if !ok {
gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel)
return
@ -144,33 +179,38 @@ func (gc *gc) listenEvents(ctx context.Context) {
}
}
func (gc *gc) handleEvent(ctx context.Context, epoch uint64) {
gc.newEpochHandlers.cancelFunc()
gc.newEpochHandlers.prevGroup.Wait()
func (gc *gc) handleEvent(ctx context.Context, event Event) {
v, ok := gc.mEventHandler[event.typ()]
if !ok {
return
}
v.cancelFunc()
v.prevGroup.Wait()
var runCtx context.Context
runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx)
runCtx, v.cancelFunc = context.WithCancel(ctx)
gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers))
v.prevGroup.Add(len(v.handlers))
for i := range gc.newEpochHandlers.handlers {
for i := range v.handlers {
select {
case <-ctx.Done():
return
default:
}
h := gc.newEpochHandlers.handlers[i]
h := v.handlers[i]
err := gc.workerPool.Submit(func() {
defer gc.newEpochHandlers.prevGroup.Done()
h(runCtx, epoch)
defer v.prevGroup.Done()
h(runCtx, event)
})
if err != nil {
gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
zap.Error(err),
)
gc.newEpochHandlers.prevGroup.Done()
v.prevGroup.Done()
}
}
}
@ -227,9 +267,6 @@ func (gc *gc) stop(ctx context.Context) {
gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
gc.newEpochHandlers.cancelFunc()
gc.newEpochHandlers.prevGroup.Wait()
}
// iterates over metabase and deletes objects
@ -325,7 +362,7 @@ func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
return
}
func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@ -333,8 +370,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular)
}()
s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch))
s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@ -343,7 +380,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock {
batch = append(batch, o.Address())
@ -449,7 +486,7 @@ func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeR
return s.metaBase.Inhume(ctx, inhumePrm)
}
func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@ -457,6 +494,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone)
}()
epoch := e.(newEpoch).epoch
log := s.log.With(zap.Uint64("epoch", epoch))
log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling)
@ -489,8 +527,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
return
}
var release qos.ReleaseFunc
release, err = s.opsLimiter.ReadRequest(ctx)
release, err := s.opsLimiter.ReadRequest(ctx)
if err != nil {
log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
@ -528,7 +565,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) {
}
}
func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
var err error
startedAt := time.Now()
@ -536,8 +573,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock)
}()
s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch))
s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch))
workersCount, batchSize := s.getExpiredObjectsParameters()
@ -547,14 +584,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
errGroup.Go(func() error {
batch := make([]oid.Address, 0, batchSize)
expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) {
expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) {
if o.Type() == objectSDK.TypeLock {
batch = append(batch, o.Address())
if len(batch) == batchSize {
expired := batch
errGroup.Go(func() error {
s.expiredLocksCallback(egCtx, epoch, expired)
s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
batch = make([]oid.Address, 0, batchSize)
@ -568,7 +605,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) {
if len(batch) > 0 {
expired := batch
errGroup.Go(func() error {
s.expiredLocksCallback(egCtx, epoch, expired)
s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired)
return egCtx.Err()
})
}
@ -667,10 +704,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
// HandleExpiredLocks unlocks all objects which were locked by lockers.
// If successful, marks lockers themselves as garbage.
func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
s.m.RLock()
defer s.m.RUnlock()
if s.info.Mode.NoMetabase() {
if s.GetMode().NoMetabase() {
return
}
@ -733,10 +767,7 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc
// HandleDeletedLocks unlocks all objects which were locked by lockers.
func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
s.m.RLock()
defer s.m.RUnlock()
if s.info.Mode.NoMetabase() {
if s.GetMode().NoMetabase() {
return
}
@ -753,15 +784,17 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
}
}
// NotificationChannel returns channel for new epoch events.
func (s *Shard) NotificationChannel() chan<- uint64 {
return s.gc.newEpochChan
// NotificationChannel returns channel for shard events.
func (s *Shard) NotificationChannel() chan<- Event {
return s.gc.eventChan
}
func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) {
func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) {
ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics")
defer span.End()
epoch := e.(newEpoch).epoch
s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch))
defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch))

View file

@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
require.NoError(t, err)
epoch.Value = 105
sh.gc.handleEvent(context.Background(), epoch.Value)
sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
var getPrm GetPrm
getPrm.SetAddress(objectCore.AddressOf(obj))
@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
require.True(t, errors.As(err, &splitInfoError), "split info must be provided")
epoch.Value = 105
sh.gc.handleEvent(context.Background(), epoch.Value)
sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value))
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires")

View file

@ -9,7 +9,6 @@ import (
"sync/atomic"
"time"
nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
@ -61,9 +60,6 @@ type Client struct {
rpcActor *actor.Actor // neo-go RPC actor
gasToken *nep17.Token // neo-go GAS token wrapper
rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper
nnsHash util.Uint160 // NNS contract hash
nnsReader *nnsClient.ContractReader // NNS contract wrapper
acc *wallet.Account // neo account
accAddr util.Uint160 // account's address
@ -98,12 +94,27 @@ type Client struct {
type cache struct {
m sync.RWMutex
nnsHash *util.Uint160
gKey *keys.PublicKey
txHeights *lru.Cache[util.Uint256, uint32]
metrics metrics.MorphCacheMetrics
}
func (c *cache) nns() *util.Uint160 {
c.m.RLock()
defer c.m.RUnlock()
return c.nnsHash
}
func (c *cache) setNNSHash(nnsHash util.Uint160) {
c.m.Lock()
defer c.m.Unlock()
c.nnsHash = &nnsHash
}
func (c *cache) groupKey() *keys.PublicKey {
c.m.RLock()
defer c.m.RUnlock()
@ -122,6 +133,7 @@ func (c *cache) invalidate() {
c.m.Lock()
defer c.m.Unlock()
c.nnsHash = nil
c.gKey = nil
c.txHeights.Purge()
}
@ -579,7 +591,6 @@ func (c *Client) setActor(act *actor.Actor) {
c.rpcActor = act
c.gasToken = nep17.New(act, gas.Hash)
c.rolemgmt = rolemgmt.New(act)
c.nnsReader = nnsClient.NewReader(act, c.nnsHash)
}
func (c *Client) GetActor() *actor.Actor {

View file

@ -145,11 +145,6 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
if cli.client == nil {
return nil, ErrNoHealthyEndpoint
}
cs, err := cli.client.GetContractStateByID(nnsContractID)
if err != nil {
return nil, fmt.Errorf("resolve nns hash: %w", err)
}
cli.nnsHash = cs.Hash
cli.setActor(act)
go cli.closeWaiter(ctx)

View file

@ -8,12 +8,14 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
)
const (
@ -35,8 +37,12 @@ const (
NNSPolicyContractName = "policy.frostfs"
)
// ErrNNSRecordNotFound means that there is no such record in NNS contract.
var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
var (
// ErrNNSRecordNotFound means that there is no such record in NNS contract.
ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
errEmptyResultStack = errors.New("returned result stack is empty")
)
// NNSAlphabetContractName returns contract name of the alphabet contract in NNS
// based on alphabet index.
@ -55,36 +61,97 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) {
return util.Uint160{}, ErrConnectionLost
}
sh, err = nnsResolve(c.nnsReader, name)
nnsHash, err := c.NNSHash()
if err != nil {
return util.Uint160{}, err
}
sh, err = nnsResolve(c.client, nnsHash, name)
if err != nil {
return sh, fmt.Errorf("NNS.resolve: %w", err)
}
return sh, nil
}
func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) {
available, err := r.IsAvailable(domain)
// NNSHash returns NNS contract hash.
func (c *Client) NNSHash() (util.Uint160, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()
if c.inactive {
return util.Uint160{}, ErrConnectionLost
}
success := false
startedAt := time.Now()
defer func() {
c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt))
}()
nnsHash := c.cache.nns()
if nnsHash == nil {
cs, err := c.client.GetContractStateByID(nnsContractID)
if err != nil {
return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err)
}
c.cache.setNNSHash(cs.Hash)
nnsHash = &cs.Hash
}
success = true
return *nnsHash, nil
}
func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
found, err := exists(c, nnsHash, domain)
if err != nil {
return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
}
if available {
if !found {
return nil, ErrNNSRecordNotFound
}
return r.Resolve(domain, big.NewInt(int64(nns.TXT)))
result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{
{
Type: smartcontract.StringType,
Value: domain,
},
{
Type: smartcontract.IntegerType,
Value: big.NewInt(int64(nns.TXT)),
},
}, nil)
if err != nil {
return nil, err
}
if result.State != vmstate.Halt.String() {
return nil, fmt.Errorf("invocation failed: %s", result.FaultException)
}
if len(result.Stack) == 0 {
return nil, errEmptyResultStack
}
return result.Stack[0], nil
}
func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) {
arr, err := nnsResolveItem(r, domain)
func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) {
res, err := nnsResolveItem(c, nnsHash, domain)
if err != nil {
return util.Uint160{}, err
}
if len(arr) == 0 {
return util.Uint160{}, errors.New("NNS record is missing")
// Parse the result of resolving an NNS record.
// It works with multiple formats (corresponding to multiple NNS versions).
// If an array of hashes is provided, only the first one is returned.
if arr, ok := res.Value().([]stackitem.Item); ok {
if len(arr) == 0 {
return util.Uint160{}, errors.New("NNS record is missing")
}
res = arr[0]
}
bs, err := arr[0].TryBytes()
bs, err := res.TryBytes()
if err != nil {
return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
}
@ -104,6 +171,33 @@ func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error
return util.Uint160{}, errors.New("no valid hashes are found")
}
func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) {
result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{
{
Type: smartcontract.StringType,
Value: domain,
},
}, nil)
if err != nil {
return false, err
}
if len(result.Stack) == 0 {
return false, errEmptyResultStack
}
res := result.Stack[0]
available, err := res.TryBool()
if err != nil {
return false, fmt.Errorf("malformed response: %w", err)
}
// not available means that it is taken
// and, therefore, exists
return !available, nil
}
// SetGroupSignerScope makes the default signer scope include all FrostFS contracts.
// Should be called for side-chain client only.
func (c *Client) SetGroupSignerScope() error {
@ -147,12 +241,18 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) {
return gKey, nil
}
arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName)
nnsHash, err := c.NNSHash()
if err != nil {
return nil, err
}
if len(arr) == 0 {
item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName)
if err != nil {
return nil, err
}
arr, ok := item.Value().([]stackitem.Item)
if !ok || len(arr) == 0 {
return nil, errors.New("NNS record is missing")
}

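A hedged usage sketch of the lazy NNS resolution above: the first call fetches the contract state by ID and caches the hash; cache.invalidate() (e.g. on endpoint switch) forces a re-fetch. The helper name is illustrative:

func resolvePolicyContract(cli *Client) (util.Uint160, error) {
	// First use populates the NNS hash cache via GetContractStateByID.
	addr, err := cli.NNSContractAddress(NNSPolicyContractName)
	if err != nil {
		return util.Uint160{}, err
	}
	// Later lookups reuse the cached hash until cache.invalidate() runs.
	return addr, nil
}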

@ -38,7 +38,8 @@ type (
alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness
proxy util.Uint160
notary util.Uint160
proxy util.Uint160
}
notaryCfg struct {
@ -101,6 +102,7 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error {
txValidTime: cfg.txValidTime,
roundTime: cfg.roundTime,
alphabetSource: cfg.alphabetSource,
notary: notary.Hash,
}
c.notary = notaryCfg
@ -186,7 +188,7 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8)
func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
txHash, vub, err := c.gasToken.Transfer(
c.accAddr,
notary.Hash,
c.notary.notary,
big.NewInt(int64(amount)),
[]any{c.acc.PrivateKey().GetScriptHash(), till})
if err != nil {

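A short usage sketch of the deposit path, now going through the notary hash cached in notaryCfg (the amount and helper name are illustrative; EnableNotarySupport must have been called):

func depositSketch(ctx context.Context, cli *Client) error {
	// Transfers GAS to the hash stored in c.notary.notary at
	// EnableNotarySupport time instead of the package-level notary.Hash.
	txHash, vub, err := cli.DepositEndlessNotary(ctx, fixedn.Fixed8FromInt64(50))
	if err != nil {
		return err
	}
	_, _ = txHash, vub
	return nil
}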

@ -66,8 +66,8 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address
grpc.WithChainUnaryInterceptor(
qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInterceptor(),
tagging.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
tagging.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),


@ -0,0 +1,166 @@
package v2
import (
"context"
"crypto/ecdsa"
"errors"
"testing"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
type testLocalStorage struct {
t *testing.T
expAddr oid.Address
obj *objectSDK.Object
err error
}
func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
return s.obj, s.err
}
func testXHeaders(strs ...string) []session.XHeader {
res := make([]session.XHeader, len(strs)/2)
for i := 0; i < len(strs); i += 2 {
res[i/2].SetKey(strs[i])
res[i/2].SetValue(strs[i+1])
}
return res
}
func TestHeadRequest(t *testing.T) {
req := new(objectV2.HeadRequest)
meta := new(session.RequestMetaHeader)
req.SetMetaHeader(meta)
body := new(objectV2.HeadRequestBody)
req.SetBody(body)
addr := oidtest.Address()
var addrV2 refs.Address
addr.WriteToV2(&addrV2)
body.SetAddress(&addrV2)
xKey := "x-key"
xVal := "x-val"
xHdrs := testXHeaders(
xKey, xVal,
)
meta.SetXHeaders(xHdrs)
obj := objectSDK.New()
attrKey := "attr_key"
attrVal := "attr_val"
var attr objectSDK.Attribute
attr.SetKey(attrKey)
attr.SetValue(attrVal)
obj.SetAttributes(attr)
table := new(eaclSDK.Table)
priv, err := keys.NewPrivateKey()
require.NoError(t, err)
senderKey := priv.PublicKey()
r := eaclSDK.NewRecord()
r.SetOperation(eaclSDK.OperationHead)
r.SetAction(eaclSDK.ActionDeny)
r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal)
r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal)
eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
table.AddRecord(r)
lStorage := &testLocalStorage{
t: t,
expAddr: addr,
obj: obj,
}
id := addr.Object()
newSource := func(t *testing.T) eaclSDK.TypedHeaderSource {
hdrSrc, err := NewMessageHeaderSource(
lStorage,
NewRequestXHeaderSource(req),
addr.Container(),
WithOID(&id))
require.NoError(t, err)
return hdrSrc
}
cnr := addr.Container()
unit := new(eaclSDK.ValidationUnit).
WithContainerID(&cnr).
WithOperation(eaclSDK.OperationHead).
WithSenderKey(senderKey.Bytes()).
WithEACLTable(table)
validator := eaclSDK.NewValidator()
checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t)))
meta.SetXHeaders(nil)
checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
meta.SetXHeaders(xHdrs)
obj.SetAttributes()
checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
lStorage.err = errors.New("any error")
checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
r.SetAction(eaclSDK.ActionAllow)
rID := eaclSDK.NewRecord()
rID.SetOperation(eaclSDK.OperationHead)
rID.SetAction(eaclSDK.ActionDeny)
rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object())
eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
table = eaclSDK.NewTable()
table.AddRecord(r)
table.AddRecord(rID)
unit.WithEACLTable(table)
checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
}
func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
actual, fromRule := v.CalculateAction(u)
require.True(t, fromRule)
require.Equal(t, expected, actual)
}
func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
actual, fromRule := v.CalculateAction(u)
require.False(t, fromRule)
require.Equal(t, eaclSDK.ActionAllow, actual)
}


@ -0,0 +1,246 @@
package v2
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
type Option func(*cfg)
type cfg struct {
storage ObjectStorage
msg XHeaderSource
cnr cid.ID
obj *oid.ID
}
type ObjectStorage interface {
Head(context.Context, oid.Address) (*objectSDK.Object, error)
}
type Request interface {
GetMetaHeader() *session.RequestMetaHeader
}
type Response interface {
GetMetaHeader() *session.ResponseMetaHeader
}
type headerSource struct {
requestHeaders []eaclSDK.Header
objectHeaders []eaclSDK.Header
incompleteObjectHeaders bool
}
func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) {
cfg := &cfg{
storage: os,
cnr: cnrID,
msg: xhs,
}
for i := range opts {
opts[i](cfg)
}
if cfg.msg == nil {
return nil, errors.New("message is not provided")
}
var res headerSource
err := cfg.readObjectHeaders(&res)
if err != nil {
return nil, err
}
res.requestHeaders = cfg.msg.GetXHeaders()
return res, nil
}
func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) {
switch typ {
default:
return nil, true
case eaclSDK.HeaderFromRequest:
return h.requestHeaders, true
case eaclSDK.HeaderFromObject:
return h.objectHeaders, !h.incompleteObjectHeaders
}
}
type xHeader session.XHeader
func (x xHeader) Key() string {
return (*session.XHeader)(&x).GetKey()
}
func (x xHeader) Value() string {
return (*session.XHeader)(&x).GetValue()
}
var errMissingOID = errors.New("object ID is missing")
func (h *cfg) readObjectHeaders(dst *headerSource) error {
switch m := h.msg.(type) {
default:
panic(fmt.Sprintf("unexpected message type %T", h.msg))
case requestXHeaderSource:
return h.readObjectHeadersFromRequestXHeaderSource(m, dst)
case responseXHeaderSource:
return h.readObjectHeadersResponseXHeaderSource(m, dst)
}
}
func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error {
switch req := m.req.(type) {
case
*objectV2.GetRequest,
*objectV2.HeadRequest:
if h.obj == nil {
return errMissingOID
}
objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
dst.objectHeaders = objHeaders
dst.incompleteObjectHeaders = !completed
case
*objectV2.GetRangeRequest,
*objectV2.GetRangeHashRequest,
*objectV2.DeleteRequest:
if h.obj == nil {
return errMissingOID
}
dst.objectHeaders = addressHeaders(h.cnr, h.obj)
case *objectV2.PutRequest:
if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
oV2 := new(objectV2.Object)
oV2.SetObjectID(v.GetObjectID())
oV2.SetHeader(v.GetHeader())
dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
}
case *objectV2.PutSingleRequest:
dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj)
case *objectV2.SearchRequest:
cnrV2 := req.GetBody().GetContainerID()
var cnr cid.ID
if cnrV2 != nil {
if err := cnr.ReadFromV2(*cnrV2); err != nil {
return fmt.Errorf("can't parse container ID: %w", err)
}
}
dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
}
return nil
}
func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error {
switch resp := m.resp.(type) {
default:
objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
dst.objectHeaders = objectHeaders
dst.incompleteObjectHeaders = !completed
case *objectV2.GetResponse:
if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
oV2 := new(objectV2.Object)
oV2.SetObjectID(v.GetObjectID())
oV2.SetHeader(v.GetHeader())
dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
}
case *objectV2.HeadResponse:
oV2 := new(objectV2.Object)
var hdr *objectV2.Header
switch v := resp.GetBody().GetHeaderPart().(type) {
case *objectV2.ShortHeader:
hdr = new(objectV2.Header)
var idV2 refsV2.ContainerID
h.cnr.WriteToV2(&idV2)
hdr.SetContainerID(&idV2)
hdr.SetVersion(v.GetVersion())
hdr.SetCreationEpoch(v.GetCreationEpoch())
hdr.SetOwnerID(v.GetOwnerID())
hdr.SetObjectType(v.GetObjectType())
hdr.SetPayloadLength(v.GetPayloadLength())
case *objectV2.HeaderWithSignature:
hdr = v.GetHeader()
}
oV2.SetHeader(hdr)
dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
}
return nil
}
func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) {
if idObj != nil {
var addr oid.Address
addr.SetContainer(cnr)
addr.SetObject(*idObj)
obj, err := h.storage.Head(context.TODO(), addr)
if err == nil {
return headersFromObject(obj, cnr, idObj), true
}
}
return addressHeaders(cnr, idObj), false
}
func cidHeader(idCnr cid.ID) sysObjHdr {
return sysObjHdr{
k: acl.FilterObjectContainerID,
v: idCnr.EncodeToString(),
}
}
func oidHeader(obj oid.ID) sysObjHdr {
return sysObjHdr{
k: acl.FilterObjectID,
v: obj.EncodeToString(),
}
}
func ownerIDHeader(ownerID user.ID) sysObjHdr {
return sysObjHdr{
k: acl.FilterObjectOwnerID,
v: ownerID.EncodeToString(),
}
}
func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
hh := make([]eaclSDK.Header, 0, 2)
hh = append(hh, cidHeader(cnr))
if oid != nil {
hh = append(hh, oidHeader(*oid))
}
return hh
}


@ -0,0 +1,92 @@
package v2
import (
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
type sysObjHdr struct {
k, v string
}
func (s sysObjHdr) Key() string {
return s.k
}
func (s sysObjHdr) Value() string {
return s.v
}
func u64Value(v uint64) string {
return strconv.FormatUint(v, 10)
}
func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
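// Capacity note: each object in the split chain contributes at most
// 9 system headers below (container ID, creation epoch, payload length,
// version, type, object ID, owner ID, payload checksum, homomorphic hash)
// plus its attributes.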
var count int
for obj := obj; obj != nil; obj = obj.Parent() {
count += 9 + len(obj.Attributes())
}
res := make([]eaclSDK.Header, 0, count)
for ; obj != nil; obj = obj.Parent() {
res = append(res,
cidHeader(cnr),
// creation epoch
sysObjHdr{
k: acl.FilterObjectCreationEpoch,
v: u64Value(obj.CreationEpoch()),
},
// payload size
sysObjHdr{
k: acl.FilterObjectPayloadLength,
v: u64Value(obj.PayloadSize()),
},
// object version
sysObjHdr{
k: acl.FilterObjectVersion,
v: obj.Version().String(),
},
// object type
sysObjHdr{
k: acl.FilterObjectType,
v: obj.Type().String(),
},
)
if oid != nil {
res = append(res, oidHeader(*oid))
}
if idOwner := obj.OwnerID(); !idOwner.IsEmpty() {
res = append(res, ownerIDHeader(idOwner))
}
cs, ok := obj.PayloadChecksum()
if ok {
res = append(res, sysObjHdr{
k: acl.FilterObjectPayloadHash,
v: cs.String(),
})
}
cs, ok = obj.PayloadHomomorphicHash()
if ok {
res = append(res, sysObjHdr{
k: acl.FilterObjectHomomorphicHash,
v: cs.String(),
})
}
attrs := obj.Attributes()
for i := range attrs {
res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface
}
}
return res
}


@ -0,0 +1,11 @@
package v2
import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
func WithOID(v *oid.ID) Option {
return func(c *cfg) {
c.obj = v
}
}

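A usage sketch combining the option with the constructor, mirroring the test setup earlier in this diff (lStorage, req and addr are assumed to exist):

id := addr.Object()
hdrSrc, err := NewMessageHeaderSource(
	lStorage,                     // ObjectStorage implementation
	NewRequestXHeaderSource(req), // X-headers from the request chain
	addr.Container(),
	WithOID(&id))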

@ -0,0 +1,69 @@
package v2
import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
)
type XHeaderSource interface {
GetXHeaders() []eaclSDK.Header
}
type requestXHeaderSource struct {
req Request
}
func NewRequestXHeaderSource(req Request) XHeaderSource {
return requestXHeaderSource{req: req}
}
type responseXHeaderSource struct {
resp Response
req Request
}
func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource {
return responseXHeaderSource{resp: resp, req: req}
}
func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header {
ln := 0
for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
ln += len(meta.GetXHeaders())
}
res := make([]eaclSDK.Header, 0, ln)
for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
x := meta.GetXHeaders()
for i := range x {
res = append(res, (xHeader)(x[i]))
}
}
return res
}
func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header {
ln := 0
xHdrs := make([][]session.XHeader, 0)
for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
x := meta.GetXHeaders()
ln += len(x)
xHdrs = append(xHdrs, x)
}
res := make([]eaclSDK.Header, 0, ln)
for i := range xHdrs {
for j := range xHdrs[i] {
res = append(res, xHeader(xHdrs[i][j]))
}
}
return res
}

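A minimal consumption sketch (the helper is illustrative; an fmt import is assumed):

func dumpXHeaders(req Request) {
	// X-headers are gathered across the whole chain of origin meta headers.
	for _, h := range NewRequestXHeaderSource(req).GetXHeaders() {
		fmt.Println(h.Key(), h.Value())
	}
}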

@ -0,0 +1,20 @@
package v2
import (
"fmt"
)
const invalidRequestMessage = "malformed request"
func malformedRequestError(reason string) error {
return fmt.Errorf("%s: %s", invalidRequestMessage, reason)
}
var (
errEmptyBody = malformedRequestError("empty body")
errEmptyVerificationHeader = malformedRequestError("empty verification header")
errEmptyBodySig = malformedRequestError("empty body signature")
errInvalidSessionSig = malformedRequestError("invalid session token signature")
errInvalidSessionOwner = malformedRequestError("invalid session token owner")
errInvalidVerb = malformedRequestError("session token verb is invalid")
)


@ -0,0 +1,12 @@
package v2
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)
// WithLogger returns option to set logger.
func WithLogger(v *logger.Logger) Option {
return func(c *cfg) {
c.log = v
}
}

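The option plugs into the ACL service constructor defined later in this diff; a hedged sketch (the dependencies are assumed to be constructed already):

svc := New(next, netmapSource, irFetcher, containerSource, WithLogger(log))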

@ -0,0 +1,152 @@
package v2
import (
"crypto/ecdsa"
"fmt"
sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
// RequestInfo groups parsed version-independent request information
// (from the SDK library) and the raw API request.
type RequestInfo struct {
basicACL acl.Basic
requestRole acl.Role
operation acl.Op // put, get, head, etc.
cnrOwner user.ID // container owner
// cnrNamespace defines the namespace the container belongs to.
cnrNamespace string
idCnr cid.ID
// optional for some requests,
// e.g. Put, Search
obj *oid.ID
senderKey []byte
bearer *bearer.Token // bearer token of request
srcRequest any
}
func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) {
r.basicACL = basicACL
}
func (r *RequestInfo) SetRequestRole(requestRole acl.Role) {
r.requestRole = requestRole
}
func (r *RequestInfo) SetSenderKey(senderKey []byte) {
r.senderKey = senderKey
}
// Request returns raw API request.
func (r RequestInfo) Request() any {
return r.srcRequest
}
// ContainerOwner returns the owner of the container.
func (r RequestInfo) ContainerOwner() user.ID {
return r.cnrOwner
}
func (r RequestInfo) ContainerNamespace() string {
return r.cnrNamespace
}
// ObjectID returns the object ID.
func (r RequestInfo) ObjectID() *oid.ID {
return r.obj
}
// ContainerID returns the container ID.
func (r RequestInfo) ContainerID() cid.ID {
return r.idCnr
}
// CleanBearer forces cleaning bearer token information.
func (r *RequestInfo) CleanBearer() {
r.bearer = nil
}
// Bearer returns bearer token of the request.
func (r RequestInfo) Bearer() *bearer.Token {
return r.bearer
}
// BasicACL returns basic ACL of the container.
func (r RequestInfo) BasicACL() acl.Basic {
return r.basicACL
}
// SenderKey returns public key of the request's sender.
func (r RequestInfo) SenderKey() []byte {
return r.senderKey
}
// Operation returns request's operation.
func (r RequestInfo) Operation() acl.Op {
return r.operation
}
// RequestRole returns request sender's role.
func (r RequestInfo) RequestRole() acl.Role {
return r.requestRole
}
// MetaWithToken groups session and bearer tokens,
// verification header and raw API request.
type MetaWithToken struct {
vheader *sessionV2.RequestVerificationHeader
token *sessionSDK.Object
bearer *bearer.Token
src any
}
// RequestOwner returns ownerID and its public key
// according to internal meta information.
func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) {
if r.vheader == nil {
return nil, nil, errEmptyVerificationHeader
}
if r.bearer != nil && r.bearer.Impersonate() {
return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes())
}
// if a session token is present, use it as the source of truth
if r.token != nil {
// verify signature of session token
return ownerFromToken(r.token)
}
// otherwise get original body signature
bodySignature := originalBodySignature(r.vheader)
if bodySignature == nil {
return nil, nil, errEmptyBodySig
}
return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
}
func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
key, err := unmarshalPublicKey(rawKey)
if err != nil {
return nil, nil, fmt.Errorf("invalid signature key: %w", err)
}
var idSender user.ID
user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
return &idSender, key, nil
}

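Owner resolution above follows a fixed precedence: an impersonating bearer token wins, then a verified session token, then the original body signature. A short exercising sketch (request, sTok and bTok are assumed):

m := MetaWithToken{
	vheader: request.GetVerificationHeader(),
	token:   sTok, // may be nil
	bearer:  bTok, // may be nil
	src:     request,
}
ownerID, ownerKey, err := m.RequestOwner()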

@ -1,4 +1,4 @@
package ape
package v2
import (
"testing"
@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) {
vh.SetBodySignature(&userSignature)
t.Run("empty verification header", func(t *testing.T) {
req := Metadata{}
req := MetaWithToken{}
checkOwner(t, req, nil, errEmptyVerificationHeader)
})
t.Run("empty verification header signature", func(t *testing.T) {
req := Metadata{
VerificationHeader: new(sessionV2.RequestVerificationHeader),
req := MetaWithToken{
vheader: new(sessionV2.RequestVerificationHeader),
}
checkOwner(t, req, nil, errEmptyBodySig)
})
t.Run("no tokens", func(t *testing.T) {
req := Metadata{
VerificationHeader: vh,
req := MetaWithToken{
vheader: vh,
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer without impersonate, no session", func(t *testing.T) {
req := Metadata{
VerificationHeader: vh,
BearerToken: newBearer(t, containerOwner, userID, false),
req := MetaWithToken{
vheader: vh,
bearer: newBearer(t, containerOwner, userID, false),
}
checkOwner(t, req, userPk.PublicKey(), nil)
})
t.Run("bearer with impersonate, no session", func(t *testing.T) {
req := Metadata{
VerificationHeader: vh,
BearerToken: newBearer(t, containerOwner, userID, true),
req := MetaWithToken{
vheader: vh,
bearer: newBearer(t, containerOwner, userID, true),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
req := Metadata{
VerificationHeader: vh,
BearerToken: newBearer(t, containerOwner, userID, true),
SessionToken: newSession(t, pk),
req := MetaWithToken{
vheader: vh,
bearer: newBearer(t, containerOwner, userID, true),
token: newSession(t, pk),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
t.Run("with session", func(t *testing.T) {
req := Metadata{
VerificationHeader: vh,
SessionToken: newSession(t, containerOwner),
req := MetaWithToken{
vheader: vh,
token: newSession(t, containerOwner),
}
checkOwner(t, req, containerOwner.PublicKey(), nil)
})
@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) {
var tok sessionSDK.Object
require.NoError(t, tok.ReadFromV2(tokV2))
req := Metadata{
VerificationHeader: vh,
SessionToken: &tok,
req := MetaWithToken{
vheader: vh,
token: &tok,
}
checkOwner(t, req, nil, errInvalidSessionOwner)
})
@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool
return &tok
}
func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) {
func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) {
_, actual, err := req.RequestOwner()
if expectedErr != nil {
require.ErrorIs(t, err, expectedErr)


@ -0,0 +1,779 @@
package v2
import (
"context"
"errors"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"go.uber.org/zap"
)
// Service checks basic ACL rules.
type Service struct {
*cfg
c objectCore.SenderClassifier
}
type putStreamBasicChecker struct {
source *Service
next object.PutObjectStream
}
type patchStreamBasicChecker struct {
source *Service
next object.PatchObjectStream
nonFirstSend bool
}
// Option represents Service constructor option.
type Option func(*cfg)
type cfg struct {
log *logger.Logger
containers container.Source
irFetcher InnerRingFetcher
nm netmap.Source
next object.ServiceServer
}
// New is a constructor for object ACL checking service.
func New(next object.ServiceServer,
nm netmap.Source,
irf InnerRingFetcher,
cs container.Source,
opts ...Option,
) Service {
cfg := &cfg{
log: logger.NewLoggerWrapper(zap.L()),
next: next,
nm: nm,
irFetcher: irf,
containers: cs,
}
for i := range opts {
opts[i](cfg)
}
return Service{
cfg: cfg,
c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
}
}
// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context.
// This allows retrieving already calculated immutable request-specific values in the next handler invocation.
type wrappedGetObjectStream struct {
object.GetObjectStream
requestInfo RequestInfo
}
func (w *wrappedGetObjectStream) Context() context.Context {
return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{
Namespace: w.requestInfo.ContainerNamespace(),
ContainerOwner: w.requestInfo.ContainerOwner(),
SenderKey: w.requestInfo.SenderKey(),
Role: w.requestInfo.RequestRole(),
BearerToken: w.requestInfo.Bearer(),
})
}
func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream {
return &wrappedGetObjectStream{
GetObjectStream: getObjectStream,
requestInfo: reqInfo,
}
}
// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context.
// This allows retrieving already calculated immutable request-specific values in the next handler invocation.
type wrappedRangeStream struct {
object.GetObjectRangeStream
requestInfo RequestInfo
}
func (w *wrappedRangeStream) Context() context.Context {
return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{
Namespace: w.requestInfo.ContainerNamespace(),
ContainerOwner: w.requestInfo.ContainerOwner(),
SenderKey: w.requestInfo.SenderKey(),
Role: w.requestInfo.RequestRole(),
BearerToken: w.requestInfo.Bearer(),
})
}
func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream {
return &wrappedRangeStream{
GetObjectRangeStream: rangeStream,
requestInfo: reqInfo,
}
}
// wrappedSearchStream propagates RequestContext into SearchStream's context.
// This allows retrieving already calculated immutable request-specific values in the next handler invocation.
type wrappedSearchStream struct {
object.SearchStream
requestInfo RequestInfo
}
func (w *wrappedSearchStream) Context() context.Context {
return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{
Namespace: w.requestInfo.ContainerNamespace(),
ContainerOwner: w.requestInfo.ContainerOwner(),
SenderKey: w.requestInfo.SenderKey(),
Role: w.requestInfo.RequestRole(),
BearerToken: w.requestInfo.Bearer(),
})
}
func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream {
return &wrappedSearchStream{
SearchStream: searchStream,
requestInfo: reqInfo,
}
}
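The context propagation above exists so that downstream handlers can read the pre-computed values back; a hedged sketch of the consuming side:

func handlerSketch(ctx context.Context) {
	rc, ok := ctx.Value(object.RequestContextKey).(*object.RequestContext)
	if !ok {
		return // no ACL service in front of this handler
	}
	_ = rc.ContainerOwner // Namespace, SenderKey, Role and BearerToken are also set
}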
// Get implements ServiceServer interface, makes ACL checks and calls
// next Get method in the ServiceServer pipeline.
func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return err
}
obj, err := getObjectIDFromRequestBody(request.GetBody())
if err != nil {
return err
}
sTok, err := originalSessionToken(request.GetMetaHeader())
if err != nil {
return err
}
if sTok != nil {
err = assertSessionRelation(*sTok, cnr, obj)
if err != nil {
return err
}
}
bTok, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return err
}
req := MetaWithToken{
vheader: request.GetVerificationHeader(),
token: sTok,
bearer: bTok,
src: request,
}
reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectGet)
if err != nil {
return err
}
reqInfo.obj = obj
return b.next.Get(request, newWrappedGetObjectStreamStream(stream, reqInfo))
}
func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) {
streamer, err := b.next.Put(ctx)
return putStreamBasicChecker{
source: &b,
next: streamer,
}, err
}
func (b Service) Patch(ctx context.Context) (object.PatchObjectStream, error) {
streamer, err := b.next.Patch(ctx)
return &patchStreamBasicChecker{
source: &b,
next: streamer,
}, err
}
func (b Service) Head(
ctx context.Context,
request *objectV2.HeadRequest,
) (*objectV2.HeadResponse, error) {
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
}
obj, err := getObjectIDFromRequestBody(request.GetBody())
if err != nil {
return nil, err
}
sTok, err := originalSessionToken(request.GetMetaHeader())
if err != nil {
return nil, err
}
if sTok != nil {
err = assertSessionRelation(*sTok, cnr, obj)
if err != nil {
return nil, err
}
}
bTok, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return nil, err
}
req := MetaWithToken{
vheader: request.GetVerificationHeader(),
token: sTok,
bearer: bTok,
src: request,
}
reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHead)
if err != nil {
return nil, err
}
reqInfo.obj = obj
return b.next.Head(requestContext(ctx, reqInfo), request)
}
func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
id, err := getContainerIDFromRequest(request)
if err != nil {
return err
}
sTok, err := originalSessionToken(request.GetMetaHeader())
if err != nil {
return err
}
if sTok != nil {
err = assertSessionRelation(*sTok, id, nil)
if err != nil {
return err
}
}
bTok, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return err
}
req := MetaWithToken{
vheader: request.GetVerificationHeader(),
token: sTok,
bearer: bTok,
src: request,
}
reqInfo, err := b.findRequestInfo(stream.Context(), req, id, acl.OpObjectSearch)
if err != nil {
return err
}
return b.next.Search(request, newWrappedSearchStream(stream, reqInfo))
}
func (b Service) Delete(
ctx context.Context,
request *objectV2.DeleteRequest,
) (*objectV2.DeleteResponse, error) {
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
}
obj, err := getObjectIDFromRequestBody(request.GetBody())
if err != nil {
return nil, err
}
sTok, err := originalSessionToken(request.GetMetaHeader())
if err != nil {
return nil, err
}
if sTok != nil {
err = assertSessionRelation(*sTok, cnr, obj)
if err != nil {
return nil, err
}
}
bTok, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return nil, err
}
req := MetaWithToken{
vheader: request.GetVerificationHeader(),
token: sTok,
bearer: bTok,
src: request,
}
reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectDelete)
if err != nil {
return nil, err
}
reqInfo.obj = obj
return b.next.Delete(requestContext(ctx, reqInfo), request)
}
func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return err
}
obj, err := getObjectIDFromRequestBody(request.GetBody())
if err != nil {
return err
}
sTok, err := originalSessionToken(request.GetMetaHeader())
if err != nil {
return err
}
if sTok != nil {
err = assertSessionRelation(*sTok, cnr, obj)
if err != nil {
return err
}
}
bTok, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return err
}
req := MetaWithToken{
vheader: request.GetVerificationHeader(),
token: sTok,
bearer: bTok,
src: request,
}
reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectRange)
if err != nil {
return err
}
reqInfo.obj = obj
return b.next.GetRange(request, newWrappedRangeStream(stream, reqInfo))
}
func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context {
return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{
Namespace: reqInfo.ContainerNamespace(),
ContainerOwner: reqInfo.ContainerOwner(),
SenderKey: reqInfo.SenderKey(),
Role: reqInfo.RequestRole(),
BearerToken: reqInfo.Bearer(),
})
}
func (b Service) GetRangeHash(
ctx context.Context,
request *objectV2.GetRangeHashRequest,
) (*objectV2.GetRangeHashResponse, error) {
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
}
obj, err := getObjectIDFromRequestBody(request.GetBody())
if err != nil {
return nil, err
}
sTok, err := originalSessionToken(request.GetMetaHeader())
if err != nil {
return nil, err
}
if sTok != nil {
err = assertSessionRelation(*sTok, cnr, obj)
if err != nil {
return nil, err
}
}
bTok, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return nil, err
}
req := MetaWithToken{
vheader: request.GetVerificationHeader(),
token: sTok,
bearer: bTok,
src: request,
}
reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHash)
if err != nil {
return nil, err
}
reqInfo.obj = obj
return b.next.GetRangeHash(requestContext(ctx, reqInfo), request)
}
func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
}
idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID()
if idV2 == nil {
return nil, errors.New("missing object owner")
}
var idOwner user.ID
err = idOwner.ReadFromV2(*idV2)
if err != nil {
return nil, fmt.Errorf("invalid object owner: %w", err)
}
obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID())
if err != nil {
return nil, err
}
var sTok *sessionSDK.Object
sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
if err != nil {
return nil, err
}
bTok, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return nil, err
}
req := MetaWithToken{
vheader: request.GetVerificationHeader(),
token: sTok,
bearer: bTok,
src: request,
}
reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectPut)
if err != nil {
return nil, err
}
reqInfo.obj = obj
return b.next.PutSingle(requestContext(ctx, reqInfo), request)
}
func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
body := request.GetBody()
if body == nil {
return errEmptyBody
}
part := body.GetObjectPart()
if part, ok := part.(*objectV2.PutObjectPartInit); ok {
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return err
}
idV2 := part.GetHeader().GetOwnerID()
if idV2 == nil {
return errors.New("missing object owner")
}
var idOwner user.ID
err = idOwner.ReadFromV2(*idV2)
if err != nil {
return fmt.Errorf("invalid object owner: %w", err)
}
objV2 := part.GetObjectID()
var obj *oid.ID
if objV2 != nil {
obj = new(oid.ID)
err = obj.ReadFromV2(*objV2)
if err != nil {
return err
}
}
var sTok *sessionSDK.Object
sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
if err != nil {
return err
}
bTok, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return err
}
req := MetaWithToken{
vheader: request.GetVerificationHeader(),
token: sTok,
bearer: bTok,
src: request,
}
reqInfo, err := p.source.findRequestInfo(ctx, req, cnr, acl.OpObjectPut)
if err != nil {
return err
}
reqInfo.obj = obj
ctx = requestContext(ctx, reqInfo)
}
return p.next.Send(ctx, request)
}
func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
var sTok *sessionSDK.Object
if tokV2 != nil {
sTok = new(sessionSDK.Object)
err := sTok.ReadFromV2(*tokV2)
if err != nil {
return nil, fmt.Errorf("invalid session token: %w", err)
}
if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
// if the session relates to the object's removal, we don't check
// the relation of the tombstone to the session here since the user
// can't predict the tombstone's ID.
err = assertSessionRelation(*sTok, cnr, nil)
} else {
err = assertSessionRelation(*sTok, cnr, obj)
}
if err != nil {
return nil, err
}
}
return sTok, nil
}
func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
return p.next.CloseAndRecv(ctx)
}
func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
body := request.GetBody()
if body == nil {
return errEmptyBody
}
if !p.nonFirstSend {
p.nonFirstSend = true
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return err
}
objV2 := request.GetBody().GetAddress().GetObjectID()
if objV2 == nil {
return errors.New("missing oid")
}
obj := new(oid.ID)
err = obj.ReadFromV2(*objV2)
if err != nil {
return err
}
var sTok *sessionSDK.Object
sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
if err != nil {
return err
}
bTok, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return err
}
req := MetaWithToken{
vheader: request.GetVerificationHeader(),
token: sTok,
bearer: bTok,
src: request,
}
reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(ctx, req, cnr)
if err != nil {
return err
}
reqInfo.obj = obj
ctx = requestContext(ctx, reqInfo)
}
return p.next.Send(ctx, request)
}
func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
return p.next.CloseAndRecv(ctx)
}
func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container
if err != nil {
return info, err
}
if req.token != nil {
currentEpoch, err := b.nm.Epoch(ctx)
if err != nil {
return info, errors.New("can't fetch current epoch")
}
if req.token.ExpiredAt(currentEpoch) {
return info, new(apistatus.SessionTokenExpired)
}
if req.token.InvalidAt(currentEpoch) {
return info, fmt.Errorf("%s: token is invalid at %d epoch",
invalidRequestMessage, currentEpoch)
}
if !assertVerb(*req.token, op) {
return info, errInvalidVerb
}
}
// find request role and key
ownerID, ownerKey, err := req.RequestOwner()
if err != nil {
return info, err
}
res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value)
if err != nil {
return info, err
}
info.basicACL = cnr.Value.BasicACL()
info.requestRole = res.Role
info.operation = op
info.cnrOwner = cnr.Value.Owner()
info.idCnr = idCnr
cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
if hasNamespace {
info.cnrNamespace = cnrNamespace
}
// it is assumed that at the moment the key will be valid,
// otherwise the request would not pass validation
info.senderKey = res.Key
// add bearer token if it is present in request
info.bearer = req.bearer
info.srcRequest = req.src
return info, nil
}
// findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert.
func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container
if err != nil {
return info, err
}
if req.token != nil {
currentEpoch, err := b.nm.Epoch(ctx)
if err != nil {
return info, errors.New("can't fetch current epoch")
}
if req.token.ExpiredAt(currentEpoch) {
return info, new(apistatus.SessionTokenExpired)
}
if req.token.InvalidAt(currentEpoch) {
return info, fmt.Errorf("%s: token is invalid at %d epoch",
invalidRequestMessage, currentEpoch)
}
}
// find request role and key
ownerID, ownerKey, err := req.RequestOwner()
if err != nil {
return info, err
}
res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value)
if err != nil {
return info, err
}
info.basicACL = cnr.Value.BasicACL()
info.requestRole = res.Role
info.cnrOwner = cnr.Value.Owner()
info.idCnr = idCnr
cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
if hasNamespace {
info.cnrNamespace = cnrNamespace
}
// it is assumed that at the moment the key will be valid,
// otherwise the request would not pass validation
info.senderKey = res.Key
// add bearer token if it is present in request
info.bearer = req.bearer
info.srcRequest = req.src
return info, nil
}

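The two lookups above differ only in the session-token verb assertion: the first Send of a patch stream skips it because a patch-capable token already satisfies several operations in assertVerb, so there is no single acl.Op to assert. A small illustration (see assertVerb later in this diff):

var tok sessionSDK.Object
tok.ForVerb(sessionSDK.VerbObjectPatch)
_ = assertVerb(tok, acl.OpObjectPut)   // true: patch tokens pass the put check
_ = assertVerb(tok, acl.OpObjectHead)  // true
_ = assertVerb(tok, acl.OpObjectRange) // true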

@ -0,0 +1,11 @@
package v2
import "context"
// InnerRingFetcher is an interface that must provide
// Inner Ring information.
type InnerRingFetcher interface {
// InnerRingKeys must return the list of public keys of
// the current inner ring.
InnerRingKeys(ctx context.Context) ([][]byte, error)
}


@ -1,4 +1,4 @@
package ape
package v2
import (
"crypto/ecdsa"
@ -6,34 +6,57 @@ import (
"errors"
"fmt"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
if cidV2 != nil {
if err = cnrID.ReadFromV2(*cidV2); err != nil {
return
var errMissingContainerID = errors.New("missing container ID")
func getContainerIDFromRequest(req any) (cid.ID, error) {
var idV2 *refsV2.ContainerID
var id cid.ID
switch v := req.(type) {
case *objectV2.GetRequest:
idV2 = v.GetBody().GetAddress().GetContainerID()
case *objectV2.PutRequest:
part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit)
if !ok {
return cid.ID{}, errors.New("can't get container ID in chunk")
}
} else {
err = errMissingContainerID
return
idV2 = part.GetHeader().GetContainerID()
case *objectV2.HeadRequest:
idV2 = v.GetBody().GetAddress().GetContainerID()
case *objectV2.SearchRequest:
idV2 = v.GetBody().GetContainerID()
case *objectV2.DeleteRequest:
idV2 = v.GetBody().GetAddress().GetContainerID()
case *objectV2.GetRangeRequest:
idV2 = v.GetBody().GetAddress().GetContainerID()
case *objectV2.GetRangeHashRequest:
idV2 = v.GetBody().GetAddress().GetContainerID()
case *objectV2.PutSingleRequest:
idV2 = v.GetBody().GetObject().GetHeader().GetContainerID()
case *objectV2.PatchRequest:
idV2 = v.GetBody().GetAddress().GetContainerID()
default:
return cid.ID{}, errors.New("unknown request type")
}
if objV2 != nil {
objID = new(oid.ID)
if err = objID.ReadFromV2(*objV2); err != nil {
return
}
if idV2 == nil {
return cid.ID{}, errMissingContainerID
}
return
return id, id.ReadFromV2(*idV2)
}
// originalBearerToken goes down to original request meta header and fetches
@ -52,6 +75,50 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er
return &tok, tok.ReadFromV2(*tokV2)
}
// originalSessionToken goes down to original request meta header and fetches
// session token from there.
func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) {
for header.GetOrigin() != nil {
header = header.GetOrigin()
}
tokV2 := header.GetSessionToken()
if tokV2 == nil {
return nil, nil
}
var tok sessionSDK.Object
err := tok.ReadFromV2(*tokV2)
if err != nil {
return nil, fmt.Errorf("invalid session token: %w", err)
}
return &tok, nil
}
// getObjectIDFromRequestBody decodes oid.ID from the common interface of
// object reference holders. Returns an error if the object ID is missing in the request.
func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) {
idV2 := body.GetAddress().GetObjectID()
return getObjectIDFromRefObjectID(idV2)
}
func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) {
if idV2 == nil {
return nil, errors.New("missing object ID")
}
var id oid.ID
err := id.ReadFromV2(*idV2)
if err != nil {
return nil, err
}
return &id, nil
}
func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) {
// 1. First check signature of session token.
if !token.VerifySignature() {
@ -105,16 +172,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
return id2.Equals(id)
}
// assertVerb checks that token verb corresponds to the method.
func assertVerb(tok sessionSDK.Object, method string) bool {
switch method {
case nativeschema.MethodPutObject:
// assertVerb checks that token verb corresponds to op.
func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
switch op {
case acl.OpObjectPut:
return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch)
case nativeschema.MethodDeleteObject:
case acl.OpObjectDelete:
return tok.AssertVerb(sessionSDK.VerbObjectDelete)
case nativeschema.MethodGetObject:
case acl.OpObjectGet:
return tok.AssertVerb(sessionSDK.VerbObjectGet)
case nativeschema.MethodHeadObject:
case acl.OpObjectHead:
return tok.AssertVerb(
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectGet,
@ -123,15 +190,14 @@ func assertVerb(tok sessionSDK.Object, method string) bool {
sessionSDK.VerbObjectRangeHash,
sessionSDK.VerbObjectPatch,
)
case nativeschema.MethodSearchObject:
case acl.OpObjectSearch:
return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete)
case nativeschema.MethodRangeObject:
case acl.OpObjectRange:
return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch)
case nativeschema.MethodHashObject:
case acl.OpObjectHash:
return tok.AssertVerb(sessionSDK.VerbObjectRangeHash)
case nativeschema.MethodPatchObject:
return tok.AssertVerb(sessionSDK.VerbObjectPatch)
}
return false
}
@ -155,15 +221,3 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error
return nil
}
func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
key, err := unmarshalPublicKey(rawKey)
if err != nil {
return nil, nil, fmt.Errorf("invalid signature key: %w", err)
}
var idSender user.ID
user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
return &idSender, key, nil
}


@ -0,0 +1,131 @@
package v2
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"slices"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
"github.com/stretchr/testify/require"
)
func TestOriginalTokens(t *testing.T) {
sToken := sessiontest.ObjectSigned()
bToken := bearertest.Token()
pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
require.NoError(t, bToken.Sign(*pk))
var bTokenV2 acl.BearerToken
bToken.WriteToV2(&bTokenV2)
// This line is needed because the SDK uses a custom format for
// reserved filters, so `cid.ID` is not converted to a string immediately.
require.NoError(t, bToken.ReadFromV2(bTokenV2))
var sTokenV2 session.Token
sToken.WriteToV2(&sTokenV2)
for i := range 10 {
metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
res, err := originalSessionToken(metaHeaders)
require.NoError(t, err)
require.Equal(t, sToken, res, i)
bTok, err := originalBearerToken(metaHeaders)
require.NoError(t, err)
require.Equal(t, &bToken, bTok, i)
}
}
func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader {
metaHeader := new(session.RequestMetaHeader)
metaHeader.SetBearerToken(b)
metaHeader.SetSessionToken(s)
for range depth {
link := metaHeader
metaHeader = new(session.RequestMetaHeader)
metaHeader.SetOrigin(link)
}
return metaHeader
}
func TestIsVerbCompatible(t *testing.T) {
// Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28
table := map[aclsdk.Op][]sessionSDK.ObjectVerb{
aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete},
aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete},
aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet},
aclsdk.OpObjectHead: {
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectGet,
sessionSDK.VerbObjectDelete,
sessionSDK.VerbObjectRange,
sessionSDK.VerbObjectRangeHash,
},
aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash},
aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash},
aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
}
verbs := []sessionSDK.ObjectVerb{
sessionSDK.VerbObjectPut,
sessionSDK.VerbObjectDelete,
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectRange,
sessionSDK.VerbObjectRangeHash,
sessionSDK.VerbObjectGet,
sessionSDK.VerbObjectSearch,
}
var tok sessionSDK.Object
for op, list := range table {
for _, verb := range verbs {
contains := slices.Contains(list, verb)
tok.ForVerb(verb)
require.Equal(t, contains, assertVerb(tok, op),
"%v in token, %s executing", verb, op)
}
}
}
func TestAssertSessionRelation(t *testing.T) {
var tok sessionSDK.Object
cnr := cidtest.ID()
cnrOther := cidtest.ID()
obj := oidtest.ID()
objOther := oidtest.ID()
// make sure the ids differ, otherwise the test won't work correctly
require.False(t, cnrOther.Equals(cnr))
require.False(t, objOther.Equals(obj))
// bind session to the container (required)
tok.BindContainer(cnr)
// test container-global session
require.NoError(t, assertSessionRelation(tok, cnr, nil))
require.NoError(t, assertSessionRelation(tok, cnr, &obj))
require.Error(t, assertSessionRelation(tok, cnrOther, nil))
require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
// limit the session to the particular object
tok.LimitByObjects(obj)
// test fixed object session (here obj arg must be non-nil everywhere)
require.NoError(t, assertSessionRelation(tok, cnr, &obj))
require.Error(t, assertSessionRelation(tok, cnr, &objOther))
}


@ -7,21 +7,6 @@ import (
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
var (
errMissingContainerID = malformedRequestError("missing container ID")
errEmptyVerificationHeader = malformedRequestError("empty verification header")
errEmptyBodySig = malformedRequestError("empty at body signature")
errInvalidSessionSig = malformedRequestError("invalid session token signature")
errInvalidSessionOwner = malformedRequestError("invalid session token owner")
errInvalidVerb = malformedRequestError("session token verb is invalid")
)
func malformedRequestError(reason string) error {
invalidArgErr := &apistatus.InvalidArgument{}
invalidArgErr.SetMessage(reason)
return invalidArgErr
}
func toStatusErr(err error) error {
var chRouterErr *checkercore.ChainRouterError
if !errors.As(err, &chRouterErr) {


@ -1,172 +0,0 @@
package ape
import (
"context"
"encoding/hex"
"errors"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
type Metadata struct {
Container cid.ID
Object *oid.ID
MetaHeader *session.RequestMetaHeader
VerificationHeader *session.RequestVerificationHeader
SessionToken *sessionSDK.Object
BearerToken *bearer.Token
}
func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
if m.VerificationHeader == nil {
return nil, nil, errEmptyVerificationHeader
}
if m.BearerToken != nil && m.BearerToken.Impersonate() {
return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
}
// if session token is presented, use it as truth source
if m.SessionToken != nil {
// verify signature of session token
return ownerFromToken(m.SessionToken)
}
// otherwise get original body signature
bodySignature := originalBodySignature(m.VerificationHeader)
if bodySignature == nil {
return nil, nil, errEmptyBodySig
}
return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
}
// RequestInfo contains request information extracted by request metadata.
type RequestInfo struct {
// Role defines under which role this request is executed.
// It must be represented only as a constant represented in native schema.
Role string
ContainerOwner user.ID
// Namespace defines to which namespace a container is belonged.
Namespace string
// HEX-encoded sender key.
SenderKey string
}
type RequestInfoExtractor interface {
GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
}
type extractor struct {
containers container.Source
nm netmap.Source
classifier objectCore.SenderClassifier
}
func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
return &extractor{
containers: containers,
nm: nm,
classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
}
}
func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
currentEpoch, err := e.nm.Epoch(ctx)
if err != nil {
return errors.New("can't fetch current epoch")
}
if sessionToken.ExpiredAt(currentEpoch) {
return new(apistatus.SessionTokenExpired)
}
if sessionToken.InvalidAt(currentEpoch) {
return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch)
}
if !assertVerb(*sessionToken, method) {
return errInvalidVerb
}
return nil
}
func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
cnr, err := e.containers.Get(ctx, m.Container)
if err != nil {
return ri, err
}
if m.SessionToken != nil {
if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
return ri, err
}
}
ownerID, ownerKey, err := m.RequestOwner()
if err != nil {
return ri, err
}
res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
if err != nil {
return ri, err
}
ri.Role = nativeSchemaRole(res.Role)
ri.ContainerOwner = cnr.Value.Owner()
cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
if hasNamespace {
ri.Namespace = cnrNamespace
}
// the key is assumed to be valid at this point;
// otherwise, the request would not have passed validation
ri.SenderKey = hex.EncodeToString(res.Key)
return ri, nil
}
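// A hedged worked example of the namespace extraction above (the zone
// value is illustrative): for a container domain zone "tenant1.ns",
// strings.CutSuffix yields ("tenant1", true), so ri.Namespace becomes
// "tenant1"; for a zone without the ".ns" suffix, ok is false and the
// namespace stays empty.
//
//	zone := cnrSDK.ReadDomain(cnr.Value).Zone() // e.g. "tenant1.ns"
//	ns, ok := strings.CutSuffix(zone, ".ns")    // "tenant1", true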
func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
var sTok *sessionSDK.Object
if tokV2 != nil {
sTok = new(sessionSDK.Object)
err := sTok.ReadFromV2(*tokV2)
if err != nil {
return nil, fmt.Errorf("invalid session token: %w", err)
}
if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
// if the session relates to the object's removal, we don't check
// the relation of the tombstone to the session here, since the user
// can't predict the tombstone's ID.
err = assertSessionRelation(*sTok, cnr, nil)
} else {
err = assertSessionRelation(*sTok, cnr, obj)
}
if err != nil {
return nil, err
}
}
return sTok, nil
}

View file

@ -2,6 +2,9 @@ package ape
import (
"context"
"encoding/hex"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@ -9,18 +12,19 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)
var errFailedToCastToRequestContext = errors.New("failed to cast to RequestContext")
type Service struct {
apeChecker Checker
extractor RequestInfoExtractor
next objectSvc.ServiceServer
}
@ -60,10 +64,9 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service)
}
}
func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service {
func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service {
return &Service{
apeChecker: apeChecker,
extractor: extractor,
next: next,
}
}
@ -73,9 +76,15 @@ type getStreamBasicChecker struct {
apeChecker Checker
metadata Metadata
namespace string
reqInfo RequestInfo
senderKey []byte
containerOwner user.ID
role string
bearerToken *bearer.Token
}
func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
@ -86,15 +95,15 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
}
prm := Prm{
Namespace: g.reqInfo.Namespace,
Namespace: g.namespace,
Container: cnrID,
Object: objID,
Header: partInit.GetHeader(),
Method: nativeschema.MethodGetObject,
SenderKey: g.reqInfo.SenderKey,
ContainerOwner: g.reqInfo.ContainerOwner,
Role: g.reqInfo.Role,
BearerToken: g.metadata.BearerToken,
SenderKey: hex.EncodeToString(g.senderKey),
ContainerOwner: g.containerOwner,
Role: g.role,
BearerToken: g.bearerToken,
XHeaders: resp.GetMetaHeader().GetXHeaders(),
}
@ -105,53 +114,69 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
return g.GetObjectStream.Send(resp)
}
func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) {
untyped := ctx.Value(objectSvc.RequestContextKey)
if untyped == nil {
return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey)
}
rc, ok := untyped.(*objectSvc.RequestContext)
if !ok {
return nil, errFailedToCastToRequestContext
}
return rc, nil
}
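// A minimal sketch of the expected producer side (assuming an upstream
// ACL/session middleware; prepareContext is a hypothetical name): the
// value stored under objectSvc.RequestContextKey must be of type
// *objectSvc.RequestContext, otherwise the type assertion above fails
// with errFailedToCastToRequestContext.
//
//	func prepareContext(ctx context.Context, rc *objectSvc.RequestContext) context.Context {
//		return context.WithValue(ctx, objectSvc.RequestContextKey, rc)
//	}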
func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error {
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
reqCtx, err := requestContext(stream.Context())
if err != nil {
return err
}
reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject)
if err != nil {
return err
return toStatusErr(err)
}
return c.next.Get(request, &getStreamBasicChecker{
GetObjectStream: stream,
apeChecker: c.apeChecker,
metadata: md,
reqInfo: reqInfo,
namespace: reqCtx.Namespace,
senderKey: reqCtx.SenderKey,
containerOwner: reqCtx.ContainerOwner,
role: nativeSchemaRole(reqCtx.Role),
bearerToken: reqCtx.BearerToken,
})
}
type putStreamBasicChecker struct {
apeChecker Checker
extractor RequestInfoExtractor
next objectSvc.PutObjectStream
}
func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
meta := request.GetMetaHeader()
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
meta = origin
}
if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
reqCtx, err := requestContext(ctx)
if err != nil {
return err
return toStatusErr(err)
}
reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
if err != nil {
return err
return toStatusErr(err)
}
prm := Prm{
Namespace: reqInfo.Namespace,
Container: md.Container,
Object: md.Object,
Namespace: reqCtx.Namespace,
Container: cnrID,
Object: objID,
Header: partInit.GetHeader(),
Method: nativeschema.MethodPutObject,
SenderKey: reqInfo.SenderKey,
ContainerOwner: reqInfo.ContainerOwner,
Role: reqInfo.Role,
BearerToken: md.BearerToken,
XHeaders: md.MetaHeader.GetXHeaders(),
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
Role: nativeSchemaRole(reqCtx.Role),
BearerToken: reqCtx.BearerToken,
XHeaders: meta.GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@ -171,7 +196,6 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
return &putStreamBasicChecker{
apeChecker: c.apeChecker,
extractor: c.extractor,
next: streamer,
}, err
}
@ -179,36 +203,40 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
type patchStreamBasicChecker struct {
apeChecker Checker
extractor RequestInfoExtractor
next objectSvc.PatchObjectStream
nonFirstSend bool
}
func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
meta := request.GetMetaHeader()
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
meta = origin
}
if !p.nonFirstSend {
p.nonFirstSend = true
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
reqCtx, err := requestContext(ctx)
if err != nil {
return err
return toStatusErr(err)
}
reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject)
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return err
return toStatusErr(err)
}
prm := Prm{
Namespace: reqInfo.Namespace,
Container: md.Container,
Object: md.Object,
Namespace: reqCtx.Namespace,
Container: cnrID,
Object: objID,
Method: nativeschema.MethodPatchObject,
SenderKey: reqInfo.SenderKey,
ContainerOwner: reqInfo.ContainerOwner,
Role: reqInfo.Role,
BearerToken: md.BearerToken,
XHeaders: md.MetaHeader.GetXHeaders(),
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
Role: nativeSchemaRole(reqCtx.Role),
BearerToken: reqCtx.BearerToken,
XHeaders: meta.GetXHeaders(),
}
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@ -228,17 +256,22 @@ func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error
return &patchStreamBasicChecker{
apeChecker: c.apeChecker,
extractor: c.extractor,
next: streamer,
}, err
}
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
meta := request.GetMetaHeader()
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
meta = origin
}
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject)
reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
@ -252,7 +285,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
switch headerPart := resp.GetBody().GetHeaderPart().(type) {
case *objectV2.ShortHeader:
cidV2 := new(refs.ContainerID)
md.Container.WriteToV2(cidV2)
cnrID.WriteToV2(cidV2)
header.SetContainerID(cidV2)
header.SetVersion(headerPart.GetVersion())
header.SetCreationEpoch(headerPart.GetCreationEpoch())
@ -268,16 +301,16 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
err = c.apeChecker.CheckAPE(ctx, Prm{
Namespace: reqInfo.Namespace,
Container: md.Container,
Object: md.Object,
Namespace: reqCtx.Namespace,
Container: cnrID,
Object: objID,
Header: header,
Method: nativeschema.MethodHeadObject,
Role: reqInfo.Role,
SenderKey: reqInfo.SenderKey,
ContainerOwner: reqInfo.ContainerOwner,
BearerToken: md.BearerToken,
XHeaders: md.MetaHeader.GetXHeaders(),
Role: nativeSchemaRole(reqCtx.Role),
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
BearerToken: reqCtx.BearerToken,
XHeaders: meta.GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@ -286,24 +319,32 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
}
func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
md, err := newMetadata(request, request.GetBody().GetContainerID(), nil)
if err != nil {
return err
meta := request.GetMetaHeader()
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
meta = origin
}
reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject)
var cnrID cid.ID
if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil {
if err := cnrID.ReadFromV2(*cnrV2); err != nil {
return toStatusErr(err)
}
}
reqCtx, err := requestContext(stream.Context())
if err != nil {
return err
return toStatusErr(err)
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
Namespace: reqInfo.Namespace,
Container: md.Container,
Namespace: reqCtx.Namespace,
Container: cnrID,
Method: nativeschema.MethodSearchObject,
Role: reqInfo.Role,
SenderKey: reqInfo.SenderKey,
ContainerOwner: reqInfo.ContainerOwner,
BearerToken: md.BearerToken,
XHeaders: md.MetaHeader.GetXHeaders(),
Role: nativeSchemaRole(reqCtx.Role),
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
BearerToken: reqCtx.BearerToken,
XHeaders: meta.GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@ -313,25 +354,31 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc
}
func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
meta := request.GetMetaHeader()
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
meta = origin
}
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject)
reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
err = c.apeChecker.CheckAPE(ctx, Prm{
Namespace: reqInfo.Namespace,
Container: md.Container,
Object: md.Object,
Namespace: reqCtx.Namespace,
Container: cnrID,
Object: objID,
Method: nativeschema.MethodDeleteObject,
Role: reqInfo.Role,
SenderKey: reqInfo.SenderKey,
ContainerOwner: reqInfo.ContainerOwner,
BearerToken: md.BearerToken,
XHeaders: md.MetaHeader.GetXHeaders(),
Role: nativeSchemaRole(reqCtx.Role),
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
BearerToken: reqCtx.BearerToken,
XHeaders: meta.GetXHeaders(),
})
if err != nil {
return nil, toStatusErr(err)
@ -346,25 +393,31 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (
}
func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return err
meta := request.GetMetaHeader()
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
meta = origin
}
reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject)
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return err
return toStatusErr(err)
}
reqCtx, err := requestContext(stream.Context())
if err != nil {
return toStatusErr(err)
}
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
Namespace: reqInfo.Namespace,
Container: md.Container,
Object: md.Object,
Namespace: reqCtx.Namespace,
Container: cnrID,
Object: objID,
Method: nativeschema.MethodRangeObject,
Role: reqInfo.Role,
SenderKey: reqInfo.SenderKey,
ContainerOwner: reqInfo.ContainerOwner,
BearerToken: md.BearerToken,
XHeaders: md.MetaHeader.GetXHeaders(),
Role: nativeSchemaRole(reqCtx.Role),
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
BearerToken: reqCtx.BearerToken,
XHeaders: meta.GetXHeaders(),
})
if err != nil {
return toStatusErr(err)
@ -374,25 +427,31 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G
}
func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
meta := request.GetMetaHeader()
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
meta = origin
}
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
if err != nil {
return nil, err
}
reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject)
reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
prm := Prm{
Namespace: reqInfo.Namespace,
Container: md.Container,
Object: md.Object,
Namespace: reqCtx.Namespace,
Container: cnrID,
Object: objID,
Method: nativeschema.MethodHashObject,
Role: reqInfo.Role,
SenderKey: reqInfo.SenderKey,
ContainerOwner: reqInfo.ContainerOwner,
BearerToken: md.BearerToken,
XHeaders: md.MetaHeader.GetXHeaders(),
Role: nativeSchemaRole(reqCtx.Role),
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
BearerToken: reqCtx.BearerToken,
XHeaders: meta.GetXHeaders(),
}
resp, err := c.next.GetRangeHash(ctx, request)
@ -407,26 +466,32 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa
}
func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
meta := request.GetMetaHeader()
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
meta = origin
}
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
if err != nil {
return nil, err
}
reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
reqCtx, err := requestContext(ctx)
if err != nil {
return nil, err
}
prm := Prm{
Namespace: reqInfo.Namespace,
Container: md.Container,
Object: md.Object,
Namespace: reqCtx.Namespace,
Container: cnrID,
Object: objID,
Header: request.GetBody().GetObject().GetHeader(),
Method: nativeschema.MethodPutObject,
Role: reqInfo.Role,
SenderKey: reqInfo.SenderKey,
ContainerOwner: reqInfo.ContainerOwner,
BearerToken: md.BearerToken,
XHeaders: md.MetaHeader.GetXHeaders(),
Role: nativeSchemaRole(reqCtx.Role),
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
BearerToken: reqCtx.BearerToken,
XHeaders: meta.GetXHeaders(),
}
if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
@ -436,36 +501,18 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ
return c.next.PutSingle(ctx, request)
}
type request interface {
GetMetaHeader() *session.RequestMetaHeader
GetVerificationHeader() *session.RequestVerificationHeader
}
func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) {
meta := request.GetMetaHeader()
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
meta = origin
func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
if cidV2 != nil {
if err = cnrID.ReadFromV2(*cidV2); err != nil {
return
}
}
cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2)
if err != nil {
return
}
session, err := readSessionToken(cnrID, objID, meta.GetSessionToken())
if err != nil {
return
}
bearer, err := originalBearerToken(request.GetMetaHeader())
if err != nil {
return
}
md = Metadata{
Container: cnrID,
Object: objID,
VerificationHeader: request.GetVerificationHeader(),
SessionToken: session,
BearerToken: bearer,
if objV2 != nil {
objID = new(oid.ID)
if err = objID.ReadFromV2(*objV2); err != nil {
return
}
}
return
}
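// Hedged note on the helper above: both V2 references are optional. A nil
// container reference leaves cnrID zero-valued, and a nil object reference
// yields objID == nil, which lets callers distinguish container-wide
// requests (e.g. Search) from object-addressed ones:
//
//	cnrID, objID, err := getAddressParamsSDK(nil, nil) // err == nil, objID == nil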

View file

@ -7,11 +7,3 @@ import "context"
type Checker interface {
CheckAPE(context.Context, Prm) error
}
// InnerRingFetcher is an interface that provides
// Inner Ring information.
type InnerRingFetcher interface {
// InnerRingKeys must return the list of public keys
// of the current inner ring nodes.
InnerRingKeys(ctx context.Context) ([][]byte, error)
}
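// A minimal test double satisfying InnerRingFetcher (a hedged sketch;
// staticIRFetcher is a hypothetical name, not part of this change):
//
//	type staticIRFetcher struct{ keys [][]byte }
//
//	func (f staticIRFetcher) InnerRingKeys(context.Context) ([][]byte, error) {
//		return f.keys, nil
//	}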

View file

@ -1,84 +0,0 @@
package ape
import (
"slices"
"testing"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/stretchr/testify/require"
)
func TestIsVerbCompatible(t *testing.T) {
table := map[string][]sessionSDK.ObjectVerb{
nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch},
nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete},
nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet},
nativeschema.MethodHeadObject: {
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectGet,
sessionSDK.VerbObjectDelete,
sessionSDK.VerbObjectRange,
sessionSDK.VerbObjectRangeHash,
sessionSDK.VerbObjectPatch,
},
nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch},
nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash},
nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch},
}
verbs := []sessionSDK.ObjectVerb{
sessionSDK.VerbObjectPut,
sessionSDK.VerbObjectDelete,
sessionSDK.VerbObjectHead,
sessionSDK.VerbObjectRange,
sessionSDK.VerbObjectRangeHash,
sessionSDK.VerbObjectGet,
sessionSDK.VerbObjectSearch,
sessionSDK.VerbObjectPatch,
}
var tok sessionSDK.Object
for op, list := range table {
for _, verb := range verbs {
contains := slices.Contains(list, verb)
tok.ForVerb(verb)
require.Equal(t, contains, assertVerb(tok, op),
"%v in token, %s executing", verb, op)
}
}
}
func TestAssertSessionRelation(t *testing.T) {
var tok sessionSDK.Object
cnr := cidtest.ID()
cnrOther := cidtest.ID()
obj := oidtest.ID()
objOther := oidtest.ID()
// make sure the IDs differ, otherwise the test won't work correctly
require.False(t, cnrOther.Equals(cnr))
require.False(t, objOther.Equals(obj))
// bind session to the container (required)
tok.BindContainer(cnr)
// test container-global session
require.NoError(t, assertSessionRelation(tok, cnr, nil))
require.NoError(t, assertSessionRelation(tok, cnr, &obj))
require.Error(t, assertSessionRelation(tok, cnrOther, nil))
require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
// limit the session to the particular object
tok.LimitByObjects(obj)
// test fixed object session (here the obj arg must be non-nil everywhere)
require.NoError(t, assertSessionRelation(tok, cnr, &obj))
require.Error(t, assertSessionRelation(tok, cnr, &objOther))
}

View file

@ -130,7 +130,7 @@ func TestECWriter(t *testing.T) {
nodeKey, err := keys.NewPrivateKey()
require.NoError(t, err)
log, err := logger.NewLogger(logger.Prm{})
log, err := logger.NewLogger(nil)
require.NoError(t, err)
var n nmKeys

View file

@ -195,12 +195,7 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error {
patch.FromV2(req.GetBody())
if !s.nonFirstSend {
err := s.patcher.ApplyHeaderPatch(ctx,
patcher.ApplyHeaderPatchPrm{
NewSplitHeader: patch.NewSplitHeader,
NewAttributes: patch.NewAttributes,
ReplaceAttributes: patch.ReplaceAttributes,
})
err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes)
if err != nil {
return fmt.Errorf("patch attributes: %w", err)
}

View file

@ -0,0 +1,24 @@
package object
import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
type RequestContextKeyT struct{}
var RequestContextKey = RequestContextKeyT{}
// RequestContext is a context passed between middleware handlers.
type RequestContext struct {
Namespace string
SenderKey []byte
ContainerOwner user.ID
Role acl.Role
BearerToken *bearer.Token
}
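// A hedged example of attaching the RequestContext to a request-scoped
// context via the package-level key (senderPub is a hypothetical
// []byte public key; other field values are placeholders):
//
//	rc := &RequestContext{Namespace: "tenant1", SenderKey: senderPub}
//	ctx = context.WithValue(ctx, RequestContextKey, rc)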

View file

@ -9,10 +9,15 @@ import (
"time"
internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials/insecure"
)
type clientCache struct {
@ -90,13 +95,27 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*
return nil, err
}
cc, err := createConnection(netAddr, grpc.WithContextDialer(c.ds.GrpcContextDialer()))
if err != nil {
return nil, err
opts := []grpc.DialOption{
grpc.WithChainUnaryInterceptor(
qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
tagging.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(),
tagging.NewStreamClientInterceptor(),
),
grpc.WithContextDialer(c.ds.GrpcContextDialer()),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
grpc.WithDisableServiceConfig(),
}
ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
defer cancel()
if !netAddr.IsTLSEnabled() {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
req := &HealthcheckRequest{
Body: &HealthcheckRequest_Body{},
@ -105,6 +124,13 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*
return nil, err
}
cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
defer cancel()
// perform a request to check the connection
if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
_ = cc.Close()

View file

@ -85,7 +85,7 @@ func New(opts ...Option) *Service {
// Start starts the service.
func (s *Service) Start(ctx context.Context) {
ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String())
ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
go s.replicateLoop(ctx)
go s.syncLoop(ctx)

View file

@ -3,7 +3,6 @@ package tree
import (
"context"
"crypto/sha256"
"crypto/tls"
"errors"
"fmt"
"io"
@ -23,14 +22,12 @@ import (
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
@ -304,7 +301,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return false
}
cc, err := createConnection(a, grpc.WithContextDialer(s.ds.GrpcContextDialer()))
cc, err := s.createConnection(a)
if err != nil {
s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
return false
@ -342,23 +339,13 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return from
}
func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
host, isTLS, err := client.ParseURI(a.URIAddr())
if err != nil {
return nil, err
}
creds := insecure.NewCredentials()
if isTLS {
creds = credentials.NewTLS(&tls.Config{})
}
defaultOpts := []grpc.DialOption{
func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) {
return grpc.NewClient(a.URIAddr(),
grpc.WithChainUnaryInterceptor(
qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
tracing_grpc.NewUnaryClientInterceptor(),
tagging.NewUnaryClientInterceptor(),
tracing_grpc.NewUnaryClientInteceptor(),
tagging.NewUnaryClientInteceptor(),
),
grpc.WithChainStreamInterceptor(
qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
@ -366,12 +353,10 @@ func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientC
tracing_grpc.NewStreamClientInterceptor(),
tagging.NewStreamClientInterceptor(),
),
grpc.WithTransportCredentials(creds),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
grpc.WithDisableServiceConfig(),
}
return grpc.NewClient(host, append(defaultOpts, opts...)...)
)
}
// ErrAlreadySyncing is returned when a service synchronization has already

View file

@ -23,8 +23,16 @@ type Logger struct {
// Parameters that have been connected to the Logger support
// changing its configuration at runtime.
//
// See also Logger.Reload, SetLevelString.
// Passing a Prm to NewLogger after a successful construction connects
// that Prm to the new instance of the Logger.
//
// See also Reload, SetLevelString.
type Prm struct {
// link to the created Logger instance;
// used for runtime reconfiguration
_log *Logger
// support runtime rereading
level zapcore.Level
@ -36,9 +44,6 @@ type Prm struct {
// PrependTimestamp specifies whether to prepend a timestamp in the log
PrependTimestamp bool
// Options for zap.Logger
Options []zap.Option
}
const (
@ -68,6 +73,22 @@ func (p *Prm) SetDestination(d string) error {
return nil
}
// Reload reloads the configuration of a connected Logger instance.
// Panics if the parameters have not been connected to any Logger.
// Returns any reconfiguration error from the Logger directly.
func (p Prm) Reload() error {
if p._log == nil {
// incorrect logger usage
panic("parameters are not connected to any Logger")
}
return p._log.reload(p)
}
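// A hedged usage sketch of the connect-and-reload flow (SetLevelString is
// assumed from the surrounding package; error handling elided):
//
//	prm := new(Prm)
//	log, _ := NewLogger(prm)        // connects prm to the new Logger
//	_ = prm.SetLevelString("debug") // stage a new level
//	_ = prm.Reload()                // re-apply it to the connected Logger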
func defaultPrm() *Prm {
return new(Prm)
}
// NewLogger constructs a new zap logger instance. Constructing with nil
// parameters is safe: default values are used in that case.
// Passing non-nil parameters after a successful creation (non-error) allows
@ -79,7 +100,10 @@ func (p *Prm) SetDestination(d string) error {
// - ISO8601 time encoding.
//
// Logger records a stack trace for all messages at or above fatal level.
func NewLogger(prm Prm) (*Logger, error) {
func NewLogger(prm *Prm) (*Logger, error) {
if prm == nil {
prm = defaultPrm()
}
switch prm.dest {
case DestinationUndefined, DestinationStdout:
return newConsoleLogger(prm)
@ -90,7 +114,7 @@ func NewLogger(prm Prm) (*Logger, error) {
}
}
func newConsoleLogger(prm Prm) (*Logger, error) {
func newConsoleLogger(prm *Prm) (*Logger, error) {
lvl := zap.NewAtomicLevelAt(prm.level)
c := zap.NewProductionConfig()
@ -106,22 +130,21 @@ func newConsoleLogger(prm Prm) (*Logger, error) {
c.EncoderConfig.TimeKey = ""
}
opts := []zap.Option{
lZap, err := c.Build(
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
zap.AddCallerSkip(1),
}
opts = append(opts, prm.Options...)
lZap, err := c.Build(opts...)
)
if err != nil {
return nil, err
}
l := &Logger{z: lZap, lvl: lvl}
prm._log = l
return l, nil
}
func newJournaldLogger(prm Prm) (*Logger, error) {
func newJournaldLogger(prm *Prm) (*Logger, error) {
lvl := zap.NewAtomicLevelAt(prm.level)
c := zap.NewProductionConfig()
@ -155,20 +178,21 @@ func newJournaldLogger(prm Prm) (*Logger, error) {
c.Sampling.Thereafter,
samplerOpts...,
)
opts := []zap.Option{
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
zap.AddCallerSkip(1),
}
opts = append(opts, prm.Options...)
lZap := zap.New(samplingCore, opts...)
lZap := zap.New(samplingCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1))
l := &Logger{z: lZap, lvl: lvl}
prm._log = l
return l, nil
}
func (l *Logger) Reload(prm Prm) {
func (l *Logger) reload(prm Prm) error {
l.lvl.SetLevel(prm.level)
return nil
}
func (l *Logger) WithOptions(options ...zap.Option) {
l.z = l.z.WithOptions(options...)
}
func (l *Logger) With(fields ...zap.Field) *Logger {

View file

@ -1,36 +0,0 @@
package testing
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
var (
errInvalidDiff = errors.New("invalid diff")
errNetmapNotFound = errors.New("netmap not found")
)
type TestNetmapSource struct {
Netmaps map[uint64]*netmap.NetMap
CurrentEpoch uint64
}
func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) {
if diff >= s.CurrentEpoch {
return nil, errInvalidDiff
}
return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff)
}
func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.Netmaps[epoch]; found {
return nm, nil
}
return nil, errNetmapNotFound
}
func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) {
return s.CurrentEpoch, nil
}
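// A hedged wiring example for tests (epoch numbers are arbitrary):
//
//	src := &TestNetmapSource{
//		Netmaps:      map[uint64]*netmap.NetMap{5: new(netmap.NetMap)},
//		CurrentEpoch: 5,
//	}
//	nm, err := src.GetNetMap(ctx, 0) // resolves to the epoch-5 netmap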