Compare commits
4 commits: master ... meta-list-

Author | SHA1 | Date
---|---|---
 | 72f0d1e24e |
 | 80bd5efe3f |
 | 1e660c6c61 |
 | 7eb2f6d0e1 |
64 changed files with 3162 additions and 704 deletions
14  .ci/Jenkinsfile (vendored)

@@ -68,14 +68,12 @@ async {
     }
 
     task('pre-commit') {
-        dockerfile("""
-            FROM ${golangDefault}
-            RUN apt update && \
-                apt install -y --no-install-recommends pre-commit
-        """) {
-            withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
-                sh 'pre-commit run --color=always --hook-stage=manual --all-files'
-            }
+        sh '''
+            apt update
+            apt install -y --no-install-recommends pre-commit
+        ''' // TODO: Make an OCI image for pre-commit + golang? Unpack golang tarball with a library function?
+        withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) {
+            sh 'pre-commit run --color=always --hook-stage=manual --all-files'
+        }
     }
 }
36  Makefile

@@ -1,6 +1,5 @@
 #!/usr/bin/make -f
 SHELL = bash
-.SHELLFLAGS = -euo pipefail -c
 
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
@@ -116,7 +115,7 @@ protoc:
 # Install protoc
 protoc-install:
 	@rm -rf $(PROTOBUF_DIR)
-	@mkdir -p $(PROTOBUF_DIR)
+	@mkdir $(PROTOBUF_DIR)
 	@echo "⇒ Installing protoc... "
 	@wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip'
 	@unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR)
@@ -170,7 +169,7 @@ imports:
 # Install gofumpt
 fumpt-install:
 	@rm -rf $(GOFUMPT_DIR)
-	@mkdir -p $(GOFUMPT_DIR)
+	@mkdir $(GOFUMPT_DIR)
 	@GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION)
 
 # Run gofumpt
@@ -187,37 +186,14 @@ test:
 	@echo "⇒ Running go test"
 	@GOFLAGS="$(GOFLAGS)" go test ./...
 
-# Install Gerrit commit-msg hook
-review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks
-review-install:
-	@git config remote.review.url \
-		|| git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node
-	@mkdir -p $(GIT_HOOK_DIR)/
-	@curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg
-	@chmod +x $(GIT_HOOK_DIR)/commit-msg
-	@echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg
-	@chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg
-
-# Create a PR in Gerrit
-review: BRANCH ?= master
-review:
-	@git push review HEAD:refs/for/$(BRANCH) \
-		--push-option r=e.stratonikov@yadro.com \
-		--push-option r=d.stepanov@yadro.com \
-		--push-option r=an.nikiforov@yadro.com \
-		--push-option r=a.arifullin@yadro.com \
-		--push-option r=ekaterina.lebedeva@yadro.com \
-		--push-option r=a.savchuk@yadro.com \
-		--push-option r=a.chuprov@yadro.com
-
 # Run pre-commit
 pre-commit-run:
 	@pre-commit run -a --hook-stage manual
 
 # Install linters
-lint-install: $(BIN)
+lint-install:
 	@rm -rf $(OUTPUT_LINT_DIR)
-	@mkdir -p $(OUTPUT_LINT_DIR)
+	@mkdir $(OUTPUT_LINT_DIR)
 	@mkdir -p $(TMP_DIR)
 	@rm -rf $(TMP_DIR)/linters
 	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@ -236,7 +212,7 @@ lint:
 # Install staticcheck
 staticcheck-install:
 	@rm -rf $(STATICCHECK_DIR)
-	@mkdir -p $(STATICCHECK_DIR)
+	@mkdir $(STATICCHECK_DIR)
 	@GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION)
 
 # Run staticcheck
@@ -249,7 +225,7 @@ staticcheck-run:
 # Install gopls
 gopls-install:
 	@rm -rf $(GOPLS_DIR)
-	@mkdir -p $(GOPLS_DIR)
+	@mkdir $(GOPLS_DIR)
 	@GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION)
 
 # Run gopls
@@ -7,6 +7,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete"
 	utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
@@ -40,6 +41,7 @@ func init() {
 
 	rootCmd.AddCommand(config.RootCmd)
 	rootCmd.AddCommand(morph.RootCmd)
+	rootCmd.AddCommand(storagecfg.RootCmd)
 	rootCmd.AddCommand(metabase.RootCmd)
 
 	rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
135  cmd/frostfs-adm/internal/modules/storagecfg/config.go (new file)

@@ -0,0 +1,135 @@
package storagecfg

const configTemplate = `logger:
  level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"

node:
  wallet:
    path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented
    address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented
    password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented
  addresses: # list of addresses announced by Storage node in the Network map
    - {{ .AnnouncedAddress }}
  attribute_0: UN-LOCODE:{{ .Attribute.Locode }}
  relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map

grpc:
  num: 1 # total number of listener endpoints
  0:
    endpoint: {{ .Endpoint }} # endpoint for gRPC server
    tls:{{if .TLSCert}}
      enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2)
      certificate: {{ .TLSCert }} # path to TLS certificate
      key: {{ .TLSKey }} # path to TLS key
    {{- else }}
      enabled: false # disable TLS for a gRPC connection
    {{- end}}

control:
  authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
    {{- range .AuthorizedKeys }}
    - {{.}}{{end}}
  grpc:
    endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service

morph:
  dial_timeout: 20s # timeout for side chain NEO RPC client connection
  cache_ttl: 15s # use TTL cache for side chain GET operations
  rpc_endpoint: # side chain N3 RPC endpoints
    {{- range .MorphRPC }}
    - address: wss://{{.}}/ws{{end}}
{{if not .Relay }}
storage:
  shard:
    default: # section with the default shard parameters
      metabase:
        perm: 0644 # permissions for metabase files(directories: +x for current user and group)

      blobstor:
        perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
        depth: 2 # max depth of object tree storage in FS
        small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes
        compress: true # turn on/off Zstandard compression (level 3) of stored objects
        compression_exclude_content_types:
          - audio/*
          - video/*

        blobovnicza:
          size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
          depth: 1 # max depth of object tree storage in key-value DB
          width: 4 # max width of object tree storage in key-value DB
          opened_cache_capacity: 50 # maximum number of opened database files
          opened_cache_ttl: 5m # ttl for opened database file
          opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's

      gc:
        remover_batch_size: 200 # number of objects to be removed by the garbage collector
        remover_sleep_interval: 5m # frequency of the garbage collector invocation
    0:
      mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only"

      metabase:
        path: {{ .MetabasePath }} # path to the metabase

      blobstor:
        path: {{ .BlobstorPath }} # path to the blobstor
{{end}}`

const (
	neofsMainnetAddress   = "2cafa46838e8b564468ebd868dcafdd99dce6221"
	balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55"
	neofsTestnetAddress   = "b65d8243ac63983206d17e5221af0653a7266fa1"
	balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf"
)

var n3config = map[string]struct {
	MorphRPC        []string
	RPC             []string
	NeoFSContract   string
	BalanceContract string
}{
	"testnet": {
		MorphRPC: []string{
			"rpc01.morph.testnet.fs.neo.org:51331",
			"rpc02.morph.testnet.fs.neo.org:51331",
			"rpc03.morph.testnet.fs.neo.org:51331",
			"rpc04.morph.testnet.fs.neo.org:51331",
			"rpc05.morph.testnet.fs.neo.org:51331",
			"rpc06.morph.testnet.fs.neo.org:51331",
			"rpc07.morph.testnet.fs.neo.org:51331",
		},
		RPC: []string{
			"rpc01.testnet.n3.nspcc.ru:21331",
			"rpc02.testnet.n3.nspcc.ru:21331",
			"rpc03.testnet.n3.nspcc.ru:21331",
			"rpc04.testnet.n3.nspcc.ru:21331",
			"rpc05.testnet.n3.nspcc.ru:21331",
			"rpc06.testnet.n3.nspcc.ru:21331",
			"rpc07.testnet.n3.nspcc.ru:21331",
		},
		NeoFSContract:   neofsTestnetAddress,
		BalanceContract: balanceTestnetAddress,
	},
	"mainnet": {
		MorphRPC: []string{
			"rpc1.morph.fs.neo.org:40341",
			"rpc2.morph.fs.neo.org:40341",
			"rpc3.morph.fs.neo.org:40341",
			"rpc4.morph.fs.neo.org:40341",
			"rpc5.morph.fs.neo.org:40341",
			"rpc6.morph.fs.neo.org:40341",
			"rpc7.morph.fs.neo.org:40341",
		},
		RPC: []string{
			"rpc1.n3.nspcc.ru:10331",
			"rpc2.n3.nspcc.ru:10331",
			"rpc3.n3.nspcc.ru:10331",
			"rpc4.n3.nspcc.ru:10331",
			"rpc5.n3.nspcc.ru:10331",
			"rpc6.n3.nspcc.ru:10331",
			"rpc7.n3.nspcc.ru:10331",
		},
		NeoFSContract:   neofsMainnetAddress,
		BalanceContract: balanceMainnetAddress,
	},
}
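A minimal sketch of how a Go `text/template` like `configTemplate` is rendered with the `config` struct (the actual wiring lives in `applyTemplate` in root.go below; the `settings` type and field values here are illustrative, not from the repository):

```go
package main

import (
	"os"
	"text/template"
)

// settings mirrors a small fragment of the config struct defined in root.go;
// only a few fields are reproduced for illustration.
type settings struct {
	AnnouncedAddress string
	Relay            bool
	Wallet           struct{ Path, Account, Password string }
}

// tmpl is a cut-down stand-in for configTemplate above.
const tmpl = `node:
  wallet:
    path: {{ .Wallet.Path }}
  addresses:
    - {{ .AnnouncedAddress }}
  relay: {{ .Relay }}
`

func main() {
	t := template.Must(template.New("config").Parse(tmpl))

	s := settings{AnnouncedAddress: "node.example.org:8080"}
	s.Wallet.Path = "/path/to/wallet.json"

	// Render the YAML to stdout; root.go instead writes the rendered bytes
	// to the config file chosen by the user.
	_ = t.Execute(os.Stdout, s)
}
```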
432  cmd/frostfs-adm/internal/modules/storagecfg/root.go (new file)

@@ -0,0 +1,432 @@
package storagecfg

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"net/url"
	"os"
	"path/filepath"
	"slices"
	"strconv"
	"strings"
	"text/template"
	"time"

	netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
	"github.com/chzyer/readline"
	"github.com/nspcc-dev/neo-go/cli/flags"
	"github.com/nspcc-dev/neo-go/cli/input"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
	"github.com/spf13/cobra"
)

const (
	walletFlag  = "wallet"
	accountFlag = "account"
)

const (
	defaultControlEndpoint = "localhost:8090"
	defaultDataEndpoint    = "localhost"
)

// RootCmd is a root command of config section.
var RootCmd = &cobra.Command{
	Use:   "storage-config [-w wallet] [-a acccount] [<path-to-config>]",
	Short: "Section for storage node configuration commands",
	Run:   storageConfig,
}

func init() {
	fs := RootCmd.Flags()

	fs.StringP(walletFlag, "w", "", "Path to wallet")
	fs.StringP(accountFlag, "a", "", "Wallet account")
}

type config struct {
	AnnouncedAddress string
	AuthorizedKeys   []string
	ControlEndpoint  string
	Endpoint         string
	TLSCert          string
	TLSKey           string
	MorphRPC         []string
	Attribute        struct {
		Locode string
	}
	Wallet struct {
		Path     string
		Account  string
		Password string
	}
	Relay        bool
	BlobstorPath string
	MetabasePath string
}

func storageConfig(cmd *cobra.Command, args []string) {
	outPath := getOutputPath(args)

	historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history")
	readline.SetHistoryPath(historyPath)

	var c config

	c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag)
	if c.Wallet.Path == "" {
		c.Wallet.Path = getPath("Path to the storage node wallet: ")
	}

	w, err := wallet.NewWalletFromFile(c.Wallet.Path)
	fatalOnErr(err)

	fillWalletAccount(cmd, &c, w)

	accH, err := flags.ParseAddress(c.Wallet.Account)
	fatalOnErr(err)

	acc := w.GetAccount(accH)
	if acc == nil {
		fatalOnErr(errors.New("can't find account in wallet"))
	}

	c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
	fatalOnErr(err)

	err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())
	fatalOnErr(err)

	c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes()))

	network := readNetwork(cmd)

	c.MorphRPC = n3config[network].MorphRPC

	depositGas(cmd, acc, network)

	c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ")

	endpoint := getDefaultEndpoint(cmd, &c)
	c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint))
	if c.Endpoint == "" {
		c.Endpoint = endpoint
	}

	c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint))
	if c.ControlEndpoint == "" {
		c.ControlEndpoint = defaultControlEndpoint
	}

	c.TLSCert = getPath("TLS Certificate (optional): ")
	if c.TLSCert != "" {
		c.TLSKey = getPath("TLS Key: ")
	}

	c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ")
	if !c.Relay {
		p := getPath("Path to the storage directory (all available storage will be used): ")
		c.BlobstorPath = filepath.Join(p, "blob")
		c.MetabasePath = filepath.Join(p, "meta")
	}

	out := applyTemplate(c)
	fatalOnErr(os.WriteFile(outPath, out, 0o644))

	cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`")
}

func getDefaultEndpoint(cmd *cobra.Command, c *config) string {
	var addr, port string
	for {
		c.AnnouncedAddress = getString("Publicly announced address: ")
		validator := netutil.Address{}
		err := validator.FromString(c.AnnouncedAddress)
		if err != nil {
			cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.")
			continue
		}
		uriAddr, err := url.Parse(validator.URIAddr())
		if err != nil {
			panic(fmt.Errorf("unexpected error: %w", err))
		}
		addr = uriAddr.Hostname()
		port = uriAddr.Port()
		ip, err := net.ResolveIPAddr("ip", addr)
		if err != nil {
			cmd.Printf("Can't resolve IP address %s: %v\n", addr, err)
			continue
		}

		if !ip.IP.IsGlobalUnicast() {
			cmd.Println("IP must be global unicast.")
			continue
		}
		cmd.Printf("Resolved IP address: %s\n", ip.String())

		_, err = strconv.ParseUint(port, 10, 16)
		if err != nil {
			cmd.Println("Port must be an integer.")
			continue
		}

		break
	}
	return net.JoinHostPort(defaultDataEndpoint, port)
}

func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) {
	c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag)
	if c.Wallet.Account == "" {
		addr := address.Uint160ToString(w.GetChangeAddress())
		c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr))
		if c.Wallet.Account == "" {
			c.Wallet.Account = addr
		}
	}
}

func readNetwork(cmd *cobra.Command) string {
	var network string
	for {
		network = getString("Choose network [mainnet]/testnet: ")
		switch network {
		case "":
			network = "mainnet"
		case "testnet", "mainnet":
		default:
			cmd.Println(`Network must be either "mainnet" or "testnet"`)
			continue
		}
		break
	}
	return network
}

func getOutputPath(args []string) string {
	if len(args) != 0 {
		return args[0]
	}
	outPath := getPath("File to write config at [./config.yml]: ")
	if outPath == "" {
		outPath = "./config.yml"
	}
	return outPath
}

func getWalletAccount(w *wallet.Wallet, prompt string) string {
	addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts))
	for i := range w.Accounts {
		addrs[i] = readline.PcItem(w.Accounts[i].Address)
	}

	readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...))
	defer readline.SetAutoComplete(nil)

	s, err := readline.Line(prompt)
	fatalOnErr(err)
	return strings.TrimSpace(s) // autocompleter can return a string with a trailing space
}

func getString(prompt string) string {
	s, err := readline.Line(prompt)
	fatalOnErr(err)
	if s != "" {
		_ = readline.AddHistory(s)
	}
	return s
}

type filenameCompleter struct{}

func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) {
	prefix := string(line[:pos])
	dir := filepath.Dir(prefix)
	de, err := os.ReadDir(dir)
	if err != nil {
		return nil, 0
	}

	for i := range de {
		name := filepath.Join(dir, de[i].Name())
		if strings.HasPrefix(name, prefix) {
			tail := []rune(strings.TrimPrefix(name, prefix))
			if de[i].IsDir() {
				tail = append(tail, filepath.Separator)
			}
			newLine = append(newLine, tail)
		}
	}
	if pos != 0 {
		return newLine, pos - len([]rune(dir))
	}
	return newLine, 0
}

func getPath(prompt string) string {
	readline.SetAutoComplete(filenameCompleter{})
	defer readline.SetAutoComplete(nil)

	p, err := readline.Line(prompt)
	fatalOnErr(err)

	if p == "" {
		return p
	}

	_ = readline.AddHistory(p)

	abs, err := filepath.Abs(p)
	if err != nil {
		fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err))
	}

	return abs
}

func getConfirmation(def bool, prompt string) bool {
	for {
		s, err := readline.Line(prompt)
		fatalOnErr(err)

		switch strings.ToLower(s) {
		case "y", "yes":
			return true
		case "n", "no":
			return false
		default:
			if len(s) == 0 {
				return def
			}
		}
	}
}

func applyTemplate(c config) []byte {
	tmpl, err := template.New("config").Parse(configTemplate)
	fatalOnErr(err)

	b := bytes.NewBuffer(nil)
	fatalOnErr(tmpl.Execute(b, c))

	return b.Bytes()
}

func fatalOnErr(err error) {
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}

func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) {
	sideClient := initClient(n3config[network].MorphRPC)
	balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract)

	sideActor, err := actor.NewSimple(sideClient, acc)
	if err != nil {
		fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err))
	}

	sideGas := nep17.NewReader(sideActor, balanceHash)
	accSH := acc.Contract.ScriptHash()

	balance, err := sideGas.BalanceOf(accSH)
	if err != nil {
		fatalOnErr(fmt.Errorf("side chain balance: %w", err))
	}

	ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ",
		fixedn.ToString(balance, 12)))
	if !ok {
		return
	}

	amountStr := getString("Enter amount in GAS: ")
	amount, err := fixedn.FromString(amountStr, 8)
	if err != nil {
		fatalOnErr(fmt.Errorf("invalid amount: %w", err))
	}

	mainClient := initClient(n3config[network].RPC)
	neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract)

	mainActor, err := actor.NewSimple(mainClient, acc)
	if err != nil {
		fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err))
	}

	mainGas := nep17.New(mainActor, gas.Hash)

	txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil)
	if err != nil {
		fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err))
	}

	cmd.Print("Waiting for transactions to persist.")
	tick := time.NewTicker(time.Second / 2)
	defer tick.Stop()

	timer := time.NewTimer(time.Second * 20)
	defer timer.Stop()

	at := trigger.Application

loop:
	for {
		select {
		case <-tick.C:
			_, err := mainClient.GetApplicationLog(txHash, &at)
			if err == nil {
				cmd.Print("\n")
				break loop
			}
			cmd.Print(".")
		case <-timer.C:
			cmd.Printf("\nTimeout while waiting for transaction to persist.\n")
			if getConfirmation(false, "Continue configuration? yes/[no]: ") {
				return
			}
			os.Exit(1)
		}
	}
}

func initClient(rpc []string) *rpcclient.Client {
	var c *rpcclient.Client
	var err error

	shuffled := slices.Clone(rpc)
	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })

	for _, endpoint := range shuffled {
		c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{
			DialTimeout:    time.Second * 2,
			RequestTimeout: time.Second * 5,
		})
		if err != nil {
			continue
		}
		if err = c.Init(); err != nil {
			continue
		}
		return c
	}

	fatalOnErr(fmt.Errorf("can't create N3 client: %w", err))
	panic("unreachable")
}
@@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey
 	prmDial := client.PrmDial{
 		Endpoint: addr.URIAddr(),
 		GRPCDialOptions: []grpc.DialOption{
-			grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()),
+			grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()),
 			grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()),
 			grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
 		},
@@ -33,7 +33,7 @@ func _client() (tree.TreeServiceClient, error) {
 
 	opts := []grpc.DialOption{
 		grpc.WithChainUnaryInterceptor(
-			tracing.NewUnaryClientInterceptor(),
+			tracing.NewUnaryClientInteceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
 			tracing.NewStreamClientInterceptor(),
@@ -9,7 +9,6 @@ import (
 	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"github.com/spf13/viper"
 	"go.uber.org/zap"
 )
@@ -39,14 +38,13 @@ func reloadConfig() error {
 	}
 	cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
 	audit.Store(cfg.GetBool("audit.enabled"))
-	var logPrm logger.Prm
 	err = logPrm.SetLevelString(cfg.GetString("logger.level"))
 	if err != nil {
 		return err
 	}
-	log.Reload(logPrm)
+	logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
 
-	return nil
+	return logPrm.Reload()
 }
 
 func watchForSignal(ctx context.Context, cancel func()) {
@@ -31,6 +31,7 @@ const (
 var (
 	wg     = new(sync.WaitGroup)
 	intErr = make(chan error) // internal inner ring errors
+	logPrm = new(logger.Prm)
 	innerRing  *innerring.Server
 	pprofCmp   *pprofComponent
 	metricsCmp *httpComponent
@@ -69,7 +70,6 @@ func main() {
 
 	metrics := irMetrics.NewInnerRingMetrics()
 
-	var logPrm logger.Prm
 	err = logPrm.SetLevelString(
 		cfg.GetString("logger.level"),
 	)
@@ -473,6 +473,7 @@ type shared struct {
 // dynamicConfiguration stores parameters of the
 // components that supports runtime reconfigurations.
 type dynamicConfiguration struct {
+	logger  *logger.Prm
 	pprof   *httpComponent
 	metrics *httpComponent
 }
@@ -713,8 +714,7 @@ func initCfg(appCfg *config.Config) *cfg {
 
 	netState.metrics = c.metricsCollector
 
-	logPrm, err := c.loggerPrm()
-	fatalOnErr(err)
+	logPrm := c.loggerPrm()
 	logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook()
 	log, err := logger.NewLogger(logPrm)
 	fatalOnErr(err)
@@ -1076,22 +1076,26 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
 	return sh
 }
 
-func (c *cfg) loggerPrm() (logger.Prm, error) {
-	var prm logger.Prm
-	// (re)init read configuration
-	err := prm.SetLevelString(c.LoggerCfg.level)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level)
-	}
-	err = prm.SetDestination(c.LoggerCfg.destination)
-	if err != nil {
-		// not expected since validation should be performed before
-		return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination)
-	}
-	prm.PrependTimestamp = c.LoggerCfg.timestamp
-
-	return prm, nil
+func (c *cfg) loggerPrm() *logger.Prm {
+	// check if it has been inited before
+	if c.dynamicConfiguration.logger == nil {
+		c.dynamicConfiguration.logger = new(logger.Prm)
+	}
+
+	// (re)init read configuration
+	err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level)
+	if err != nil {
+		// not expected since validation should be performed before
+		panic("incorrect log level format: " + c.LoggerCfg.level)
+	}
+	err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination)
+	if err != nil {
+		// not expected since validation should be performed before
+		panic("incorrect log destination format: " + c.LoggerCfg.destination)
+	}
+	c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
+
+	return c.dynamicConfiguration.logger
 }
 
 func (c *cfg) LocalAddress() network.AddressGroup {
@@ -1331,7 +1335,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	// all the components are expected to support
 	// Logger's dynamic reconfiguration approach
 
-	components := c.getComponents(ctx)
+	// Logger
+
+	logPrm := c.loggerPrm()
+
+	components := c.getComponents(ctx, logPrm)
 
 	// Object
 	c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
@@ -1369,17 +1377,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }
 
-func (c *cfg) getComponents(ctx context.Context) []dCmp {
+func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
 	var components []dCmp
 
-	components = append(components, dCmp{"logger", func() error {
-		prm, err := c.loggerPrm()
-		if err != nil {
-			return err
-		}
-		c.log.Reload(prm)
-		return nil
-	}})
+	components = append(components, dCmp{"logger", logPrm.Reload})
 	components = append(components, dCmp{"runtime", func() error {
 		setRuntimeParameters(ctx, c)
 		return nil
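The logger change above follows a lazily-initialized, reloadable-parameters pattern: the `*logger.Prm` is created once, cached in `dynamicConfiguration`, re-read from configuration on each `loggerPrm()` call, and components register `logPrm.Reload` directly instead of rebuilding their own copy. A stripped-down sketch of the same idea (the `Prm` and `cfg` types below are illustrative stand-ins, not the node's actual API):

```go
package main

import "fmt"

// Prm is an illustrative stand-in for logger.Prm: it holds the desired
// settings and knows how to apply them to the running logger.
type Prm struct{ level string }

func (p *Prm) SetLevelString(l string) { p.level = l }

func (p *Prm) Reload() error {
	fmt.Println("logger reloaded, level =", p.level)
	return nil
}

type cfg struct {
	loggerLevel string
	cachedPrm   *Prm // mirrors dynamicConfiguration.logger
}

// loggerPrm returns the cached parameters, creating them on first use and
// re-reading the configuration on every call, as the new c.loggerPrm does.
func (c *cfg) loggerPrm() *Prm {
	if c.cachedPrm == nil {
		c.cachedPrm = new(Prm)
	}
	c.cachedPrm.SetLevelString(c.loggerLevel)
	return c.cachedPrm
}

func main() {
	c := &cfg{loggerLevel: "info"}
	prm := c.loggerPrm()

	// On reconfiguration, refreshing the same pointer and calling Reload
	// is enough; no component has to rebuild its own parameters.
	c.loggerLevel = "debug"
	c.loggerPrm()
	_ = prm.Reload()
}
```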
@@ -31,11 +31,12 @@ func Limits(c *config.Config) []LimitConfig {
 			break
 		}
 
-		if sc.Value("max_ops") == nil {
+		maxOps := config.IntSafe(sc, "max_ops")
+		if maxOps == 0 {
 			panic("no max operations for method group")
 		}
 
-		limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")})
+		limits = append(limits, LimitConfig{methods, maxOps})
 	}
 
 	return limits
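A hedged reading of this change: `config.IntSafe` is assumed to yield 0 both when `max_ops` is absent and when it is explicitly set to 0, so after this patch both cases hit the panic, which is consistent with the "zero max operations" test and fixtures being deleted further below. A tiny sketch of the tightened check, using stand-in types instead of the real config package:

```go
package main

import "fmt"

// section is a stand-in for the real *config.Config subsection.
type section map[string]int64

// intSafe mimics the assumed behaviour of config.IntSafe: missing keys read as 0.
func intSafe(sc section, key string) int64 { return sc[key] }

func maxOpsOrPanic(sc section) int64 {
	maxOps := intSafe(sc, "max_ops")
	if maxOps == 0 { // previously only a missing key was rejected
		panic("no max operations for method group")
	}
	return maxOps
}

func main() {
	fmt.Println(maxOpsOrPanic(section{"max_ops": 10000})) // ok: 10000
	// maxOpsOrPanic(section{})             // panics: key missing
	// maxOpsOrPanic(section{"max_ops": 0}) // panics too after this change
}
```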
@@ -38,7 +38,7 @@ func TestRPCSection(t *testing.T) {
 	})
 
 	t.Run("no max operations", func(t *testing.T) {
-		const path = "testdata/no_max_ops"
+		const path = "testdata/node"
 
 		fileConfigTest := func(c *config.Config) {
 			require.Panics(t, func() { _ = Limits(c) })
@@ -50,28 +50,4 @@ func TestRPCSection(t *testing.T) {
 			configtest.ForEnvFileType(t, path, fileConfigTest)
 		})
 	})
-
-	t.Run("zero max operations", func(t *testing.T) {
-		const path = "testdata/zero_max_ops"
-
-		fileConfigTest := func(c *config.Config) {
-			limits := Limits(c)
-			require.Len(t, limits, 2)
-
-			limit0 := limits[0]
-			limit1 := limits[1]
-
-			require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"})
-			require.Equal(t, limit0.MaxOps, int64(0))
-
-			require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"})
-			require.Equal(t, limit1.MaxOps, int64(10000))
-		}
-
-		configtest.ForEachFileType(path, fileConfigTest)
-
-		t.Run("ENV", func(t *testing.T) {
-			configtest.ForEnvFileType(t, path, fileConfigTest)
-		})
-	})
 }
@@ -1,4 +0,0 @@
-FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put"
-FROSTFS_RPC_LIMITS_0_MAX_OPS=0
-FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get"
-FROSTFS_RPC_LIMITS_1_MAX_OPS=10000
@@ -1,19 +0,0 @@
-{
-  "rpc": {
-    "limits": [
-      {
-        "methods": [
-          "/neo.fs.v2.object.ObjectService/PutSingle",
-          "/neo.fs.v2.object.ObjectService/Put"
-        ],
-        "max_ops": 0
-      },
-      {
-        "methods": [
-          "/neo.fs.v2.object.ObjectService/Get"
-        ],
-        "max_ops": 10000
-      }
-    ]
-  }
-}
@@ -1,9 +0,0 @@
-rpc:
-  limits:
-    - methods:
-        - /neo.fs.v2.object.ObjectService/PutSingle
-        - /neo.fs.v2.object.ObjectService/Put
-      max_ops: 0
-    - methods:
-        - /neo.fs.v2.object.ObjectService/Get
-      max_ops: 10000
@@ -16,6 +16,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
 	objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
 	objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+	v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
 	objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
 	objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
 	deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
@@ -171,10 +172,12 @@ func initObjectService(c *cfg) {
 
 	splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch)
 
-	apeSvc := createAPEService(c, &irFetcher, splitSvc)
+	apeSvc := createAPEService(c, splitSvc)
+
+	aclSvc := createACLServiceV2(c, apeSvc, &irFetcher)
 
 	var commonSvc objectService.Common
-	commonSvc.Init(&c.internals, apeSvc)
+	commonSvc.Init(&c.internals, aclSvc)
 
 	respSvc := objectService.NewResponseService(
 		&commonSvc,
@@ -281,7 +284,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
 	})
 }
 
-func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher {
+func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
 	return &innerRingFetcherWithNotary{
 		sidechain: c.cfgMorph.client,
 	}
@@ -426,7 +429,17 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
 	)
 }
 
-func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
+func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service {
+	return v2.New(
+		apeSvc,
+		c.netMapSource,
+		irFetcher,
+		c.cfgObject.cnrSource,
+		v2.WithLogger(c.log),
+	)
+}
+
+func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
 	return objectAPE.NewService(
 		objectAPE.NewChecker(
 			c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
@@ -438,7 +451,6 @@ func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectServic
 			c.cfgObject.cnrSource,
 			c.binPublicKey,
 		),
-		objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource),
 		splitSvc,
 	)
 }
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"os"
 	"path/filepath"
 	"testing"
 
@@ -21,4 +22,17 @@ func TestValidate(t *testing.T) {
 			require.NoError(t, err)
 		})
 	})
+
+	t.Run("mainnet", func(t *testing.T) {
+		os.Clearenv() // ENVs have priority over config files, so we do this in tests
+		p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml")
+		c := config.New(p, "", config.EnvPrefix)
+		require.NoError(t, validateConfig(c))
+	})
+	t.Run("testnet", func(t *testing.T) {
+		os.Clearenv() // ENVs have priority over config files, so we do this in tests
+		p := filepath.Join(exampleConfigPrefix, "testnet/config.yml")
+		c := config.New(p, "", config.EnvPrefix)
+		require.NoError(t, validateConfig(c))
+	})
 }
28  config/mainnet/README.md (new file)

@@ -0,0 +1,28 @@
# N3 Mainnet Storage node configuration

Here is a template for simple storage node configuration in N3 Mainnet.
Make sure to specify correct values instead of `<...>` placeholders.
Do not change `contracts` section. Run the latest frostfs-node release with
the fixed config `frostfs-node -c config.yml`

To use NeoFS in the Mainnet, you need to deposit assets to NeoFS contract.
The contract sript hash is `2cafa46838e8b564468ebd868dcafdd99dce6221`
(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`)

## Tips

Use `grpcs://` scheme in the announced address if you enable TLS in grpc server.
```yaml
node:
  addresses:
    - grpcs://frostfs.my.org:8080

grpc:
  num: 1
  0:
    endpoint: frostfs.my.org:8080
    tls:
      enabled: true
      certificate: /path/to/cert
      key: /path/to/key
```
70  config/mainnet/config.yml (new file)

@@ -0,0 +1,70 @@
node:
  wallet:
    path: <path/to/wallet>
    address: <address-in-wallet>
    password: <password>
  addresses:
    - <announced.address:port>
  attribute_0: UN-LOCODE:<XX YYY>
  attribute_1: Price:100000
  attribute_2: User-Agent:FrostFS\/0.9999

grpc:
  num: 1
  0:
    endpoint: <listen.local.address:port>
    tls:
      enabled: false

storage:
  shard_num: 1
  shard:
    0:
      metabase:
        path: /storage/path/metabase
        perm: 0600
      blobstor:
        - path: /storage/path/blobovnicza
          type: blobovnicza
          perm: 0600
          opened_cache_capacity: 32
          depth: 1
          width: 1
        - path: /storage/path/fstree
          type: fstree
          perm: 0600
          depth: 4
      writecache:
        enabled: false
      gc:
        remover_batch_size: 100
        remover_sleep_interval: 1m

logger:
  level: info

prometheus:
  enabled: true
  address: localhost:9090
  shutdown_timeout: 15s

object:
  put:
    remote_pool_size: 100
    local_pool_size: 100

morph:
  rpc_endpoint:
    - wss://rpc1.morph.frostfs.info:40341/ws
    - wss://rpc2.morph.frostfs.info:40341/ws
    - wss://rpc3.morph.frostfs.info:40341/ws
    - wss://rpc4.morph.frostfs.info:40341/ws
    - wss://rpc5.morph.frostfs.info:40341/ws
    - wss://rpc6.morph.frostfs.info:40341/ws
    - wss://rpc7.morph.frostfs.info:40341/ws
  dial_timeout: 20s

contracts:
  balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
  container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
  netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1
129  config/testnet/README.md (new file)

@@ -0,0 +1,129 @@
# N3 Testnet Storage node configuration

There is a prepared configuration for NeoFS Storage Node deployment in
N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared
docker image and run it with docker-compose.

## Build image

Prepared **frostfs-storage-testnet** image is available at Docker Hub.
However, if you need to rebuild it for some reason, run
`make image-storage-testnet` command.

```
$ make image-storage-testnet
...
Successfully built ab0557117b02
Successfully tagged nspccdev/neofs-storage-testnet:0.25.1
```

## Deploy node

To run a storage node in N3 Testnet environment, you should deposit GAS assets,
update docker-compose file and start the node.

### Deposit

The Storage Node owner should deposit GAS to NeoFS smart contract. It generates a
bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send bootstrap tx.

First, obtain GAS in N3 Testnet chain. You can do that with
[faucet](https://neowish.ngd.network) service.

Then, make a deposit by transferring GAS to NeoFS contract in N3 Testnet.
You can provide scripthash in the `data` argument of transfer tx to make a
deposit to a specified account. Otherwise, deposit is made to the tx sender.

NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`,
so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`.

See a deposit example with `neo-go`.

```
neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \
--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \
--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \
--token GAS \
--amount 1
```

### Configure

Next, configure `node_config.env` file. Change endpoints values. Both
should contain your **public** IP.

```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
```

Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory)
attribute.

```
NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512
NEOFS_NODE_ADDRESSES=65.52.183.157:36512
NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
```

You can validate UN/LOCODE attribute in
[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
with frostfs-cli.

```
$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED'
Country: Russia
Location: Saint Petersburg (ex Leningrad)
Continent: Europe
Subdivision: [SPE] Sankt-Peterburg
Coordinates: 59.53, 30.15
```

It is recommended to pass the node's key as a file. To do so, convert your wallet
WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file.

```
// Print WIF in a 32-byte hex format
$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
PrivateKey      11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56
PublicKey       02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059
WIF             Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s
Wallet3.0       Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ
ScriptHash3.0   dffe39998f50d42f2e06807866161cd0440b4bdc
ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf

// Save 32-byte hex into a file
$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key
```

Then, specify the path to this file in `docker-compose.yml`
```yaml
    volumes:
      - frostfs_storage:/storage
      - ./my_wallet.key:/node.key
```

NeoFS objects will be stored on your machine. By default, docker-compose
is configured to store objects in named docker volume `frostfs_storage`. You can
specify a directory on the filesystem to store objects there.

```yaml
    volumes:
      - /home/username/frostfs/rc3/storage:/storage
      - ./my_wallet.key:/node.key
```

### Start

Run the node with `docker-compose up` command and stop it with `docker-compose down`.

### Debug

To print node logs, use `docker logs frostfs-testnet`. To print debug messages in
log, set up log level to debug with this env:

```yaml
    environment:
      - NEOFS_LOGGER_LEVEL=debug
```
52  config/testnet/config.yml (new file)

@@ -0,0 +1,52 @@
logger:
  level: info

morph:
  rpc_endpoint:
    - wss://rpc01.morph.testnet.frostfs.info:51331/ws
    - wss://rpc02.morph.testnet.frostfs.info:51331/ws
    - wss://rpc03.morph.testnet.frostfs.info:51331/ws
    - wss://rpc04.morph.testnet.frostfs.info:51331/ws
    - wss://rpc05.morph.testnet.frostfs.info:51331/ws
    - wss://rpc06.morph.testnet.frostfs.info:51331/ws
    - wss://rpc07.morph.testnet.frostfs.info:51331/ws
  dial_timeout: 20s

contracts:
  balance: e0420c216003747626670d1424569c17c79015bf
  container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
  netmap: d4b331639799e2958d4bc5b711b469d79de94e01

node:
  key: /node.key
  attribute_0: Deployed:SelfHosted
  attribute_1: User-Agent:FrostFS\/0.9999

prometheus:
  enabled: true
  address: localhost:9090
  shutdown_timeout: 15s

storage:
  shard_num: 1
  shard:
    0:
      metabase:
        path: /storage/metabase
        perm: 0777
      blobstor:
        - path: /storage/path/blobovnicza
          type: blobovnicza
          perm: 0600
          opened_cache_capacity: 32
          depth: 1
          width: 1
        - path: /storage/path/fstree
          type: fstree
          perm: 0600
          depth: 4
      writecache:
        enabled: false
      gc:
        remover_batch_size: 100
        remover_sleep_interval: 1m
@@ -51,7 +51,10 @@ However, all mode changing operations are idempotent.
 
 ## Automatic mode changes
 
-A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold.
+Shard can automatically switch to a `degraded-read-only` mode in 3 cases:
+1. If the metabase was not available or couldn't be opened/initialized during shard startup.
+2. If shard error counter exceeds threshold.
+3. If the metabase couldn't be reopened during SIGHUP handling.
 
 # Detach shard
 
4  go.mod

@@ -7,8 +7,8 @@ require (
 	git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1
 	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
 	git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
-	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248
-	git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275
+	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824
+	git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529
 	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1
 	git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
8
go.sum
8
go.sum

@ -6,10 +6,10 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV

git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 h1:WqWxCnCl2ekfjWja/CpGeF2rf4h0x199xhdnsm/j+E8=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 h1:CBreXSxGoYJAdZ1QdJPsDs1UCXGF5psinII0lxtohsc=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
@ -15,7 +15,7 @@ func newQoSMetrics() *QoSMetrics {

Namespace: namespace,
Namespace: namespace,
Subsystem: qosSubsystem,
Subsystem: qosSubsystem,
Name: "operations_total",
Name: "operations_total",
Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
Help: "Count of pending, in progree, completed and failed due of resource exhausted error operations for each shard",
}, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
}, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}),
}
}
}
}
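For reference, a metric with the shape shown in this hunk could be declared with the Prometheus Go client roughly as follows. The namespace, subsystem and label values below are placeholders, not the node's real constants.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// operations mirrors the gauge vector declared above: one series per
// shard, operation, IO tag and operation state.
var operations = prometheus.NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "frostfs_node", // placeholder
	Subsystem: "qos",          // placeholder
	Name:      "operations_total",
	Help:      "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard",
}, []string{"shard_id", "operation", "io_tag", "type"})

func main() {
	prometheus.MustRegister(operations)
	operations.WithLabelValues("shard-1", "get", "client", "completed").Inc()
}
```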
@ -26,7 +26,7 @@ func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor

if err != nil {
if err != nil {
tag = IOTagClient
tag = IOTagClient
}
}
if tag.IsLocal() {
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
tag = IOTagInternal
}
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
ctx = tagging.ContextWithIOTag(ctx, tag.String())

@ -44,7 +44,7 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientIntercepto

if err != nil {
if err != nil {
tag = IOTagClient
tag = IOTagClient
}
}
if tag.IsLocal() {
if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
tag = IOTagInternal
tag = IOTagInternal
}
}
ctx = tagging.ContextWithIOTag(ctx, tag.String())
ctx = tagging.ContextWithIOTag(ctx, tag.String())
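The hunks above trade a `tag.IsLocal()` call for an explicit comparison. A small, self-contained sketch of that adjustment rule, with illustrative names rather than the real qos package:

```go
package main

import "fmt"

type IOTag string

const (
	IOTagClient     IOTag = "client"
	IOTagInternal   IOTag = "internal"
	IOTagBackground IOTag = "background"
	IOTagPolicer    IOTag = "policer"
	IOTagWritecache IOTag = "writecache"
)

// adjustOutgoing mirrors the branch's explicit check: locally-originated
// background-like tags are rewritten to "internal" before a request leaves
// the node; everything else passes through unchanged.
func adjustOutgoing(tag IOTag) IOTag {
	if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache {
		return IOTagInternal
	}
	return tag
}

func main() {
	fmt.Println(adjustOutgoing(IOTagPolicer)) // internal
	fmt.Println(adjustOutgoing(IOTagClient))  // client
}
```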
@ -74,7 +74,7 @@ func createScheduler(config limits.OpConfig) (scheduler, error) {

func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo {
result := make(map[string]scheduling.TagInfo)
result := make(map[string]scheduling.TagInfo)
for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} {
for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache} {
result[tag.String()] = scheduling.TagInfo{
result[tag.String()] = scheduling.TagInfo{
Share: defaultShare,
Share: defaultShare,
}
}
@ -3,13 +3,12 @@ package qos

const unknownStatsTag = "unknown"
const unknownStatsTag = "unknown"

var statTags = map[string]struct{}{
var statTags = map[string]struct{}{
IOTagBackground.String(): {},
IOTagClient.String(): {},
IOTagClient.String(): {},
IOTagCritical.String(): {},
IOTagBackground.String(): {},
IOTagInternal.String(): {},
IOTagInternal.String(): {},
IOTagPolicer.String(): {},
IOTagPolicer.String(): {},
IOTagTreeSync.String(): {},
IOTagWritecache.String(): {},
IOTagWritecache.String(): {},
IOTagCritical.String(): {},
unknownStatsTag: {},
unknownStatsTag: {},
}
}
@ -10,33 +10,30 @@ import (

type IOTag string
type IOTag string

const (
const (
IOTagBackground IOTag = "background"
IOTagClient IOTag = "client"
IOTagClient IOTag = "client"
IOTagCritical IOTag = "critical"
IOTagInternal IOTag = "internal"
IOTagInternal IOTag = "internal"
IOTagPolicer IOTag = "policer"
IOTagBackground IOTag = "background"
IOTagTreeSync IOTag = "treesync"
IOTagWritecache IOTag = "writecache"
IOTagWritecache IOTag = "writecache"
IOTagPolicer IOTag = "policer"
IOTagCritical IOTag = "critical"

ioTagUnknown IOTag = ""
ioTagUnknown IOTag = ""
)
)

func FromRawString(s string) (IOTag, error) {
func FromRawString(s string) (IOTag, error) {
switch s {
switch s {
case string(IOTagBackground):
return IOTagBackground, nil
case string(IOTagClient):
return IOTagClient, nil
case string(IOTagCritical):
case string(IOTagCritical):
return IOTagCritical, nil
return IOTagCritical, nil
case string(IOTagClient):
return IOTagClient, nil
case string(IOTagInternal):
case string(IOTagInternal):
return IOTagInternal, nil
return IOTagInternal, nil
case string(IOTagPolicer):
case string(IOTagBackground):
return IOTagPolicer, nil
return IOTagBackground, nil
case string(IOTagTreeSync):
return IOTagTreeSync, nil
case string(IOTagWritecache):
case string(IOTagWritecache):
return IOTagWritecache, nil
return IOTagWritecache, nil
case string(IOTagPolicer):
return IOTagPolicer, nil
default:
default:
return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
return ioTagUnknown, fmt.Errorf("unknown tag %s", s)
}
}

@ -53,7 +50,3 @@ func IOTagFromContext(ctx context.Context) string {

}
}
return tag
return tag
}
}

func (t IOTag) IsLocal() bool {
return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync
}
@ -42,12 +42,11 @@ func validateOpConfig(c limits.OpConfig) error {

func validateTags(configTags []limits.IOTagConfig) error {
func validateTags(configTags []limits.IOTagConfig) error {
tags := map[IOTag]tagConfig{
tags := map[IOTag]tagConfig{
IOTagBackground: {},
IOTagClient: {},
IOTagClient: {},
IOTagInternal: {},
IOTagInternal: {},
IOTagPolicer: {},
IOTagBackground: {},
IOTagTreeSync: {},
IOTagWritecache: {},
IOTagWritecache: {},
IOTagPolicer: {},
}
}
for _, t := range configTags {
for _, t := range configTags {
tag, err := FromRawString(t.Tag)
tag, err := FromRawString(t.Tag)
@ -139,8 +139,7 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,

var containerID cid.ID
var containerID cid.ID
var offset []byte
var offset []byte
graveyardBkt := tx.Bucket(graveyardBucketName)
bc := newBucketCache()
garbageBkt := tx.Bucket(garbageBucketName)

rawAddr := make([]byte, cidSize, addressKeySize)
rawAddr := make([]byte, cidSize, addressKeySize)

@ -169,7 +168,7 @@ loop:

bkt := tx.Bucket(name)
bkt := tx.Bucket(name)
if bkt != nil {
if bkt != nil {
copy(rawAddr, cidRaw)
copy(rawAddr, cidRaw)
result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID,
result, count, cursor, threshold, currEpoch)
result, count, cursor, threshold, currEpoch)
if err != nil {
if err != nil {
return nil, nil, err
return nil, nil, err

@ -204,9 +203,10 @@ loop:

// selectNFromBucket similar to selectAllFromBucket but uses cursor to find
// selectNFromBucket similar to selectAllFromBucket but uses cursor to find
// object to start selecting from. Ignores inhumed objects.
// object to start selecting from. Ignores inhumed objects.
func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
func selectNFromBucket(
bc *bucketCache,
bkt *bbolt.Bucket, // main bucket
objType objectSDK.Type, // type of the objects stored in the main bucket
objType objectSDK.Type, // type of the objects stored in the main bucket
graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets
cidRaw []byte, // container ID prefix, optimization
cidRaw []byte, // container ID prefix, optimization
cnt cid.ID, // container ID
cnt cid.ID, // container ID
to []objectcore.Info, // listing result
to []objectcore.Info, // listing result

@ -219,7 +219,6 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket

cursor = new(Cursor)
cursor = new(Cursor)
}
}

count := len(to)
c := bkt.Cursor()
c := bkt.Cursor()
k, v := c.First()
k, v := c.First()

@ -231,7 +230,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket

}
}

for ; k != nil; k, v = c.Next() {
for ; k != nil; k, v = c.Next() {
if count >= limit {
if len(to) >= limit {
break
break
}
}

@ -241,6 +240,8 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket

}
}

offset = k
offset = k
graveyardBkt := getGraveyardBucket(bc, bkt.Tx())
garbageBkt := getGarbageBucket(bc, bkt.Tx())
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
continue
continue
}
}

@ -251,7 +252,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket

}
}

expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
if hasExpEpoch && expEpoch < currEpoch && !objectLocked(bkt.Tx(), cnt, obj) {
if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) {
continue
continue
}
}

@ -273,7 +274,6 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket

a.SetContainer(cnt)
a.SetContainer(cnt)
a.SetObject(obj)
a.SetObject(obj)
to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo})
count++
}
}

return to, offset, cursor, nil
return to, offset, cursor, nil
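The listing code on one side of this hunk passes a `bucketCache` and resolves the graveyard/garbage buckets through it. A rough sketch of what such a per-transaction cache could look like, assuming bbolt and illustrative bucket names; it is not a copy of the metabase implementation.

```go
package meta

import "go.etcd.io/bbolt"

var (
	graveyardBucketName = []byte("Graveyard")
	garbageBucketName   = []byte("Garbage")
)

// bucketCache memoizes bucket handles so they are looked up at most once
// per bbolt transaction, instead of on every processed key.
type bucketCache struct {
	graveyard *bbolt.Bucket
	garbage   *bbolt.Bucket
}

func newBucketCache() *bucketCache { return &bucketCache{} }

// getGraveyardBucket returns the cached graveyard bucket, resolving it from
// the transaction on first use.
func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
	if bc.graveyard == nil {
		bc.graveyard = tx.Bucket(graveyardBucketName)
	}
	return bc.graveyard
}

// getGarbageBucket does the same for the garbage bucket.
func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket {
	if bc.garbage == nil {
		bc.garbage = tx.Bucket(garbageBucketName)
	}
	return bc.garbage
}
```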
@ -527,8 +527,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {

return
return
}
}

var release qos.ReleaseFunc
release, err := s.opsLimiter.ReadRequest(ctx)
release, err = s.opsLimiter.ReadRequest(ctx)
if err != nil {
if err != nil {
log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
s.m.RUnlock()
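The hunk above acquires the shard operations limiter before iterating the graveyard and releases it afterwards. A simplified, hypothetical limiter with the same acquire/release shape, for illustration only:

```go
package main

import (
	"context"
	"fmt"
)

type ReleaseFunc func()

// Limiter bounds the number of concurrent operations with a buffered channel.
type Limiter struct{ sem chan struct{} }

func NewLimiter(n int) *Limiter { return &Limiter{sem: make(chan struct{}, n)} }

// ReadRequest blocks until a slot is available or the context is cancelled,
// and returns a release callback that must be called when the work is done.
func (l *Limiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) {
	select {
	case l.sem <- struct{}{}:
		return func() { <-l.sem }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	l := NewLimiter(1)
	release, err := l.ReadRequest(context.Background())
	if err != nil {
		panic(err)
	}
	defer release()
	fmt.Println("doing a read under the limiter")
}
```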
@ -9,7 +9,6 @@ import (

"sync/atomic"
"sync/atomic"
"time"
"time"

nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"
morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics"

@ -61,9 +60,6 @@ type Client struct {

rpcActor *actor.Actor // neo-go RPC actor
rpcActor *actor.Actor // neo-go RPC actor
gasToken *nep17.Token // neo-go GAS token wrapper
gasToken *nep17.Token // neo-go GAS token wrapper
rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper
rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper
nnsHash util.Uint160 // NNS contract hash

nnsReader *nnsClient.ContractReader // NNS contract wrapper

acc *wallet.Account // neo account
acc *wallet.Account // neo account
accAddr util.Uint160 // account's address
accAddr util.Uint160 // account's address

@ -98,12 +94,27 @@ type Client struct {

type cache struct {
type cache struct {
m sync.RWMutex
m sync.RWMutex

nnsHash *util.Uint160
gKey *keys.PublicKey
gKey *keys.PublicKey
txHeights *lru.Cache[util.Uint256, uint32]
txHeights *lru.Cache[util.Uint256, uint32]

metrics metrics.MorphCacheMetrics
metrics metrics.MorphCacheMetrics
}
}

func (c *cache) nns() *util.Uint160 {
c.m.RLock()
defer c.m.RUnlock()

return c.nnsHash
}

func (c *cache) setNNSHash(nnsHash util.Uint160) {
c.m.Lock()
defer c.m.Unlock()

c.nnsHash = &nnsHash
}

func (c *cache) groupKey() *keys.PublicKey {
func (c *cache) groupKey() *keys.PublicKey {
c.m.RLock()
c.m.RLock()
defer c.m.RUnlock()
defer c.m.RUnlock()

@ -122,6 +133,7 @@ func (c *cache) invalidate() {

c.m.Lock()
c.m.Lock()
defer c.m.Unlock()
defer c.m.Unlock()

c.nnsHash = nil
c.gKey = nil
c.gKey = nil
c.txHeights.Purge()
c.txHeights.Purge()
}
}

@ -579,7 +591,6 @@ func (c *Client) setActor(act *actor.Actor) {

c.rpcActor = act
c.rpcActor = act
c.gasToken = nep17.New(act, gas.Hash)
c.gasToken = nep17.New(act, gas.Hash)
c.rolemgmt = rolemgmt.New(act)
c.rolemgmt = rolemgmt.New(act)
c.nnsReader = nnsClient.NewReader(act, c.nnsHash)
}
}

func (c *Client) GetActor() *actor.Actor {
func (c *Client) GetActor() *actor.Actor {

@ -145,11 +145,6 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er

if cli.client == nil {
if cli.client == nil {
return nil, ErrNoHealthyEndpoint
return nil, ErrNoHealthyEndpoint
}
}
cs, err := cli.client.GetContractStateByID(nnsContractID)
if err != nil {
return nil, fmt.Errorf("resolve nns hash: %w", err)
}
cli.nnsHash = cs.Hash
cli.setActor(act)
cli.setActor(act)

go cli.closeWaiter(ctx)
go cli.closeWaiter(ctx)
@ -8,12 +8,14 @@ import (

"time"
"time"

"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
)
)

const (
const (

@ -35,8 +37,12 @@ const (

NNSPolicyContractName = "policy.frostfs"
NNSPolicyContractName = "policy.frostfs"
)
)

// ErrNNSRecordNotFound means that there is no such record in NNS contract.
var (
var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")
// ErrNNSRecordNotFound means that there is no such record in NNS contract.
ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract")

errEmptyResultStack = errors.New("returned result stack is empty")
)

// NNSAlphabetContractName returns contract name of the alphabet contract in NNS
// NNSAlphabetContractName returns contract name of the alphabet contract in NNS
// based on alphabet index.
// based on alphabet index.

@ -55,36 +61,97 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) {

return util.Uint160{}, ErrConnectionLost
return util.Uint160{}, ErrConnectionLost
}
}

sh, err = nnsResolve(c.nnsReader, name)
nnsHash, err := c.NNSHash()
if err != nil {
return util.Uint160{}, err
}

sh, err = nnsResolve(c.client, nnsHash, name)
if err != nil {
if err != nil {
return sh, fmt.Errorf("NNS.resolve: %w", err)
return sh, fmt.Errorf("NNS.resolve: %w", err)
}
}
return sh, nil
return sh, nil
}
}

func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) {
// NNSHash returns NNS contract hash.
available, err := r.IsAvailable(domain)
func (c *Client) NNSHash() (util.Uint160, error) {
c.switchLock.RLock()
defer c.switchLock.RUnlock()

if c.inactive {
return util.Uint160{}, ErrConnectionLost
}

success := false
startedAt := time.Now()

defer func() {
c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt))
}()

nnsHash := c.cache.nns()

if nnsHash == nil {
cs, err := c.client.GetContractStateByID(nnsContractID)
if err != nil {
return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err)
}

c.cache.setNNSHash(cs.Hash)
nnsHash = &cs.Hash
}
success = true
return *nnsHash, nil
}

func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) {
found, err := exists(c, nnsHash, domain)
if err != nil {
if err != nil {
return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err)
}
}

if available {
if !found {
return nil, ErrNNSRecordNotFound
return nil, ErrNNSRecordNotFound
}
}

return r.Resolve(domain, big.NewInt(int64(nns.TXT)))
result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{
{
Type: smartcontract.StringType,
Value: domain,
},
{
Type: smartcontract.IntegerType,
Value: big.NewInt(int64(nns.TXT)),
},
}, nil)
if err != nil {
return nil, err
}
if result.State != vmstate.Halt.String() {
return nil, fmt.Errorf("invocation failed: %s", result.FaultException)
}
if len(result.Stack) == 0 {
return nil, errEmptyResultStack
}
return result.Stack[0], nil
}
}

func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) {
func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) {
arr, err := nnsResolveItem(r, domain)
res, err := nnsResolveItem(c, nnsHash, domain)
if err != nil {
if err != nil {
return util.Uint160{}, err
return util.Uint160{}, err
}
}

if len(arr) == 0 {
// Parse the result of resolving NNS record.
return util.Uint160{}, errors.New("NNS record is missing")
// It works with multiple formats (corresponding to multiple NNS versions).
// If array of hashes is provided, it returns only the first one.
if arr, ok := res.Value().([]stackitem.Item); ok {
if len(arr) == 0 {
return util.Uint160{}, errors.New("NNS record is missing")
}
res = arr[0]
}
}
bs, err := arr[0].TryBytes()
bs, err := res.TryBytes()
if err != nil {
if err != nil {
return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
return util.Uint160{}, fmt.Errorf("malformed response: %w", err)
}
}

@ -104,6 +171,33 @@ func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error

return util.Uint160{}, errors.New("no valid hashes are found")
return util.Uint160{}, errors.New("no valid hashes are found")
}
}

func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) {
result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{
{
Type: smartcontract.StringType,
Value: domain,
},
}, nil)
if err != nil {
return false, err
}

if len(result.Stack) == 0 {
return false, errEmptyResultStack
}

res := result.Stack[0]

available, err := res.TryBool()
if err != nil {
return false, fmt.Errorf("malformed response: %w", err)
}

// not available means that it is taken
// and, therefore, exists
return !available, nil
}

// SetGroupSignerScope makes the default signer scope include all FrostFS contracts.
// SetGroupSignerScope makes the default signer scope include all FrostFS contracts.
// Should be called for side-chain client only.
// Should be called for side-chain client only.
func (c *Client) SetGroupSignerScope() error {
func (c *Client) SetGroupSignerScope() error {

@ -147,12 +241,18 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) {

return gKey, nil
return gKey, nil
}
}

arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName)
nnsHash, err := c.NNSHash()
if err != nil {
if err != nil {
return nil, err
return nil, err
}
}

if len(arr) == 0 {
item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName)
if err != nil {
return nil, err
}

arr, ok := item.Value().([]stackitem.Item)
if !ok || len(arr) == 0 {
return nil, errors.New("NNS record is missing")
return nil, errors.New("NNS record is missing")
}
}
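The resolver above ends with "no valid hashes are found" after trying to interpret the resolved TXT record. A hedged sketch of the usual two-step parsing of such a record (little-endian hex first, then the Neo address form) using neo-go helpers; this is illustrative and not a copy of the node's helper.

```go
package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// parseContractHash tries both record formats an NNS TXT record may carry:
// a little-endian hex script hash or a Neo address string.
func parseContractHash(record string) (util.Uint160, error) {
	if h, err := util.Uint160DecodeStringLE(record); err == nil {
		return h, nil
	}
	if h, err := address.StringToUint160(record); err == nil {
		return h, nil
	}
	return util.Uint160{}, fmt.Errorf("no valid hashes are found in %q", record)
}

func main() {
	// Hash value reused from the testnet config above, purely as sample input.
	h, err := parseContractHash("e0420c216003747626670d1424569c17c79015bf")
	fmt.Println(h, err)
}
```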
@ -38,7 +38,8 @@ type (

alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness
alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness

proxy util.Uint160
notary util.Uint160
proxy util.Uint160
}
}

notaryCfg struct {
notaryCfg struct {

@ -101,6 +102,7 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error {

txValidTime: cfg.txValidTime,
txValidTime: cfg.txValidTime,
roundTime: cfg.roundTime,
roundTime: cfg.roundTime,
alphabetSource: cfg.alphabetSource,
alphabetSource: cfg.alphabetSource,
notary: notary.Hash,
}
}

c.notary = notaryCfg
c.notary = notaryCfg

@ -186,7 +188,7 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8)

func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) {
txHash, vub, err := c.gasToken.Transfer(
txHash, vub, err := c.gasToken.Transfer(
c.accAddr,
c.accAddr,
notary.Hash,
c.notary.notary,
big.NewInt(int64(amount)),
big.NewInt(int64(amount)),
[]any{c.acc.PrivateKey().GetScriptHash(), till})
[]any{c.acc.PrivateKey().GetScriptHash(), till})
if err != nil {
if err != nil {
4
pkg/network/cache/multi.go
vendored
4
pkg/network/cache/multi.go
vendored

@ -66,8 +66,8 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address

grpc.WithChainUnaryInterceptor(
grpc.WithChainUnaryInterceptor(
qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
tagging.NewUnaryClientInterceptor(),
tagging.NewUnaryClientInteceptor(),
),
),
grpc.WithChainStreamInterceptor(
grpc.WithChainStreamInterceptor(
qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
166
pkg/services/object/acl/eacl/v2/eacl_test.go
Normal file
166
pkg/services/object/acl/eacl/v2/eacl_test.go
Normal file
|
@ -0,0 +1,166 @@
|
||||||
|
package v2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
|
||||||
|
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||||
|
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type testLocalStorage struct {
|
||||||
|
t *testing.T
|
||||||
|
|
||||||
|
expAddr oid.Address
|
||||||
|
|
||||||
|
obj *objectSDK.Object
|
||||||
|
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
|
||||||
|
require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
|
||||||
|
require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
|
||||||
|
|
||||||
|
return s.obj, s.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func testXHeaders(strs ...string) []session.XHeader {
|
||||||
|
res := make([]session.XHeader, len(strs)/2)
|
||||||
|
|
||||||
|
for i := 0; i < len(strs); i += 2 {
|
||||||
|
res[i/2].SetKey(strs[i])
|
||||||
|
res[i/2].SetValue(strs[i+1])
|
||||||
|
}
|
||||||
|
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHeadRequest(t *testing.T) {
|
||||||
|
req := new(objectV2.HeadRequest)
|
||||||
|
|
||||||
|
meta := new(session.RequestMetaHeader)
|
||||||
|
req.SetMetaHeader(meta)
|
||||||
|
|
||||||
|
body := new(objectV2.HeadRequestBody)
|
||||||
|
req.SetBody(body)
|
||||||
|
|
||||||
|
addr := oidtest.Address()
|
||||||
|
|
||||||
|
var addrV2 refs.Address
|
||||||
|
addr.WriteToV2(&addrV2)
|
||||||
|
|
||||||
|
body.SetAddress(&addrV2)
|
||||||
|
|
||||||
|
xKey := "x-key"
|
||||||
|
xVal := "x-val"
|
||||||
|
xHdrs := testXHeaders(
|
||||||
|
xKey, xVal,
|
||||||
|
)
|
||||||
|
|
||||||
|
meta.SetXHeaders(xHdrs)
|
||||||
|
|
||||||
|
obj := objectSDK.New()
|
||||||
|
|
||||||
|
attrKey := "attr_key"
|
||||||
|
attrVal := "attr_val"
|
||||||
|
var attr objectSDK.Attribute
|
||||||
|
attr.SetKey(attrKey)
|
||||||
|
attr.SetValue(attrVal)
|
||||||
|
obj.SetAttributes(attr)
|
||||||
|
|
||||||
|
table := new(eaclSDK.Table)
|
||||||
|
|
||||||
|
priv, err := keys.NewPrivateKey()
|
||||||
|
require.NoError(t, err)
|
||||||
|
senderKey := priv.PublicKey()
|
||||||
|
|
||||||
|
r := eaclSDK.NewRecord()
|
||||||
|
r.SetOperation(eaclSDK.OperationHead)
|
||||||
|
r.SetAction(eaclSDK.ActionDeny)
|
||||||
|
r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal)
|
||||||
|
r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal)
|
||||||
|
eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
|
||||||
|
|
||||||
|
table.AddRecord(r)
|
||||||
|
|
||||||
|
lStorage := &testLocalStorage{
|
||||||
|
t: t,
|
||||||
|
expAddr: addr,
|
||||||
|
obj: obj,
|
||||||
|
}
|
||||||
|
|
||||||
|
id := addr.Object()
|
||||||
|
|
||||||
|
newSource := func(t *testing.T) eaclSDK.TypedHeaderSource {
|
||||||
|
hdrSrc, err := NewMessageHeaderSource(
|
||||||
|
lStorage,
|
||||||
|
NewRequestXHeaderSource(req),
|
||||||
|
addr.Container(),
|
||||||
|
WithOID(&id))
|
||||||
|
require.NoError(t, err)
|
||||||
|
return hdrSrc
|
||||||
|
}
|
||||||
|
|
||||||
|
cnr := addr.Container()
|
||||||
|
|
||||||
|
unit := new(eaclSDK.ValidationUnit).
|
||||||
|
WithContainerID(&cnr).
|
||||||
|
WithOperation(eaclSDK.OperationHead).
|
||||||
|
WithSenderKey(senderKey.Bytes()).
|
||||||
|
WithEACLTable(table)
|
||||||
|
|
||||||
|
validator := eaclSDK.NewValidator()
|
||||||
|
|
||||||
|
checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t)))
|
||||||
|
|
||||||
|
meta.SetXHeaders(nil)
|
||||||
|
|
||||||
|
checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
|
||||||
|
|
||||||
|
meta.SetXHeaders(xHdrs)
|
||||||
|
|
||||||
|
obj.SetAttributes()
|
||||||
|
|
||||||
|
checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
|
||||||
|
|
||||||
|
lStorage.err = errors.New("any error")
|
||||||
|
|
||||||
|
checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
|
||||||
|
|
||||||
|
r.SetAction(eaclSDK.ActionAllow)
|
||||||
|
|
||||||
|
rID := eaclSDK.NewRecord()
|
||||||
|
rID.SetOperation(eaclSDK.OperationHead)
|
||||||
|
rID.SetAction(eaclSDK.ActionDeny)
|
||||||
|
rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object())
|
||||||
|
eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey))
|
||||||
|
|
||||||
|
table = eaclSDK.NewTable()
|
||||||
|
table.AddRecord(r)
|
||||||
|
table.AddRecord(rID)
|
||||||
|
|
||||||
|
unit.WithEACLTable(table)
|
||||||
|
checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
|
||||||
|
actual, fromRule := v.CalculateAction(u)
|
||||||
|
require.True(t, fromRule)
|
||||||
|
require.Equal(t, expected, actual)
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) {
|
||||||
|
actual, fromRule := v.CalculateAction(u)
|
||||||
|
require.False(t, fromRule)
|
||||||
|
require.Equal(t, eaclSDK.ActionAllow, actual)
|
||||||
|
}
|
246
pkg/services/object/acl/eacl/v2/headers.go
Normal file
246
pkg/services/object/acl/eacl/v2/headers.go
Normal file
|
@ -0,0 +1,246 @@
|
||||||
|
package v2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
|
||||||
|
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
|
||||||
|
refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||||
|
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Option func(*cfg)
|
||||||
|
|
||||||
|
type cfg struct {
|
||||||
|
storage ObjectStorage
|
||||||
|
|
||||||
|
msg XHeaderSource
|
||||||
|
|
||||||
|
cnr cid.ID
|
||||||
|
obj *oid.ID
|
||||||
|
}
|
||||||
|
|
||||||
|
type ObjectStorage interface {
|
||||||
|
Head(context.Context, oid.Address) (*objectSDK.Object, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Request interface {
|
||||||
|
GetMetaHeader() *session.RequestMetaHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
type Response interface {
|
||||||
|
GetMetaHeader() *session.ResponseMetaHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
type headerSource struct {
|
||||||
|
requestHeaders []eaclSDK.Header
|
||||||
|
objectHeaders []eaclSDK.Header
|
||||||
|
|
||||||
|
incompleteObjectHeaders bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) {
|
||||||
|
cfg := &cfg{
|
||||||
|
storage: os,
|
||||||
|
cnr: cnrID,
|
||||||
|
msg: xhs,
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range opts {
|
||||||
|
opts[i](cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.msg == nil {
|
||||||
|
return nil, errors.New("message is not provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
var res headerSource
|
||||||
|
|
||||||
|
err := cfg.readObjectHeaders(&res)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
res.requestHeaders = cfg.msg.GetXHeaders()
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) {
|
||||||
|
switch typ {
|
||||||
|
default:
|
||||||
|
return nil, true
|
||||||
|
case eaclSDK.HeaderFromRequest:
|
||||||
|
return h.requestHeaders, true
|
||||||
|
case eaclSDK.HeaderFromObject:
|
||||||
|
return h.objectHeaders, !h.incompleteObjectHeaders
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type xHeader session.XHeader
|
||||||
|
|
||||||
|
func (x xHeader) Key() string {
|
||||||
|
return (*session.XHeader)(&x).GetKey()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x xHeader) Value() string {
|
||||||
|
return (*session.XHeader)(&x).GetValue()
|
||||||
|
}
|
||||||
|
|
||||||
|
var errMissingOID = errors.New("object ID is missing")
|
||||||
|
|
||||||
|
func (h *cfg) readObjectHeaders(dst *headerSource) error {
|
||||||
|
switch m := h.msg.(type) {
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("unexpected message type %T", h.msg))
|
||||||
|
case requestXHeaderSource:
|
||||||
|
return h.readObjectHeadersFromRequestXHeaderSource(m, dst)
|
||||||
|
case responseXHeaderSource:
|
||||||
|
return h.readObjectHeadersResponseXHeaderSource(m, dst)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error {
|
||||||
|
switch req := m.req.(type) {
|
||||||
|
case
|
||||||
|
*objectV2.GetRequest,
|
||||||
|
*objectV2.HeadRequest:
|
||||||
|
if h.obj == nil {
|
||||||
|
return errMissingOID
|
||||||
|
}
|
||||||
|
|
||||||
|
objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
|
||||||
|
|
||||||
|
dst.objectHeaders = objHeaders
|
||||||
|
dst.incompleteObjectHeaders = !completed
|
||||||
|
case
|
||||||
|
*objectV2.GetRangeRequest,
|
||||||
|
*objectV2.GetRangeHashRequest,
|
||||||
|
*objectV2.DeleteRequest:
|
||||||
|
if h.obj == nil {
|
||||||
|
return errMissingOID
|
||||||
|
}
|
||||||
|
|
||||||
|
dst.objectHeaders = addressHeaders(h.cnr, h.obj)
|
||||||
|
case *objectV2.PutRequest:
|
||||||
|
if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
|
||||||
|
oV2 := new(objectV2.Object)
|
||||||
|
oV2.SetObjectID(v.GetObjectID())
|
||||||
|
oV2.SetHeader(v.GetHeader())
|
||||||
|
|
||||||
|
dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
|
||||||
|
}
|
||||||
|
case *objectV2.PutSingleRequest:
|
||||||
|
dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj)
|
||||||
|
case *objectV2.SearchRequest:
|
||||||
|
cnrV2 := req.GetBody().GetContainerID()
|
||||||
|
var cnr cid.ID
|
||||||
|
|
||||||
|
if cnrV2 != nil {
|
||||||
|
if err := cnr.ReadFromV2(*cnrV2); err != nil {
|
||||||
|
return fmt.Errorf("can't parse container ID: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error {
|
||||||
|
switch resp := m.resp.(type) {
|
||||||
|
default:
|
||||||
|
objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
|
||||||
|
|
||||||
|
dst.objectHeaders = objectHeaders
|
||||||
|
dst.incompleteObjectHeaders = !completed
|
||||||
|
case *objectV2.GetResponse:
|
||||||
|
if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
|
||||||
|
oV2 := new(objectV2.Object)
|
||||||
|
oV2.SetObjectID(v.GetObjectID())
|
||||||
|
oV2.SetHeader(v.GetHeader())
|
||||||
|
|
||||||
|
dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
|
||||||
|
}
|
||||||
|
case *objectV2.HeadResponse:
|
||||||
|
oV2 := new(objectV2.Object)
|
||||||
|
|
||||||
|
var hdr *objectV2.Header
|
||||||
|
|
||||||
|
switch v := resp.GetBody().GetHeaderPart().(type) {
|
||||||
|
case *objectV2.ShortHeader:
|
||||||
|
hdr = new(objectV2.Header)
|
||||||
|
|
||||||
|
var idV2 refsV2.ContainerID
|
||||||
|
h.cnr.WriteToV2(&idV2)
|
||||||
|
|
||||||
|
hdr.SetContainerID(&idV2)
|
||||||
|
hdr.SetVersion(v.GetVersion())
|
||||||
|
hdr.SetCreationEpoch(v.GetCreationEpoch())
|
||||||
|
hdr.SetOwnerID(v.GetOwnerID())
|
||||||
|
hdr.SetObjectType(v.GetObjectType())
|
||||||
|
hdr.SetPayloadLength(v.GetPayloadLength())
|
||||||
|
case *objectV2.HeaderWithSignature:
|
||||||
|
hdr = v.GetHeader()
|
||||||
|
}
|
||||||
|
|
||||||
|
oV2.SetHeader(hdr)
|
||||||
|
|
||||||
|
dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) {
|
||||||
|
if idObj != nil {
|
||||||
|
var addr oid.Address
|
||||||
|
addr.SetContainer(cnr)
|
||||||
|
addr.SetObject(*idObj)
|
||||||
|
|
||||||
|
obj, err := h.storage.Head(context.TODO(), addr)
|
||||||
|
if err == nil {
|
||||||
|
return headersFromObject(obj, cnr, idObj), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return addressHeaders(cnr, idObj), false
|
||||||
|
}
|
||||||
|
|
||||||
|
func cidHeader(idCnr cid.ID) sysObjHdr {
|
||||||
|
return sysObjHdr{
|
||||||
|
k: acl.FilterObjectContainerID,
|
||||||
|
v: idCnr.EncodeToString(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func oidHeader(obj oid.ID) sysObjHdr {
|
||||||
|
return sysObjHdr{
|
||||||
|
k: acl.FilterObjectID,
|
||||||
|
v: obj.EncodeToString(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ownerIDHeader(ownerID user.ID) sysObjHdr {
|
||||||
|
return sysObjHdr{
|
||||||
|
k: acl.FilterObjectOwnerID,
|
||||||
|
v: ownerID.EncodeToString(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
|
||||||
|
hh := make([]eaclSDK.Header, 0, 2)
|
||||||
|
hh = append(hh, cidHeader(cnr))
|
||||||
|
|
||||||
|
if oid != nil {
|
||||||
|
hh = append(hh, oidHeader(*oid))
|
||||||
|
}
|
||||||
|
|
||||||
|
return hh
|
||||||
|
}
|
92
pkg/services/object/acl/eacl/v2/object.go
Normal file
92
pkg/services/object/acl/eacl/v2/object.go
Normal file
|
@ -0,0 +1,92 @@
|
||||||
|
package v2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||||
|
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
)
|
||||||
|
|
||||||
|
type sysObjHdr struct {
|
||||||
|
k, v string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s sysObjHdr) Key() string {
|
||||||
|
return s.k
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s sysObjHdr) Value() string {
|
||||||
|
return s.v
|
||||||
|
}
|
||||||
|
|
||||||
|
func u64Value(v uint64) string {
|
||||||
|
return strconv.FormatUint(v, 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header {
|
||||||
|
var count int
|
||||||
|
for obj := obj; obj != nil; obj = obj.Parent() {
|
||||||
|
count += 9 + len(obj.Attributes())
|
||||||
|
}
|
||||||
|
|
||||||
|
res := make([]eaclSDK.Header, 0, count)
|
||||||
|
for ; obj != nil; obj = obj.Parent() {
|
||||||
|
res = append(res,
|
||||||
|
cidHeader(cnr),
|
||||||
|
// creation epoch
|
||||||
|
sysObjHdr{
|
||||||
|
k: acl.FilterObjectCreationEpoch,
|
||||||
|
v: u64Value(obj.CreationEpoch()),
|
||||||
|
},
|
||||||
|
// payload size
|
||||||
|
sysObjHdr{
|
||||||
|
k: acl.FilterObjectPayloadLength,
|
||||||
|
v: u64Value(obj.PayloadSize()),
|
||||||
|
},
|
||||||
|
// object version
|
||||||
|
sysObjHdr{
|
||||||
|
k: acl.FilterObjectVersion,
|
||||||
|
v: obj.Version().String(),
|
||||||
|
},
|
||||||
|
// object type
|
||||||
|
sysObjHdr{
|
||||||
|
k: acl.FilterObjectType,
|
||||||
|
v: obj.Type().String(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
if oid != nil {
|
||||||
|
res = append(res, oidHeader(*oid))
|
||||||
|
}
|
||||||
|
|
||||||
|
if idOwner := obj.OwnerID(); !idOwner.IsEmpty() {
|
||||||
|
res = append(res, ownerIDHeader(idOwner))
|
||||||
|
}
|
||||||
|
|
||||||
|
cs, ok := obj.PayloadChecksum()
|
||||||
|
if ok {
|
||||||
|
res = append(res, sysObjHdr{
|
||||||
|
k: acl.FilterObjectPayloadHash,
|
||||||
|
v: cs.String(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
cs, ok = obj.PayloadHomomorphicHash()
|
||||||
|
if ok {
|
||||||
|
res = append(res, sysObjHdr{
|
||||||
|
k: acl.FilterObjectHomomorphicHash,
|
||||||
|
v: cs.String(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
attrs := obj.Attributes()
|
||||||
|
for i := range attrs {
|
||||||
|
res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return res
|
||||||
|
}
|
11
pkg/services/object/acl/eacl/v2/opts.go
Normal file
11
pkg/services/object/acl/eacl/v2/opts.go
Normal file

@ -0,0 +1,11 @@

package v2

import (
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

func WithOID(v *oid.ID) Option {
    return func(c *cfg) {
        c.obj = v
    }
}
69
pkg/services/object/acl/eacl/v2/xheader.go
Normal file
69
pkg/services/object/acl/eacl/v2/xheader.go
Normal file

@ -0,0 +1,69 @@

package v2

import (
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
    eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
)

type XHeaderSource interface {
    GetXHeaders() []eaclSDK.Header
}

type requestXHeaderSource struct {
    req Request
}

func NewRequestXHeaderSource(req Request) XHeaderSource {
    return requestXHeaderSource{req: req}
}

type responseXHeaderSource struct {
    resp Response

    req Request
}

func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource {
    return responseXHeaderSource{resp: resp, req: req}
}

func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header {
    ln := 0

    for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
        ln += len(meta.GetXHeaders())
    }

    res := make([]eaclSDK.Header, 0, ln)
    for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
        x := meta.GetXHeaders()
        for i := range x {
            res = append(res, (xHeader)(x[i]))
        }
    }

    return res
}

func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header {
    ln := 0
    xHdrs := make([][]session.XHeader, 0)

    for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() {
        x := meta.GetXHeaders()

        ln += len(x)

        xHdrs = append(xHdrs, x)
    }

    res := make([]eaclSDK.Header, 0, ln)

    for i := range xHdrs {
        for j := range xHdrs[i] {
            res = append(res, xHeader(xHdrs[i][j]))
        }
    }

    return res
}
20
pkg/services/object/acl/v2/errors.go
Normal file
20
pkg/services/object/acl/v2/errors.go
Normal file

@ -0,0 +1,20 @@

package v2

import (
    "fmt"
)

const invalidRequestMessage = "malformed request"

func malformedRequestError(reason string) error {
    return fmt.Errorf("%s: %s", invalidRequestMessage, reason)
}

var (
    errEmptyBody               = malformedRequestError("empty body")
    errEmptyVerificationHeader = malformedRequestError("empty verification header")
    errEmptyBodySig            = malformedRequestError("empty at body signature")
    errInvalidSessionSig       = malformedRequestError("invalid session token signature")
    errInvalidSessionOwner     = malformedRequestError("invalid session token owner")
    errInvalidVerb             = malformedRequestError("session token verb is invalid")
)
12
pkg/services/object/acl/v2/opts.go
Normal file
12
pkg/services/object/acl/v2/opts.go
Normal file

@ -0,0 +1,12 @@

package v2

import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)

// WithLogger returns option to set logger.
func WithLogger(v *logger.Logger) Option {
    return func(c *cfg) {
        c.log = v
    }
}
152
pkg/services/object/acl/v2/request.go
Normal file
152
pkg/services/object/acl/v2/request.go
Normal file
|
@ -0,0 +1,152 @@
|
||||||
|
package v2
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RequestInfo groups parsed version-independent (from SDK library)
|
||||||
|
// request information and raw API request.
|
||||||
|
type RequestInfo struct {
|
||||||
|
basicACL acl.Basic
|
||||||
|
requestRole acl.Role
|
||||||
|
operation acl.Op // put, get, head, etc.
|
||||||
|
cnrOwner user.ID // container owner
|
||||||
|
|
||||||
|
// cnrNamespace defined to which namespace a container is belonged.
|
||||||
|
cnrNamespace string
|
||||||
|
|
||||||
|
idCnr cid.ID
|
||||||
|
|
||||||
|
// optional for some request
|
||||||
|
// e.g. Put, Search
|
||||||
|
obj *oid.ID
|
||||||
|
|
||||||
|
senderKey []byte
|
||||||
|
|
||||||
|
bearer *bearer.Token // bearer token of request
|
||||||
|
|
||||||
|
srcRequest any
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) {
|
||||||
|
r.basicACL = basicACL
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RequestInfo) SetRequestRole(requestRole acl.Role) {
|
||||||
|
r.requestRole = requestRole
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RequestInfo) SetSenderKey(senderKey []byte) {
|
||||||
|
r.senderKey = senderKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request returns raw API request.
|
||||||
|
func (r RequestInfo) Request() any {
|
||||||
|
return r.srcRequest
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainerOwner returns owner if the container.
|
||||||
|
func (r RequestInfo) ContainerOwner() user.ID {
|
||||||
|
return r.cnrOwner
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r RequestInfo) ContainerNamespace() string {
|
||||||
|
return r.cnrNamespace
|
||||||
|
}
|
||||||
|
|
||||||
|
// ObjectID return object ID.
|
||||||
|
func (r RequestInfo) ObjectID() *oid.ID {
|
||||||
|
return r.obj
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainerID return container ID.
|
||||||
|
func (r RequestInfo) ContainerID() cid.ID {
|
||||||
|
return r.idCnr
|
||||||
|
}
|
||||||
|
|
||||||
|
// CleanBearer forces cleaning bearer token information.
|
||||||
|
func (r *RequestInfo) CleanBearer() {
|
||||||
|
r.bearer = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bearer returns bearer token of the request.
|
||||||
|
func (r RequestInfo) Bearer() *bearer.Token {
|
||||||
|
return r.bearer
|
||||||
|
}
|
||||||
|
|
||||||
|
// BasicACL returns basic ACL of the container.
|
||||||
|
func (r RequestInfo) BasicACL() acl.Basic {
|
||||||
|
return r.basicACL
|
||||||
|
}
|
||||||
|
|
||||||
|
// SenderKey returns public key of the request's sender.
|
||||||
|
func (r RequestInfo) SenderKey() []byte {
|
||||||
|
return r.senderKey
|
||||||
|
}
|
||||||
|
|
||||||
|
// Operation returns request's operation.
|
||||||
|
func (r RequestInfo) Operation() acl.Op {
|
||||||
|
return r.operation
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestRole returns request sender's role.
|
||||||
|
func (r RequestInfo) RequestRole() acl.Role {
|
||||||
|
return r.requestRole
|
||||||
|
}
|
||||||
|
|
||||||
|
// MetaWithToken groups session and bearer tokens,
|
||||||
|
// verification header and raw API request.
|
||||||
|
type MetaWithToken struct {
|
||||||
|
vheader *sessionV2.RequestVerificationHeader
|
||||||
|
token *sessionSDK.Object
|
||||||
|
bearer *bearer.Token
|
||||||
|
src any
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestOwner returns ownerID and its public key
|
||||||
|
// according to internal meta information.
|
||||||
|
func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) {
|
||||||
|
if r.vheader == nil {
|
||||||
|
return nil, nil, errEmptyVerificationHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.bearer != nil && r.bearer.Impersonate() {
|
||||||
|
return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
// if session token is presented, use it as truth source
|
||||||
|
if r.token != nil {
|
||||||
|
// verify signature of session token
|
||||||
|
return ownerFromToken(r.token)
|
||||||
|
}
|
||||||
|
|
||||||
|
// otherwise get original body signature
|
||||||
|
bodySignature := originalBodySignature(r.vheader)
|
||||||
|
if bodySignature == nil {
|
||||||
|
return nil, nil, errEmptyBodySig
|
||||||
|
}
|
||||||
|
|
||||||
|
return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
|
||||||
|
key, err := unmarshalPublicKey(rawKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("invalid signature key: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var idSender user.ID
|
||||||
|
user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))
|
||||||
|
|
||||||
|
return &idSender, key, nil
|
||||||
|
}
|
|
@ -1,4 +1,4 @@
package ape
package v2

import (
	"testing"

@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) {
	vh.SetBodySignature(&userSignature)

	t.Run("empty verification header", func(t *testing.T) {
		req := Metadata{}
		req := MetaWithToken{}
		checkOwner(t, req, nil, errEmptyVerificationHeader)
	})
	t.Run("empty verification header signature", func(t *testing.T) {
		req := Metadata{
		req := MetaWithToken{
			VerificationHeader: new(sessionV2.RequestVerificationHeader),
			vheader: new(sessionV2.RequestVerificationHeader),
		}
		checkOwner(t, req, nil, errEmptyBodySig)
	})
	t.Run("no tokens", func(t *testing.T) {
		req := Metadata{
		req := MetaWithToken{
			VerificationHeader: vh,
			vheader: vh,
		}
		checkOwner(t, req, userPk.PublicKey(), nil)
	})

	t.Run("bearer without impersonate, no session", func(t *testing.T) {
		req := Metadata{
		req := MetaWithToken{
			VerificationHeader: vh,
			vheader: vh,
			BearerToken: newBearer(t, containerOwner, userID, false),
			bearer: newBearer(t, containerOwner, userID, false),
		}
		checkOwner(t, req, userPk.PublicKey(), nil)
	})
	t.Run("bearer with impersonate, no session", func(t *testing.T) {
		req := Metadata{
		req := MetaWithToken{
			VerificationHeader: vh,
			vheader: vh,
			BearerToken: newBearer(t, containerOwner, userID, true),
			bearer: newBearer(t, containerOwner, userID, true),
		}
		checkOwner(t, req, containerOwner.PublicKey(), nil)
	})

@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) {
		pk, err := keys.NewPrivateKey()
		require.NoError(t, err)

		req := Metadata{
		req := MetaWithToken{
			VerificationHeader: vh,
			vheader: vh,
			BearerToken: newBearer(t, containerOwner, userID, true),
			bearer: newBearer(t, containerOwner, userID, true),
			SessionToken: newSession(t, pk),
			token: newSession(t, pk),
		}
		checkOwner(t, req, containerOwner.PublicKey(), nil)
	})
	t.Run("with session", func(t *testing.T) {
		req := Metadata{
		req := MetaWithToken{
			VerificationHeader: vh,
			vheader: vh,
			SessionToken: newSession(t, containerOwner),
			token: newSession(t, containerOwner),
		}
		checkOwner(t, req, containerOwner.PublicKey(), nil)
	})

@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) {
		var tok sessionSDK.Object
		require.NoError(t, tok.ReadFromV2(tokV2))

		req := Metadata{
		req := MetaWithToken{
			VerificationHeader: vh,
			vheader: vh,
			SessionToken: &tok,
			token: &tok,
		}
		checkOwner(t, req, nil, errInvalidSessionOwner)
	})

@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool
	return &tok
}

func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) {
func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) {
	_, actual, err := req.RequestOwner()
	if expectedErr != nil {
		require.ErrorIs(t, err, expectedErr)
779 pkg/services/object/acl/v2/service.go Normal file

@ -0,0 +1,779 @@
package v2

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"go.uber.org/zap"
)

// Service checks basic ACL rules.
type Service struct {
	*cfg

	c objectCore.SenderClassifier
}

type putStreamBasicChecker struct {
	source *Service
	next   object.PutObjectStream
}

type patchStreamBasicChecker struct {
	source       *Service
	next         object.PatchObjectStream
	nonFirstSend bool
}

// Option represents Service constructor option.
type Option func(*cfg)

type cfg struct {
	log *logger.Logger

	containers container.Source

	irFetcher InnerRingFetcher

	nm netmap.Source

	next object.ServiceServer
}

// New is a constructor for object ACL checking service.
func New(next object.ServiceServer,
	nm netmap.Source,
	irf InnerRingFetcher,
	cs container.Source,
	opts ...Option,
) Service {
	cfg := &cfg{
		log:        logger.NewLoggerWrapper(zap.L()),
		next:       next,
		nm:         nm,
		irFetcher:  irf,
		containers: cs,
	}

	for i := range opts {
		opts[i](cfg)
	}

	return Service{
		cfg: cfg,
		c:   objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
	}
}

// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context.
// This allows to retrieve already calculated immutable request-specific values in next handler invocation.
type wrappedGetObjectStream struct {
	object.GetObjectStream

	requestInfo RequestInfo
}

func (w *wrappedGetObjectStream) Context() context.Context {
	return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{
		Namespace:      w.requestInfo.ContainerNamespace(),
		ContainerOwner: w.requestInfo.ContainerOwner(),
		SenderKey:      w.requestInfo.SenderKey(),
		Role:           w.requestInfo.RequestRole(),
		BearerToken:    w.requestInfo.Bearer(),
	})
}

func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream {
	return &wrappedGetObjectStream{
		GetObjectStream: getObjectStream,
		requestInfo:     reqInfo,
	}
}

// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context.
// This allows to retrieve already calculated immutable request-specific values in next handler invocation.
type wrappedRangeStream struct {
	object.GetObjectRangeStream

	requestInfo RequestInfo
}

func (w *wrappedRangeStream) Context() context.Context {
	return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{
		Namespace:      w.requestInfo.ContainerNamespace(),
		ContainerOwner: w.requestInfo.ContainerOwner(),
		SenderKey:      w.requestInfo.SenderKey(),
		Role:           w.requestInfo.RequestRole(),
		BearerToken:    w.requestInfo.Bearer(),
	})
}

func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream {
	return &wrappedRangeStream{
		GetObjectRangeStream: rangeStream,
		requestInfo:          reqInfo,
	}
}

// wrappedSearchStream propagates RequestContext into SearchStream's context.
// This allows to retrieve already calculated immutable request-specific values in next handler invocation.
type wrappedSearchStream struct {
	object.SearchStream

	requestInfo RequestInfo
}

func (w *wrappedSearchStream) Context() context.Context {
	return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{
		Namespace:      w.requestInfo.ContainerNamespace(),
		ContainerOwner: w.requestInfo.ContainerOwner(),
		SenderKey:      w.requestInfo.SenderKey(),
		Role:           w.requestInfo.RequestRole(),
		BearerToken:    w.requestInfo.Bearer(),
	})
}

func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream {
	return &wrappedSearchStream{
		SearchStream: searchStream,
		requestInfo:  reqInfo,
	}
}

// Get implements ServiceServer interface, makes ACL checks and calls
// next Get method in the ServiceServer pipeline.
func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
	cnr, err := getContainerIDFromRequest(request)
	if err != nil {
		return err
	}

	obj, err := getObjectIDFromRequestBody(request.GetBody())
	if err != nil {
		return err
	}

	sTok, err := originalSessionToken(request.GetMetaHeader())
	if err != nil {
		return err
	}

	if sTok != nil {
		err = assertSessionRelation(*sTok, cnr, obj)
		if err != nil {
			return err
		}
	}

	bTok, err := originalBearerToken(request.GetMetaHeader())
	if err != nil {
		return err
	}

	req := MetaWithToken{
		vheader: request.GetVerificationHeader(),
		token:   sTok,
		bearer:  bTok,
		src:     request,
	}

	reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectGet)
	if err != nil {
		return err
	}

	reqInfo.obj = obj

	return b.next.Get(request, newWrappedGetObjectStreamStream(stream, reqInfo))
}

func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) {
	streamer, err := b.next.Put(ctx)

	return putStreamBasicChecker{
		source: &b,
		next:   streamer,
	}, err
}

func (b Service) Patch(ctx context.Context) (object.PatchObjectStream, error) {
	streamer, err := b.next.Patch(ctx)

	return &patchStreamBasicChecker{
		source: &b,
		next:   streamer,
	}, err
}

func (b Service) Head(
	ctx context.Context,
	request *objectV2.HeadRequest,
) (*objectV2.HeadResponse, error) {
	cnr, err := getContainerIDFromRequest(request)
	if err != nil {
		return nil, err
	}

	obj, err := getObjectIDFromRequestBody(request.GetBody())
	if err != nil {
		return nil, err
	}

	sTok, err := originalSessionToken(request.GetMetaHeader())
	if err != nil {
		return nil, err
	}

	if sTok != nil {
		err = assertSessionRelation(*sTok, cnr, obj)
		if err != nil {
			return nil, err
		}
	}

	bTok, err := originalBearerToken(request.GetMetaHeader())
	if err != nil {
		return nil, err
	}

	req := MetaWithToken{
		vheader: request.GetVerificationHeader(),
		token:   sTok,
		bearer:  bTok,
		src:     request,
	}

	reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHead)
	if err != nil {
		return nil, err
	}

	reqInfo.obj = obj

	return b.next.Head(requestContext(ctx, reqInfo), request)
}

func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
	id, err := getContainerIDFromRequest(request)
	if err != nil {
		return err
	}

	sTok, err := originalSessionToken(request.GetMetaHeader())
	if err != nil {
		return err
	}

	if sTok != nil {
		err = assertSessionRelation(*sTok, id, nil)
		if err != nil {
			return err
		}
	}

	bTok, err := originalBearerToken(request.GetMetaHeader())
	if err != nil {
		return err
	}

	req := MetaWithToken{
		vheader: request.GetVerificationHeader(),
		token:   sTok,
		bearer:  bTok,
		src:     request,
	}

	reqInfo, err := b.findRequestInfo(stream.Context(), req, id, acl.OpObjectSearch)
	if err != nil {
		return err
	}

	return b.next.Search(request, newWrappedSearchStream(stream, reqInfo))
}

func (b Service) Delete(
	ctx context.Context,
	request *objectV2.DeleteRequest,
) (*objectV2.DeleteResponse, error) {
	cnr, err := getContainerIDFromRequest(request)
	if err != nil {
		return nil, err
	}

	obj, err := getObjectIDFromRequestBody(request.GetBody())
	if err != nil {
		return nil, err
	}

	sTok, err := originalSessionToken(request.GetMetaHeader())
	if err != nil {
		return nil, err
	}

	if sTok != nil {
		err = assertSessionRelation(*sTok, cnr, obj)
		if err != nil {
			return nil, err
		}
	}

	bTok, err := originalBearerToken(request.GetMetaHeader())
	if err != nil {
		return nil, err
	}

	req := MetaWithToken{
		vheader: request.GetVerificationHeader(),
		token:   sTok,
		bearer:  bTok,
		src:     request,
	}

	reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectDelete)
	if err != nil {
		return nil, err
	}

	reqInfo.obj = obj

	return b.next.Delete(requestContext(ctx, reqInfo), request)
}

func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
	cnr, err := getContainerIDFromRequest(request)
	if err != nil {
		return err
	}

	obj, err := getObjectIDFromRequestBody(request.GetBody())
	if err != nil {
		return err
	}

	sTok, err := originalSessionToken(request.GetMetaHeader())
	if err != nil {
		return err
	}

	if sTok != nil {
		err = assertSessionRelation(*sTok, cnr, obj)
		if err != nil {
			return err
		}
	}

	bTok, err := originalBearerToken(request.GetMetaHeader())
	if err != nil {
		return err
	}

	req := MetaWithToken{
		vheader: request.GetVerificationHeader(),
		token:   sTok,
		bearer:  bTok,
		src:     request,
	}

	reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectRange)
	if err != nil {
		return err
	}

	reqInfo.obj = obj

	return b.next.GetRange(request, newWrappedRangeStream(stream, reqInfo))
}

func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context {
	return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{
		Namespace:      reqInfo.ContainerNamespace(),
		ContainerOwner: reqInfo.ContainerOwner(),
		SenderKey:      reqInfo.SenderKey(),
		Role:           reqInfo.RequestRole(),
		BearerToken:    reqInfo.Bearer(),
	})
}

func (b Service) GetRangeHash(
	ctx context.Context,
	request *objectV2.GetRangeHashRequest,
) (*objectV2.GetRangeHashResponse, error) {
	cnr, err := getContainerIDFromRequest(request)
	if err != nil {
		return nil, err
	}

	obj, err := getObjectIDFromRequestBody(request.GetBody())
	if err != nil {
		return nil, err
	}

	sTok, err := originalSessionToken(request.GetMetaHeader())
	if err != nil {
		return nil, err
	}

	if sTok != nil {
		err = assertSessionRelation(*sTok, cnr, obj)
		if err != nil {
			return nil, err
		}
	}

	bTok, err := originalBearerToken(request.GetMetaHeader())
	if err != nil {
		return nil, err
	}

	req := MetaWithToken{
		vheader: request.GetVerificationHeader(),
		token:   sTok,
		bearer:  bTok,
		src:     request,
	}

	reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHash)
	if err != nil {
		return nil, err
	}

	reqInfo.obj = obj

	return b.next.GetRangeHash(requestContext(ctx, reqInfo), request)
}

func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
	cnr, err := getContainerIDFromRequest(request)
	if err != nil {
		return nil, err
	}

	idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID()
	if idV2 == nil {
		return nil, errors.New("missing object owner")
	}

	var idOwner user.ID

	err = idOwner.ReadFromV2(*idV2)
	if err != nil {
		return nil, fmt.Errorf("invalid object owner: %w", err)
	}

	obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID())
	if err != nil {
		return nil, err
	}

	var sTok *sessionSDK.Object
	sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
	if err != nil {
		return nil, err
	}

	bTok, err := originalBearerToken(request.GetMetaHeader())
	if err != nil {
		return nil, err
	}

	req := MetaWithToken{
		vheader: request.GetVerificationHeader(),
		token:   sTok,
		bearer:  bTok,
		src:     request,
	}

	reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectPut)
	if err != nil {
		return nil, err
	}

	reqInfo.obj = obj

	return b.next.PutSingle(requestContext(ctx, reqInfo), request)
}

func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
	body := request.GetBody()
	if body == nil {
		return errEmptyBody
	}

	part := body.GetObjectPart()
	if part, ok := part.(*objectV2.PutObjectPartInit); ok {
		cnr, err := getContainerIDFromRequest(request)
		if err != nil {
			return err
		}

		idV2 := part.GetHeader().GetOwnerID()
		if idV2 == nil {
			return errors.New("missing object owner")
		}

		var idOwner user.ID

		err = idOwner.ReadFromV2(*idV2)
		if err != nil {
			return fmt.Errorf("invalid object owner: %w", err)
		}

		objV2 := part.GetObjectID()
		var obj *oid.ID

		if objV2 != nil {
			obj = new(oid.ID)

			err = obj.ReadFromV2(*objV2)
			if err != nil {
				return err
			}
		}

		var sTok *sessionSDK.Object
		sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
		if err != nil {
			return err
		}

		bTok, err := originalBearerToken(request.GetMetaHeader())
		if err != nil {
			return err
		}

		req := MetaWithToken{
			vheader: request.GetVerificationHeader(),
			token:   sTok,
			bearer:  bTok,
			src:     request,
		}

		reqInfo, err := p.source.findRequestInfo(ctx, req, cnr, acl.OpObjectPut)
		if err != nil {
			return err
		}

		reqInfo.obj = obj

		ctx = requestContext(ctx, reqInfo)
	}

	return p.next.Send(ctx, request)
}

func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
	var sTok *sessionSDK.Object

	if tokV2 != nil {
		sTok = new(sessionSDK.Object)

		err := sTok.ReadFromV2(*tokV2)
		if err != nil {
			return nil, fmt.Errorf("invalid session token: %w", err)
		}

		if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
			// if session relates to object's removal, we don't check
			// relation of the tombstone to the session here since user
			// can't predict tomb's ID.
			err = assertSessionRelation(*sTok, cnr, nil)
		} else {
			err = assertSessionRelation(*sTok, cnr, obj)
		}

		if err != nil {
			return nil, err
		}
	}

	return sTok, nil
}

func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
	return p.next.CloseAndRecv(ctx)
}

func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
	body := request.GetBody()
	if body == nil {
		return errEmptyBody
	}

	if !p.nonFirstSend {
		p.nonFirstSend = true

		cnr, err := getContainerIDFromRequest(request)
		if err != nil {
			return err
		}

		objV2 := request.GetBody().GetAddress().GetObjectID()
		if objV2 == nil {
			return errors.New("missing oid")
		}
		obj := new(oid.ID)
		err = obj.ReadFromV2(*objV2)
		if err != nil {
			return err
		}

		var sTok *sessionSDK.Object
		sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken())
		if err != nil {
			return err
		}

		bTok, err := originalBearerToken(request.GetMetaHeader())
		if err != nil {
			return err
		}

		req := MetaWithToken{
			vheader: request.GetVerificationHeader(),
			token:   sTok,
			bearer:  bTok,
			src:     request,
		}

		reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(ctx, req, cnr)
		if err != nil {
			return err
		}

		reqInfo.obj = obj

		ctx = requestContext(ctx, reqInfo)
	}

	return p.next.Send(ctx, request)
}

func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) {
	return p.next.CloseAndRecv(ctx)
}

func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) {
	cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container
	if err != nil {
		return info, err
	}

	if req.token != nil {
		currentEpoch, err := b.nm.Epoch(ctx)
		if err != nil {
			return info, errors.New("can't fetch current epoch")
		}
		if req.token.ExpiredAt(currentEpoch) {
			return info, new(apistatus.SessionTokenExpired)
		}
		if req.token.InvalidAt(currentEpoch) {
			return info, fmt.Errorf("%s: token is invalid at %d epoch)",
				invalidRequestMessage, currentEpoch)
		}

		if !assertVerb(*req.token, op) {
			return info, errInvalidVerb
		}
	}

	// find request role and key
	ownerID, ownerKey, err := req.RequestOwner()
	if err != nil {
		return info, err
	}
	res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value)
	if err != nil {
		return info, err
	}

	info.basicACL = cnr.Value.BasicACL()
	info.requestRole = res.Role
	info.operation = op
	info.cnrOwner = cnr.Value.Owner()
	info.idCnr = idCnr

	cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
	if hasNamespace {
		info.cnrNamespace = cnrNamespace
	}

	// it is assumed that at the moment the key will be valid,
	// otherwise the request would not pass validation
	info.senderKey = res.Key

	// add bearer token if it is present in request
	info.bearer = req.bearer

	info.srcRequest = req.src

	return info, nil
}

// findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert.
func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) {
	cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container
	if err != nil {
		return info, err
	}

	if req.token != nil {
		currentEpoch, err := b.nm.Epoch(ctx)
		if err != nil {
			return info, errors.New("can't fetch current epoch")
		}
		if req.token.ExpiredAt(currentEpoch) {
			return info, new(apistatus.SessionTokenExpired)
		}
		if req.token.InvalidAt(currentEpoch) {
			return info, fmt.Errorf("%s: token is invalid at %d epoch)",
				invalidRequestMessage, currentEpoch)
		}
	}

	// find request role and key
	ownerID, ownerKey, err := req.RequestOwner()
	if err != nil {
		return info, err
	}
	res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value)
	if err != nil {
		return info, err
	}

	info.basicACL = cnr.Value.BasicACL()
	info.requestRole = res.Role
	info.cnrOwner = cnr.Value.Owner()
	info.idCnr = idCnr

	cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
	if hasNamespace {
		info.cnrNamespace = cnrNamespace
	}

	// it is assumed that at the moment the key will be valid,
	// otherwise the request would not pass validation
	info.senderKey = res.Key

	// add bearer token if it is present in request
	info.bearer = req.bearer

	info.srcRequest = req.src

	return info, nil
}
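For orientation, a minimal wiring sketch (not part of this change set) of how the ACL Service above wraps the next handler in the object service pipeline; the nm, irf, cs and next arguments stand for concrete implementations that live elsewhere in the node:

// newACLPipeline is a hypothetical constructor shown only for illustration.
func newACLPipeline(next object.ServiceServer, nm netmap.Source, irf InnerRingFetcher, cs container.Source) Service {
	// Every request handled by the returned Service passes findRequestInfo
	// (container fetch, session/bearer validation, sender classification)
	// before being delegated to next.
	return New(next, nm, irf, cs)
}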
11 pkg/services/object/acl/v2/types.go Normal file

@ -0,0 +1,11 @@
package v2

import "context"

// InnerRingFetcher is an interface that must provide
// Inner Ring information.
type InnerRingFetcher interface {
	// InnerRingKeys must return list of public keys of
	// the actual inner ring.
	InnerRingKeys(ctx context.Context) ([][]byte, error)
}
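As an illustration of the interface above, a minimal hypothetical stub that could back the sender classifier in unit tests (the type name is an assumption, not part of the repository):

// innerRingFetcherStub is a hypothetical test double for InnerRingFetcher.
type innerRingFetcherStub struct {
	keys [][]byte
}

// InnerRingKeys returns the preconfigured key list without any network calls.
func (f innerRingFetcherStub) InnerRingKeys(_ context.Context) ([][]byte, error) {
	return f.keys, nil
}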
@ -1,4 +1,4 @@
package ape
package v2

import (
	"crypto/ecdsa"

@ -6,34 +6,57 @@ import (
	"errors"
	"fmt"

	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
	refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
	sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
var errMissingContainerID = errors.New("missing container ID")
	if cidV2 != nil {
		if err = cnrID.ReadFromV2(*cidV2); err != nil {
func getContainerIDFromRequest(req any) (cid.ID, error) {
			return
	var idV2 *refsV2.ContainerID
	var id cid.ID

	switch v := req.(type) {
	case *objectV2.GetRequest:
		idV2 = v.GetBody().GetAddress().GetContainerID()
	case *objectV2.PutRequest:
		part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit)
		if !ok {
			return cid.ID{}, errors.New("can't get container ID in chunk")
		}
	} else {
		err = errMissingContainerID
		idV2 = part.GetHeader().GetContainerID()
		return
	case *objectV2.HeadRequest:
		idV2 = v.GetBody().GetAddress().GetContainerID()
	case *objectV2.SearchRequest:
		idV2 = v.GetBody().GetContainerID()
	case *objectV2.DeleteRequest:
		idV2 = v.GetBody().GetAddress().GetContainerID()
	case *objectV2.GetRangeRequest:
		idV2 = v.GetBody().GetAddress().GetContainerID()
	case *objectV2.GetRangeHashRequest:
		idV2 = v.GetBody().GetAddress().GetContainerID()
	case *objectV2.PutSingleRequest:
		idV2 = v.GetBody().GetObject().GetHeader().GetContainerID()
	case *objectV2.PatchRequest:
		idV2 = v.GetBody().GetAddress().GetContainerID()
	default:
		return cid.ID{}, errors.New("unknown request type")
	}

	if objV2 != nil {
	if idV2 == nil {
		objID = new(oid.ID)
		return cid.ID{}, errMissingContainerID
		if err = objID.ReadFromV2(*objV2); err != nil {
			return
		}
	}
	}
	return

	return id, id.ReadFromV2(*idV2)
}

// originalBearerToken goes down to original request meta header and fetches

@ -52,6 +75,50 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er
	return &tok, tok.ReadFromV2(*tokV2)
}

// originalSessionToken goes down to original request meta header and fetches
// session token from there.
func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) {
	for header.GetOrigin() != nil {
		header = header.GetOrigin()
	}

	tokV2 := header.GetSessionToken()
	if tokV2 == nil {
		return nil, nil
	}

	var tok sessionSDK.Object

	err := tok.ReadFromV2(*tokV2)
	if err != nil {
		return nil, fmt.Errorf("invalid session token: %w", err)
	}

	return &tok, nil
}

// getObjectIDFromRequestBody decodes oid.ID from the common interface of the
// object reference's holders. Returns an error if object ID is missing in the request.
func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) {
	idV2 := body.GetAddress().GetObjectID()
	return getObjectIDFromRefObjectID(idV2)
}

func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) {
	if idV2 == nil {
		return nil, errors.New("missing object ID")
	}

	var id oid.ID

	err := id.ReadFromV2(*idV2)
	if err != nil {
		return nil, err
	}

	return &id, nil
}

func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) {
	// 1. First check signature of session token.
	if !token.VerifySignature() {

@ -105,16 +172,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
	return id2.Equals(id)
}

// assertVerb checks that token verb corresponds to the method.
// assertVerb checks that token verb corresponds to op.
func assertVerb(tok sessionSDK.Object, method string) bool {
func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
	switch method {
	switch op {
	case nativeschema.MethodPutObject:
	case acl.OpObjectPut:
		return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch)
	case nativeschema.MethodDeleteObject:
	case acl.OpObjectDelete:
		return tok.AssertVerb(sessionSDK.VerbObjectDelete)
	case nativeschema.MethodGetObject:
	case acl.OpObjectGet:
		return tok.AssertVerb(sessionSDK.VerbObjectGet)
	case nativeschema.MethodHeadObject:
	case acl.OpObjectHead:
		return tok.AssertVerb(
			sessionSDK.VerbObjectHead,
			sessionSDK.VerbObjectGet,

@ -123,15 +190,14 @@ func assertVerb(tok sessionSDK.Object, method string) bool {
			sessionSDK.VerbObjectRangeHash,
			sessionSDK.VerbObjectPatch,
		)
	case nativeschema.MethodSearchObject:
	case acl.OpObjectSearch:
		return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete)
	case nativeschema.MethodRangeObject:
	case acl.OpObjectRange:
		return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch)
	case nativeschema.MethodHashObject:
	case acl.OpObjectHash:
		return tok.AssertVerb(sessionSDK.VerbObjectRangeHash)
	case nativeschema.MethodPatchObject:
		return tok.AssertVerb(sessionSDK.VerbObjectPatch)
	}

	return false
}

@ -155,15 +221,3 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error

	return nil
}

func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) {
	key, err := unmarshalPublicKey(rawKey)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid signature key: %w", err)
	}

	var idSender user.ID
	user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key))

	return &idSender, key, nil
}
131 pkg/services/object/acl/v2/util_test.go Normal file

@ -0,0 +1,131 @@
package v2

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"slices"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
	bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
	aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
	"github.com/stretchr/testify/require"
)

func TestOriginalTokens(t *testing.T) {
	sToken := sessiontest.ObjectSigned()
	bToken := bearertest.Token()

	pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	require.NoError(t, bToken.Sign(*pk))

	var bTokenV2 acl.BearerToken
	bToken.WriteToV2(&bTokenV2)
	// This line is needed because SDK uses some custom format for
	// reserved filters, so `cid.ID` is not converted to string immediately.
	require.NoError(t, bToken.ReadFromV2(bTokenV2))

	var sTokenV2 session.Token
	sToken.WriteToV2(&sTokenV2)

	for i := range 10 {
		metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
		res, err := originalSessionToken(metaHeaders)
		require.NoError(t, err)
		require.Equal(t, sToken, res, i)

		bTok, err := originalBearerToken(metaHeaders)
		require.NoError(t, err)
		require.Equal(t, &bToken, bTok, i)
	}
}

func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader {
	metaHeader := new(session.RequestMetaHeader)
	metaHeader.SetBearerToken(b)
	metaHeader.SetSessionToken(s)

	for range depth {
		link := metaHeader
		metaHeader = new(session.RequestMetaHeader)
		metaHeader.SetOrigin(link)
	}

	return metaHeader
}

func TestIsVerbCompatible(t *testing.T) {
	// Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28
	table := map[aclsdk.Op][]sessionSDK.ObjectVerb{
		aclsdk.OpObjectPut:    {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete},
		aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete},
		aclsdk.OpObjectGet:    {sessionSDK.VerbObjectGet},
		aclsdk.OpObjectHead: {
			sessionSDK.VerbObjectHead,
			sessionSDK.VerbObjectGet,
			sessionSDK.VerbObjectDelete,
			sessionSDK.VerbObjectRange,
			sessionSDK.VerbObjectRangeHash,
		},
		aclsdk.OpObjectRange:  {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash},
		aclsdk.OpObjectHash:   {sessionSDK.VerbObjectRangeHash},
		aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
	}

	verbs := []sessionSDK.ObjectVerb{
		sessionSDK.VerbObjectPut,
		sessionSDK.VerbObjectDelete,
		sessionSDK.VerbObjectHead,
		sessionSDK.VerbObjectRange,
		sessionSDK.VerbObjectRangeHash,
		sessionSDK.VerbObjectGet,
		sessionSDK.VerbObjectSearch,
	}

	var tok sessionSDK.Object

	for op, list := range table {
		for _, verb := range verbs {
			contains := slices.Contains(list, verb)

			tok.ForVerb(verb)

			require.Equal(t, contains, assertVerb(tok, op),
				"%v in token, %s executing", verb, op)
		}
	}
}

func TestAssertSessionRelation(t *testing.T) {
	var tok sessionSDK.Object
	cnr := cidtest.ID()
	cnrOther := cidtest.ID()
	obj := oidtest.ID()
	objOther := oidtest.ID()

	// make sure ids differ, otherwise test won't work correctly
	require.False(t, cnrOther.Equals(cnr))
	require.False(t, objOther.Equals(obj))

	// bind session to the container (required)
	tok.BindContainer(cnr)

	// test container-global session
	require.NoError(t, assertSessionRelation(tok, cnr, nil))
	require.NoError(t, assertSessionRelation(tok, cnr, &obj))
	require.Error(t, assertSessionRelation(tok, cnrOther, nil))
	require.Error(t, assertSessionRelation(tok, cnrOther, &obj))

	// limit the session to the particular object
	tok.LimitByObjects(obj)

	// test fixed object session (here obj arg must be non-nil everywhere)
	require.NoError(t, assertSessionRelation(tok, cnr, &obj))
	require.Error(t, assertSessionRelation(tok, cnr, &objOther))
}
@ -7,21 +7,6 @@ import (
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

var (
	errMissingContainerID      = malformedRequestError("missing container ID")
	errEmptyVerificationHeader = malformedRequestError("empty verification header")
	errEmptyBodySig            = malformedRequestError("empty at body signature")
	errInvalidSessionSig       = malformedRequestError("invalid session token signature")
	errInvalidSessionOwner     = malformedRequestError("invalid session token owner")
	errInvalidVerb             = malformedRequestError("session token verb is invalid")
)

func malformedRequestError(reason string) error {
	invalidArgErr := &apistatus.InvalidArgument{}
	invalidArgErr.SetMessage(reason)
	return invalidArgErr
}

func toStatusErr(err error) error {
	var chRouterErr *checkercore.ChainRouterError
	if !errors.As(err, &chRouterErr) {
@ -1,172 +0,0 @@
|
||||||
package ape
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
|
|
||||||
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Metadata struct {
|
|
||||||
Container cid.ID
|
|
||||||
Object *oid.ID
|
|
||||||
MetaHeader *session.RequestMetaHeader
|
|
||||||
VerificationHeader *session.RequestVerificationHeader
|
|
||||||
SessionToken *sessionSDK.Object
|
|
||||||
BearerToken *bearer.Token
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) {
|
|
||||||
if m.VerificationHeader == nil {
|
|
||||||
return nil, nil, errEmptyVerificationHeader
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.BearerToken != nil && m.BearerToken.Impersonate() {
|
|
||||||
return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
// if session token is presented, use it as truth source
|
|
||||||
if m.SessionToken != nil {
|
|
||||||
// verify signature of session token
|
|
||||||
return ownerFromToken(m.SessionToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
// otherwise get original body signature
|
|
||||||
bodySignature := originalBodySignature(m.VerificationHeader)
|
|
||||||
if bodySignature == nil {
|
|
||||||
return nil, nil, errEmptyBodySig
|
|
||||||
}
|
|
||||||
|
|
||||||
return unmarshalPublicKeyWithOwner(bodySignature.GetKey())
|
|
||||||
}
|
|
||||||
|
|
||||||
// RequestInfo contains request information extracted by request metadata.
|
|
||||||
type RequestInfo struct {
|
|
||||||
// Role defines under which role this request is executed.
|
|
||||||
// It must be represented only as a constant represented in native schema.
|
|
||||||
Role string
|
|
||||||
|
|
||||||
ContainerOwner user.ID
|
|
||||||
|
|
||||||
// Namespace defines to which namespace a container is belonged.
|
|
||||||
Namespace string
|
|
||||||
|
|
||||||
// HEX-encoded sender key.
|
|
||||||
SenderKey string
|
|
||||||
}
|
|
||||||
|
|
||||||
type RequestInfoExtractor interface {
|
|
||||||
GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type extractor struct {
|
|
||||||
containers container.Source
|
|
||||||
|
|
||||||
nm netmap.Source
|
|
||||||
|
|
||||||
classifier objectCore.SenderClassifier
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor {
|
|
||||||
return &extractor{
|
|
||||||
containers: containers,
|
|
||||||
nm: nm,
|
|
||||||
classifier: objectCore.NewSenderClassifier(irFetcher, nm, log),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error {
|
|
||||||
currentEpoch, err := e.nm.Epoch(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New("can't fetch current epoch")
|
|
||||||
}
|
|
||||||
if sessionToken.ExpiredAt(currentEpoch) {
|
|
||||||
return new(apistatus.SessionTokenExpired)
|
|
||||||
}
|
|
||||||
if sessionToken.InvalidAt(currentEpoch) {
|
|
||||||
return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch)
|
|
||||||
}
|
|
||||||
if !assertVerb(*sessionToken, method) {
|
|
||||||
return errInvalidVerb
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) {
|
|
||||||
cnr, err := e.containers.Get(ctx, m.Container)
|
|
||||||
if err != nil {
|
|
||||||
return ri, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if m.SessionToken != nil {
|
|
||||||
if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil {
|
|
||||||
return ri, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ownerID, ownerKey, err := m.RequestOwner()
|
|
||||||
if err != nil {
|
|
||||||
return ri, err
|
|
||||||
}
|
|
||||||
res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value)
|
|
||||||
if err != nil {
|
|
||||||
return ri, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ri.Role = nativeSchemaRole(res.Role)
|
|
||||||
ri.ContainerOwner = cnr.Value.Owner()
|
|
||||||
|
|
||||||
cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns")
|
|
||||||
if hasNamespace {
|
|
||||||
ri.Namespace = cnrNamespace
|
|
||||||
}
|
|
||||||
|
|
||||||
// it is assumed that at the moment the key will be valid,
|
|
||||||
// otherwise the request would not pass validation
|
|
||||||
ri.SenderKey = hex.EncodeToString(res.Key)
|
|
||||||
|
|
||||||
return ri, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) {
|
|
||||||
var sTok *sessionSDK.Object
|
|
||||||
|
|
||||||
if tokV2 != nil {
|
|
||||||
sTok = new(sessionSDK.Object)
|
|
||||||
|
|
||||||
err := sTok.ReadFromV2(*tokV2)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid session token: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
|
|
||||||
// if session relates to object's removal, we don't check
|
|
||||||
// relation of the tombstone to the session here since user
|
|
||||||
// can't predict tomb's ID.
|
|
||||||
err = assertSessionRelation(*sTok, cnr, nil)
|
|
||||||
} else {
|
|
||||||
err = assertSessionRelation(*sTok, cnr, obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return sTok, nil
|
|
||||||
}
|
|
|
@ -2,6 +2,9 @@ package ape

import (
	"context"
	"encoding/hex"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"

@ -9,18 +12,19 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)

var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext")

type Service struct {
	apeChecker Checker

	extractor RequestInfoExtractor

	next objectSvc.ServiceServer
}

@ -60,10 +64,9 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service)
	}
}

func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service {
func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service {
	return &Service{
		apeChecker: apeChecker,
		extractor: extractor,
		next: next,
	}
}

@ -73,9 +76,15 @@ type getStreamBasicChecker struct {

	apeChecker Checker

	metadata Metadata
	namespace string

	reqInfo RequestInfo
	senderKey []byte

	containerOwner user.ID

	role string

	bearerToken *bearer.Token
}

func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {

@ -86,15 +95,15 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
	}

	prm := Prm{
		Namespace: g.reqInfo.Namespace,
		Namespace: g.namespace,
		Container: cnrID,
		Object: objID,
		Header: partInit.GetHeader(),
		Method: nativeschema.MethodGetObject,
		SenderKey: g.reqInfo.SenderKey,
		SenderKey: hex.EncodeToString(g.senderKey),
		ContainerOwner: g.reqInfo.ContainerOwner,
		ContainerOwner: g.containerOwner,
		Role: g.reqInfo.Role,
		Role: g.role,
		BearerToken: g.metadata.BearerToken,
		BearerToken: g.bearerToken,
		XHeaders: resp.GetMetaHeader().GetXHeaders(),
	}

@ -105,53 +114,69 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
	return g.GetObjectStream.Send(resp)
}

func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) {
	untyped := ctx.Value(objectSvc.RequestContextKey)
	if untyped == nil {
		return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey)
	}
	rc, ok := untyped.(*objectSvc.RequestContext)
	if !ok {
		return nil, errFailedToCastToRequestContext
	}
	return rc, nil
}

func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error {
	md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
	reqCtx, err := requestContext(stream.Context())
	if err != nil {
		return err
		return toStatusErr(err)
	}
	reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject)
	if err != nil {
		return err
	}

	return c.next.Get(request, &getStreamBasicChecker{
		GetObjectStream: stream,
		apeChecker: c.apeChecker,
		metadata: md,
		namespace: reqCtx.Namespace,
		reqInfo: reqInfo,
		senderKey: reqCtx.SenderKey,
		containerOwner: reqCtx.ContainerOwner,
		role: nativeSchemaRole(reqCtx.Role),
		bearerToken: reqCtx.BearerToken,
	})
}

type putStreamBasicChecker struct {
	apeChecker Checker

	extractor RequestInfoExtractor

	next objectSvc.PutObjectStream
}

func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
	meta := request.GetMetaHeader()
	for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
		meta = origin
	}

	if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
		md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
		reqCtx, err := requestContext(ctx)
		if err != nil {
			return err
			return toStatusErr(err)
		}
		reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)

		cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID())
		if err != nil {
			return err
			return toStatusErr(err)
		}

		prm := Prm{
			Namespace: reqInfo.Namespace,
			Namespace: reqCtx.Namespace,
			Container: md.Container,
			Container: cnrID,
			Object: md.Object,
			Object: objID,
			Header: partInit.GetHeader(),
			Method: nativeschema.MethodPutObject,
			SenderKey: reqInfo.SenderKey,
			SenderKey: hex.EncodeToString(reqCtx.SenderKey),
			ContainerOwner: reqInfo.ContainerOwner,
			ContainerOwner: reqCtx.ContainerOwner,
			Role: reqInfo.Role,
			Role: nativeSchemaRole(reqCtx.Role),
			BearerToken: md.BearerToken,
			BearerToken: reqCtx.BearerToken,
			XHeaders: md.MetaHeader.GetXHeaders(),
			XHeaders: meta.GetXHeaders(),
		}

		if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
|
||||||
|
@ -171,7 +196,6 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
|
||||||
|
|
||||||
return &putStreamBasicChecker{
|
return &putStreamBasicChecker{
|
||||||
apeChecker: c.apeChecker,
|
apeChecker: c.apeChecker,
|
||||||
extractor: c.extractor,
|
|
||||||
next: streamer,
|
next: streamer,
|
||||||
}, err
|
}, err
|
||||||
}
|
}
|
||||||
|
@ -179,36 +203,40 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) {
|
||||||
type patchStreamBasicChecker struct {
|
type patchStreamBasicChecker struct {
|
||||||
apeChecker Checker
|
apeChecker Checker
|
||||||
|
|
||||||
extractor RequestInfoExtractor
|
|
||||||
|
|
||||||
next objectSvc.PatchObjectStream
|
next objectSvc.PatchObjectStream
|
||||||
|
|
||||||
nonFirstSend bool
|
nonFirstSend bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
|
func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error {
|
||||||
|
meta := request.GetMetaHeader()
|
||||||
|
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
|
||||||
|
meta = origin
|
||||||
|
}
|
||||||
|
|
||||||
if !p.nonFirstSend {
|
if !p.nonFirstSend {
|
||||||
p.nonFirstSend = true
|
p.nonFirstSend = true
|
||||||
|
|
||||||
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
reqCtx, err := requestContext(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return toStatusErr(err)
|
||||||
}
|
}
|
||||||
reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject)
|
|
||||||
|
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return toStatusErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
prm := Prm{
|
prm := Prm{
|
||||||
Namespace: reqInfo.Namespace,
|
Namespace: reqCtx.Namespace,
|
||||||
Container: md.Container,
|
Container: cnrID,
|
||||||
Object: md.Object,
|
Object: objID,
|
||||||
Method: nativeschema.MethodPatchObject,
|
Method: nativeschema.MethodPatchObject,
|
||||||
SenderKey: reqInfo.SenderKey,
|
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
|
||||||
ContainerOwner: reqInfo.ContainerOwner,
|
ContainerOwner: reqCtx.ContainerOwner,
|
||||||
Role: reqInfo.Role,
|
Role: nativeSchemaRole(reqCtx.Role),
|
||||||
BearerToken: md.BearerToken,
|
BearerToken: reqCtx.BearerToken,
|
||||||
XHeaders: md.MetaHeader.GetXHeaders(),
|
XHeaders: meta.GetXHeaders(),
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
|
if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
|
||||||
|
@ -228,17 +256,22 @@ func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error
|
||||||
|
|
||||||
return &patchStreamBasicChecker{
|
return &patchStreamBasicChecker{
|
||||||
apeChecker: c.apeChecker,
|
apeChecker: c.apeChecker,
|
||||||
extractor: c.extractor,
|
|
||||||
next: streamer,
|
next: streamer,
|
||||||
}, err
|
}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
|
func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
|
||||||
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
meta := request.GetMetaHeader()
|
||||||
|
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
|
||||||
|
meta = origin
|
||||||
|
}
|
||||||
|
|
||||||
|
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject)
|
|
||||||
|
reqCtx, err := requestContext(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -252,7 +285,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
|
||||||
switch headerPart := resp.GetBody().GetHeaderPart().(type) {
|
switch headerPart := resp.GetBody().GetHeaderPart().(type) {
|
||||||
case *objectV2.ShortHeader:
|
case *objectV2.ShortHeader:
|
||||||
cidV2 := new(refs.ContainerID)
|
cidV2 := new(refs.ContainerID)
|
||||||
md.Container.WriteToV2(cidV2)
|
cnrID.WriteToV2(cidV2)
|
||||||
header.SetContainerID(cidV2)
|
header.SetContainerID(cidV2)
|
||||||
header.SetVersion(headerPart.GetVersion())
|
header.SetVersion(headerPart.GetVersion())
|
||||||
header.SetCreationEpoch(headerPart.GetCreationEpoch())
|
header.SetCreationEpoch(headerPart.GetCreationEpoch())
|
||||||
|
@ -268,16 +301,16 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.apeChecker.CheckAPE(ctx, Prm{
|
err = c.apeChecker.CheckAPE(ctx, Prm{
|
||||||
Namespace: reqInfo.Namespace,
|
Namespace: reqCtx.Namespace,
|
||||||
Container: md.Container,
|
Container: cnrID,
|
||||||
Object: md.Object,
|
Object: objID,
|
||||||
Header: header,
|
Header: header,
|
||||||
Method: nativeschema.MethodHeadObject,
|
Method: nativeschema.MethodHeadObject,
|
||||||
Role: reqInfo.Role,
|
Role: nativeSchemaRole(reqCtx.Role),
|
||||||
SenderKey: reqInfo.SenderKey,
|
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
|
||||||
ContainerOwner: reqInfo.ContainerOwner,
|
ContainerOwner: reqCtx.ContainerOwner,
|
||||||
BearerToken: md.BearerToken,
|
BearerToken: reqCtx.BearerToken,
|
||||||
XHeaders: md.MetaHeader.GetXHeaders(),
|
XHeaders: meta.GetXHeaders(),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, toStatusErr(err)
|
return nil, toStatusErr(err)
|
||||||
|
@ -286,24 +319,32 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
|
func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
|
||||||
md, err := newMetadata(request, request.GetBody().GetContainerID(), nil)
|
meta := request.GetMetaHeader()
|
||||||
if err != nil {
|
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
|
||||||
return err
|
meta = origin
|
||||||
}
|
}
|
||||||
reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject)
|
|
||||||
|
var cnrID cid.ID
|
||||||
|
if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil {
|
||||||
|
if err := cnrID.ReadFromV2(*cnrV2); err != nil {
|
||||||
|
return toStatusErr(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
reqCtx, err := requestContext(stream.Context())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return toStatusErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
|
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
|
||||||
Namespace: reqInfo.Namespace,
|
Namespace: reqCtx.Namespace,
|
||||||
Container: md.Container,
|
Container: cnrID,
|
||||||
Method: nativeschema.MethodSearchObject,
|
Method: nativeschema.MethodSearchObject,
|
||||||
Role: reqInfo.Role,
|
Role: nativeSchemaRole(reqCtx.Role),
|
||||||
SenderKey: reqInfo.SenderKey,
|
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
|
||||||
ContainerOwner: reqInfo.ContainerOwner,
|
ContainerOwner: reqCtx.ContainerOwner,
|
||||||
BearerToken: md.BearerToken,
|
BearerToken: reqCtx.BearerToken,
|
||||||
XHeaders: md.MetaHeader.GetXHeaders(),
|
XHeaders: meta.GetXHeaders(),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return toStatusErr(err)
|
return toStatusErr(err)
|
||||||
|
@ -313,25 +354,31 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
|
func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
|
||||||
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
meta := request.GetMetaHeader()
|
||||||
|
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
|
||||||
|
meta = origin
|
||||||
|
}
|
||||||
|
|
||||||
|
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject)
|
|
||||||
|
reqCtx, err := requestContext(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.apeChecker.CheckAPE(ctx, Prm{
|
err = c.apeChecker.CheckAPE(ctx, Prm{
|
||||||
Namespace: reqInfo.Namespace,
|
Namespace: reqCtx.Namespace,
|
||||||
Container: md.Container,
|
Container: cnrID,
|
||||||
Object: md.Object,
|
Object: objID,
|
||||||
Method: nativeschema.MethodDeleteObject,
|
Method: nativeschema.MethodDeleteObject,
|
||||||
Role: reqInfo.Role,
|
Role: nativeSchemaRole(reqCtx.Role),
|
||||||
SenderKey: reqInfo.SenderKey,
|
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
|
||||||
ContainerOwner: reqInfo.ContainerOwner,
|
ContainerOwner: reqCtx.ContainerOwner,
|
||||||
BearerToken: md.BearerToken,
|
BearerToken: reqCtx.BearerToken,
|
||||||
XHeaders: md.MetaHeader.GetXHeaders(),
|
XHeaders: meta.GetXHeaders(),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, toStatusErr(err)
|
return nil, toStatusErr(err)
|
||||||
|
@ -346,25 +393,31 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
|
func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error {
|
||||||
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
meta := request.GetMetaHeader()
|
||||||
if err != nil {
|
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
|
||||||
return err
|
meta = origin
|
||||||
}
|
}
|
||||||
reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject)
|
|
||||||
|
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return toStatusErr(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
reqCtx, err := requestContext(stream.Context())
|
||||||
|
if err != nil {
|
||||||
|
return toStatusErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
|
err = c.apeChecker.CheckAPE(stream.Context(), Prm{
|
||||||
Namespace: reqInfo.Namespace,
|
Namespace: reqCtx.Namespace,
|
||||||
Container: md.Container,
|
Container: cnrID,
|
||||||
Object: md.Object,
|
Object: objID,
|
||||||
Method: nativeschema.MethodRangeObject,
|
Method: nativeschema.MethodRangeObject,
|
||||||
Role: reqInfo.Role,
|
Role: nativeSchemaRole(reqCtx.Role),
|
||||||
SenderKey: reqInfo.SenderKey,
|
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
|
||||||
ContainerOwner: reqInfo.ContainerOwner,
|
ContainerOwner: reqCtx.ContainerOwner,
|
||||||
BearerToken: md.BearerToken,
|
BearerToken: reqCtx.BearerToken,
|
||||||
XHeaders: md.MetaHeader.GetXHeaders(),
|
XHeaders: meta.GetXHeaders(),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return toStatusErr(err)
|
return toStatusErr(err)
|
||||||
|
@ -374,25 +427,31 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
|
func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
|
||||||
md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
meta := request.GetMetaHeader()
|
||||||
|
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
|
||||||
|
meta = origin
|
||||||
|
}
|
||||||
|
|
||||||
|
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject)
|
|
||||||
|
reqCtx, err := requestContext(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
prm := Prm{
|
prm := Prm{
|
||||||
Namespace: reqInfo.Namespace,
|
Namespace: reqCtx.Namespace,
|
||||||
Container: md.Container,
|
Container: cnrID,
|
||||||
Object: md.Object,
|
Object: objID,
|
||||||
Method: nativeschema.MethodHashObject,
|
Method: nativeschema.MethodHashObject,
|
||||||
Role: reqInfo.Role,
|
Role: nativeSchemaRole(reqCtx.Role),
|
||||||
SenderKey: reqInfo.SenderKey,
|
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
|
||||||
ContainerOwner: reqInfo.ContainerOwner,
|
ContainerOwner: reqCtx.ContainerOwner,
|
||||||
BearerToken: md.BearerToken,
|
BearerToken: reqCtx.BearerToken,
|
||||||
XHeaders: md.MetaHeader.GetXHeaders(),
|
XHeaders: meta.GetXHeaders(),
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := c.next.GetRangeHash(ctx, request)
|
resp, err := c.next.GetRangeHash(ctx, request)
|
||||||
|
@ -407,26 +466,32 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
|
func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) {
|
||||||
md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
|
meta := request.GetMetaHeader()
|
||||||
|
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
|
||||||
|
meta = origin
|
||||||
|
}
|
||||||
|
|
||||||
|
cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject)
|
|
||||||
|
reqCtx, err := requestContext(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
prm := Prm{
|
prm := Prm{
|
||||||
Namespace: reqInfo.Namespace,
|
Namespace: reqCtx.Namespace,
|
||||||
Container: md.Container,
|
Container: cnrID,
|
||||||
Object: md.Object,
|
Object: objID,
|
||||||
Header: request.GetBody().GetObject().GetHeader(),
|
Header: request.GetBody().GetObject().GetHeader(),
|
||||||
Method: nativeschema.MethodPutObject,
|
Method: nativeschema.MethodPutObject,
|
||||||
Role: reqInfo.Role,
|
Role: nativeSchemaRole(reqCtx.Role),
|
||||||
SenderKey: reqInfo.SenderKey,
|
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
|
||||||
ContainerOwner: reqInfo.ContainerOwner,
|
ContainerOwner: reqCtx.ContainerOwner,
|
||||||
BearerToken: md.BearerToken,
|
BearerToken: reqCtx.BearerToken,
|
||||||
XHeaders: md.MetaHeader.GetXHeaders(),
|
XHeaders: meta.GetXHeaders(),
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
|
if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {
|
||||||
|
@ -436,36 +501,18 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ
|
||||||
return c.next.PutSingle(ctx, request)
|
return c.next.PutSingle(ctx, request)
|
||||||
}
|
}
|
||||||
|
|
||||||
type request interface {
|
func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) {
|
||||||
GetMetaHeader() *session.RequestMetaHeader
|
if cidV2 != nil {
|
||||||
GetVerificationHeader() *session.RequestVerificationHeader
|
if err = cnrID.ReadFromV2(*cidV2); err != nil {
|
||||||
}
|
return
|
||||||
|
}
|
||||||
func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) {
|
|
||||||
meta := request.GetMetaHeader()
|
|
||||||
for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
|
|
||||||
meta = origin
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2)
|
if objV2 != nil {
|
||||||
if err != nil {
|
objID = new(oid.ID)
|
||||||
return
|
if err = objID.ReadFromV2(*objV2); err != nil {
|
||||||
}
|
return
|
||||||
session, err := readSessionToken(cnrID, objID, meta.GetSessionToken())
|
}
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
bearer, err := originalBearerToken(request.GetMetaHeader())
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
md = Metadata{
|
|
||||||
Container: cnrID,
|
|
||||||
Object: objID,
|
|
||||||
VerificationHeader: request.GetVerificationHeader(),
|
|
||||||
SessionToken: session,
|
|
||||||
BearerToken: bearer,
|
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
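Note: every handler on the right-hand (branch) side of this file repeats the same preamble before calling CheckAPE: unwrap the request meta header to its innermost origin, convert the V2 container/object identifiers through getAddressParamsSDK, and read the RequestContext that an earlier handler stored in the context. Condensed from the hunks above, for a unary call (names exactly as in the diff; the surrounding middleware that fills the context is not part of this comparison):

	meta := request.GetMetaHeader()
	for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() {
		meta = origin // walk wrapped requests back to the original meta header
	}

	cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID())
	if err != nil {
		return nil, err
	}

	reqCtx, err := requestContext(ctx) // *objectSvc.RequestContext placed in ctx by an upstream handler
	if err != nil {
		return nil, err
	}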
@@ -7,11 +7,3 @@ import "context"
 type Checker interface {
 	CheckAPE(context.Context, Prm) error
 }
-
-// InnerRingFetcher is an interface that must provide
-// Inner Ring information.
-type InnerRingFetcher interface {
-	// InnerRingKeys must return list of public keys of
-	// the actual inner ring.
-	InnerRingKeys(ctx context.Context) ([][]byte, error)
-}
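Any type with a CheckAPE(context.Context, Prm) error method satisfies the trimmed-down Checker interface above. A minimal sketch of a conforming implementation (the allow-all behaviour is purely illustrative and not part of this change):

	type allowAllChecker struct{}

	func (allowAllChecker) CheckAPE(_ context.Context, _ Prm) error {
		// a real checker evaluates APE chain rules against the request parameters;
		// this stub accepts everything and only demonstrates the contract
		return nil
	}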
@@ -1,84 +0,0 @@
-package ape
-
-import (
-	"slices"
-	"testing"
-
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
-	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
-	nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
-	"github.com/stretchr/testify/require"
-)
-
-func TestIsVerbCompatible(t *testing.T) {
-	table := map[string][]sessionSDK.ObjectVerb{
-		nativeschema.MethodPutObject:    {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch},
-		nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete},
-		nativeschema.MethodGetObject:    {sessionSDK.VerbObjectGet},
-		nativeschema.MethodHeadObject: {
-			sessionSDK.VerbObjectHead,
-			sessionSDK.VerbObjectGet,
-			sessionSDK.VerbObjectDelete,
-			sessionSDK.VerbObjectRange,
-			sessionSDK.VerbObjectRangeHash,
-			sessionSDK.VerbObjectPatch,
-		},
-		nativeschema.MethodRangeObject:  {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch},
-		nativeschema.MethodHashObject:   {sessionSDK.VerbObjectRangeHash},
-		nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete},
-		nativeschema.MethodPatchObject:  {sessionSDK.VerbObjectPatch},
-	}
-
-	verbs := []sessionSDK.ObjectVerb{
-		sessionSDK.VerbObjectPut,
-		sessionSDK.VerbObjectDelete,
-		sessionSDK.VerbObjectHead,
-		sessionSDK.VerbObjectRange,
-		sessionSDK.VerbObjectRangeHash,
-		sessionSDK.VerbObjectGet,
-		sessionSDK.VerbObjectSearch,
-		sessionSDK.VerbObjectPatch,
-	}
-
-	var tok sessionSDK.Object
-
-	for op, list := range table {
-		for _, verb := range verbs {
-			contains := slices.Contains(list, verb)
-
-			tok.ForVerb(verb)
-
-			require.Equal(t, contains, assertVerb(tok, op),
-				"%v in token, %s executing", verb, op)
-		}
-	}
-}
-
-func TestAssertSessionRelation(t *testing.T) {
-	var tok sessionSDK.Object
-	cnr := cidtest.ID()
-	cnrOther := cidtest.ID()
-	obj := oidtest.ID()
-	objOther := oidtest.ID()
-
-	// make sure ids differ, otherwise test won't work correctly
-	require.False(t, cnrOther.Equals(cnr))
-	require.False(t, objOther.Equals(obj))
-
-	// bind session to the container (required)
-	tok.BindContainer(cnr)
-
-	// test container-global session
-	require.NoError(t, assertSessionRelation(tok, cnr, nil))
-	require.NoError(t, assertSessionRelation(tok, cnr, &obj))
-	require.Error(t, assertSessionRelation(tok, cnrOther, nil))
-	require.Error(t, assertSessionRelation(tok, cnrOther, &obj))
-
-	// limit the session to the particular object
-	tok.LimitByObjects(obj)
-
-	// test fixed object session (here obj arg must be non-nil everywhere)
-	require.NoError(t, assertSessionRelation(tok, cnr, &obj))
-	require.Error(t, assertSessionRelation(tok, cnr, &objOther))
-}
@@ -130,7 +130,7 @@ func TestECWriter(t *testing.T) {
 	nodeKey, err := keys.NewPrivateKey()
 	require.NoError(t, err)
 
-	log, err := logger.NewLogger(logger.Prm{})
+	log, err := logger.NewLogger(nil)
 	require.NoError(t, err)
 
 	var n nmKeys
pkg/services/object/request_context.go (new file, +24 lines)
@@ -0,0 +1,24 @@
+package object
+
+import (
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+type RequestContextKeyT struct{}
+
+var RequestContextKey = RequestContextKeyT{}
+
+// RequestContext is a context passed between middleware handlers.
+type RequestContext struct {
+	Namespace string
+
+	SenderKey []byte
+
+	ContainerOwner user.ID
+
+	Role acl.Role
+
+	BearerToken *bearer.Token
+}
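The reading side of this key is the requestContext helper added in the APE service above; the writing side is outside this comparison. A minimal sketch of how an upstream handler could attach the value (field values and the surrounding middleware are assumptions, not shown in this diff):

	reqCtx := &object.RequestContext{
		Namespace:      namespace,   // resolved for the request/container (assumed)
		SenderKey:      senderKey,   // raw public key bytes of the request signer (assumed)
		ContainerOwner: owner,
		Role:           role,        // acl.Role computed by the ACL layer (assumed)
		BearerToken:    bearerToken, // may be nil
	}
	ctx = context.WithValue(ctx, object.RequestContextKey, reqCtx)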
@@ -99,8 +99,8 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*
 		grpc.WithChainUnaryInterceptor(
 			qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
 			metrics.NewUnaryClientInterceptor(),
-			tracing.NewUnaryClientInterceptor(),
-			tagging.NewUnaryClientInterceptor(),
+			tracing.NewUnaryClientInteceptor(),
+			tagging.NewUnaryClientInteceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
 			qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
@@ -85,7 +85,7 @@ func New(opts ...Option) *Service {
 
 // Start starts the service.
 func (s *Service) Start(ctx context.Context) {
-	ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String())
+	ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String())
 	go s.replicateLoop(ctx)
 	go s.syncLoop(ctx)
 
@@ -344,8 +344,8 @@ func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) {
 		grpc.WithChainUnaryInterceptor(
 			qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(),
 			metrics.NewUnaryClientInterceptor(),
-			tracing_grpc.NewUnaryClientInterceptor(),
-			tagging.NewUnaryClientInterceptor(),
+			tracing_grpc.NewUnaryClientInteceptor(),
+			tagging.NewUnaryClientInteceptor(),
 		),
 		grpc.WithChainStreamInterceptor(
 			qos.NewAdjustOutgoingIOTagStreamClientInterceptor(),
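Both interceptor hunks only rename the tracing and tagging constructors to the Inteceptor spelling, presumably matching the API of the observability/tagging dependencies pinned on this branch; the chaining itself is standard grpc-go. For orientation, an interceptor compatible with grpc.WithChainUnaryInterceptor looks roughly like this (illustrative sketch, not part of the change):

	func exampleUnaryClientInterceptor() grpc.UnaryClientInterceptor {
		return func(ctx context.Context, method string, req, reply any,
			cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
		) error {
			// pre-call work (attach an IO tag, start a span, record metrics) goes here
			err := invoker(ctx, method, req, reply, cc, opts...)
			// post-call work (finish the span, observe latency) goes here
			return err
		}
	}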
@@ -23,8 +23,16 @@ type Logger struct {
 // Parameters that have been connected to the Logger support its
 // configuration changing.
 //
-// See also Logger.Reload, SetLevelString.
+// Passing Prm after a successful connection via the NewLogger, connects
+// the Prm to a new instance of the Logger.
+//
+// See also Reload, SetLevelString.
 type Prm struct {
+	// link to the created Logger
+	// instance; used for a runtime
+	// reconfiguration
+	_log *Logger
+
 	// support runtime rereading
 	level zapcore.Level
 
@@ -65,6 +73,22 @@ func (p *Prm) SetDestination(d string) error {
 	return nil
 }
 
+// Reload reloads configuration of a connected instance of the Logger.
+// Returns ErrLoggerNotConnected if no connection has been performed.
+// Returns any reconfiguration error from the Logger directly.
+func (p Prm) Reload() error {
+	if p._log == nil {
+		// incorrect logger usage
+		panic("parameters are not connected to any Logger")
+	}
+
+	return p._log.reload(p)
+}
+
+func defaultPrm() *Prm {
+	return new(Prm)
+}
+
 // NewLogger constructs a new zap logger instance. Constructing with nil
 // parameters is safe: default values will be used then.
 // Passing non-nil parameters after a successful creation (non-error) allows
@@ -76,7 +100,10 @@ func (p *Prm) SetDestination(d string) error {
 // - ISO8601 time encoding.
 //
 // Logger records a stack trace for all messages at or above fatal level.
-func NewLogger(prm Prm) (*Logger, error) {
+func NewLogger(prm *Prm) (*Logger, error) {
+	if prm == nil {
+		prm = defaultPrm()
+	}
 	switch prm.dest {
 	case DestinationUndefined, DestinationStdout:
 		return newConsoleLogger(prm)
@@ -87,7 +114,7 @@ func NewLogger(prm Prm) (*Logger, error) {
 	}
 }
 
-func newConsoleLogger(prm Prm) (*Logger, error) {
+func newConsoleLogger(prm *Prm) (*Logger, error) {
 	lvl := zap.NewAtomicLevelAt(prm.level)
 
 	c := zap.NewProductionConfig()
@@ -112,11 +139,12 @@ func newConsoleLogger(prm Prm) (*Logger, error) {
 	}
 
 	l := &Logger{z: lZap, lvl: lvl}
+	prm._log = l
 
 	return l, nil
 }
 
-func newJournaldLogger(prm Prm) (*Logger, error) {
+func newJournaldLogger(prm *Prm) (*Logger, error) {
 	lvl := zap.NewAtomicLevelAt(prm.level)
 
 	c := zap.NewProductionConfig()
@@ -153,12 +181,14 @@ func newJournaldLogger(prm Prm) (*Logger, error) {
 	lZap := zap.New(samplingCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1))
 
 	l := &Logger{z: lZap, lvl: lvl}
+	prm._log = l
 
 	return l, nil
 }
 
-func (l *Logger) Reload(prm Prm) {
+func (l *Logger) reload(prm Prm) error {
 	l.lvl.SetLevel(prm.level)
+	return nil
 }
 
 func (l *Logger) WithOptions(options ...zap.Option) {
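Usage sketch for the pointer-based constructor on the branch side: NewLogger(nil) yields the defaults (as the TestECWriter hunk above now does), while a retained *Prm stays connected to the created logger so Reload can re-apply its level at runtime. SetLevelString is the setter referenced by the doc comment; its exact signature is assumed here:

	log, err := logger.NewLogger(nil) // defaults: console destination, default level
	if err != nil {
		return err
	}

	prm := &logger.Prm{}
	log2, err := logger.NewLogger(prm) // prm is now connected to log2 via prm._log
	if err != nil {
		return err
	}
	if err := prm.SetLevelString("debug"); err != nil { // assumed setter, see doc comment
		return err
	}
	if err := prm.Reload(); err != nil { // re-applies prm.level to the connected logger
		return err
	}
	_, _ = log, log2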