diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 4234de160..e21ce61c5 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -68,14 +68,14 @@ async { } task('pre-commit') { - dockerfile(""" - FROM ${golangDefault} - RUN apt update && \ - apt install -y --no-install-recommends pre-commit - """) { - withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { - sh 'pre-commit run --color=always --hook-stage=manual --all-files' - } + sh ''' + apt update + apt install -y --no-install-recommends pre-commit + ''' // TODO: Make an OCI image for pre-commit + golang? Unpack golang tarball with a library function? + withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { + sh 'pre-commit run --color=always --hook-stage=manual --all-files' } } } + +// TODO: dco check diff --git a/.golangci.yml b/.golangci.yml index e3ec09f60..f21a46248 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,107 +1,101 @@ -version: "2" +# This file contains all available configuration options +# with their default values. + +# options for analysis running run: + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 20m + + # include test files or not, default is true tests: false + +# output configuration options output: + # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" formats: - tab: - path: stdout - colors: false + - format: tab + +# all available settings of specific linters +linters-settings: + exhaustive: + # indicates that switch statements are to be considered exhaustive if a + # 'default' case is present, even if all enum members aren't listed in the + # switch + default-signifies-exhaustive: true + gci: + sections: + - standard + - default + custom-order: true + govet: + # report about shadowed variables + check-shadowing: false + staticcheck: + checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed. 
+ funlen: + lines: 80 # default 60 + statements: 60 # default 40 + gocognit: + min-complexity: 40 # default 30 + importas: + no-unaliased: true + no-extra-aliases: false + alias: + pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object + alias: objectSDK + unused: + field-writes-are-uses: false + exported-fields-are-used: false + local-variables-are-used: false + custom: + truecloudlab-linters: + path: bin/linters/external_linters.so + original-url: git.frostfs.info/TrueCloudLab/linters.git + settings: + noliteral: + target-methods : ["reportFlushError", "reportError"] + disable-packages: ["codes", "err", "res","exec"] + constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + linters: - default: none enable: - - bidichk - - containedctx - - contextcheck - - copyloopvar - - durationcheck - - errcheck - - exhaustive - - funlen - - gocognit - - gocritic - - godot - - importas - - ineffassign - - intrange - - misspell - - perfsprint - - predeclared - - protogetter - - reassign + # mandatory linters + - govet - revive + + # some default golangci-lint linters + - errcheck + - gosimple + - godot + - ineffassign - staticcheck - - testifylint - - truecloudlab-linters - - unconvert - - unparam + - typecheck - unused - - usetesting - - whitespace - settings: - exhaustive: - default-signifies-exhaustive: true - funlen: - lines: 80 - statements: 60 - gocognit: - min-complexity: 40 - gocritic: - disabled-checks: - - ifElseChain - importas: - alias: - - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object - alias: objectSDK - no-unaliased: true - no-extra-aliases: false - staticcheck: - checks: - - all - - -QF1002 - unused: - field-writes-are-uses: false - exported-fields-are-used: false - local-variables-are-used: false - custom: - truecloudlab-linters: - path: bin/linters/external_linters.so - original-url: git.frostfs.info/TrueCloudLab/linters.git - settings: - noliteral: - constants-package: 
git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs - disable-packages: - - codes - - err - - res - - exec - target-methods: - - reportFlushError - - reportError - exclusions: - generated: lax - presets: - - comments - - common-false-positives - - legacy - - std-error-handling - paths: - - third_party$ - - builtin$ - - examples$ -formatters: - enable: + + # extra linters + - bidichk + - durationcheck + - exhaustive + - copyloopvar - gci - gofmt - goimports - settings: - gci: - sections: - - standard - - default - custom-order: true - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ + - misspell + - predeclared + - reassign + - whitespace + - containedctx + - funlen + - gocognit + - contextcheck + - importas + - truecloudlab-linters + - perfsprint + - testifylint + - protogetter + - intrange + - tenv + - unconvert + - unparam + disable-all: true + fast: false diff --git a/Makefile b/Makefile index 575eaae6f..cd80fc72e 100755 --- a/Makefile +++ b/Makefile @@ -1,6 +1,5 @@ #!/usr/bin/make -f SHELL = bash -.SHELLFLAGS = -euo pipefail -c REPO ?= $(shell go list -m) VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop") @@ -9,8 +8,8 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" GO_VERSION ?= 1.23 -LINT_VERSION ?= 2.0.2 -TRUECLOUDLAB_LINT_VERSION ?= 0.0.10 +LINT_VERSION ?= 1.62.2 +TRUECLOUDLAB_LINT_VERSION ?= 0.0.8 PROTOC_VERSION ?= 25.0 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) PROTOC_OS_VERSION=osx-x86_64 @@ -116,7 +115,7 @@ protoc: # Install protoc protoc-install: @rm -rf $(PROTOBUF_DIR) - @mkdir -p $(PROTOBUF_DIR) + @mkdir $(PROTOBUF_DIR) @echo "⇒ Installing protoc... 
" @wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip' @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR) @@ -170,7 +169,7 @@ imports: # Install gofumpt fumpt-install: @rm -rf $(GOFUMPT_DIR) - @mkdir -p $(GOFUMPT_DIR) + @mkdir $(GOFUMPT_DIR) @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION) # Run gofumpt @@ -187,44 +186,21 @@ test: @echo "⇒ Running go test" @GOFLAGS="$(GOFLAGS)" go test ./... -# Install Gerrit commit-msg hook -review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks -review-install: - @git config remote.review.url \ - || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node - @mkdir -p $(GIT_HOOK_DIR)/ - @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg - @chmod +x $(GIT_HOOK_DIR)/commit-msg - @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg - @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg - -# Create a PR in Gerrit -review: BRANCH ?= master -review: - @git push review HEAD:refs/for/$(BRANCH) \ - --push-option r=e.stratonikov@yadro.com \ - --push-option r=d.stepanov@yadro.com \ - --push-option r=an.nikiforov@yadro.com \ - --push-option r=a.arifullin@yadro.com \ - --push-option r=ekaterina.lebedeva@yadro.com \ - --push-option r=a.savchuk@yadro.com \ - --push-option r=a.chuprov@yadro.com - # Run pre-commit pre-commit-run: @pre-commit run -a --hook-stage manual # Install linters -lint-install: $(BIN) +lint-install: @rm -rf $(OUTPUT_LINT_DIR) - @mkdir -p $(OUTPUT_LINT_DIR) + @mkdir $(OUTPUT_LINT_DIR) @mkdir -p $(TMP_DIR) @rm -rf $(TMP_DIR)/linters @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters @@make -C $(TMP_DIR)/linters lib 
CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) @rm -rf $(TMP_DIR)/linters @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION) + @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) # Run linters lint: @@ -236,7 +212,7 @@ lint: # Install staticcheck staticcheck-install: @rm -rf $(STATICCHECK_DIR) - @mkdir -p $(STATICCHECK_DIR) + @mkdir $(STATICCHECK_DIR) @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) # Run staticcheck @@ -249,7 +225,7 @@ staticcheck-run: # Install gopls gopls-install: @rm -rf $(GOPLS_DIR) - @mkdir -p $(GOPLS_DIR) + @mkdir $(GOPLS_DIR) @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) # Run gopls diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go deleted file mode 100644 index d67b70d2a..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/root.go +++ /dev/null @@ -1,15 +0,0 @@ -package maintenance - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie" - "github.com/spf13/cobra" -) - -var RootCmd = &cobra.Command{ - Use: "maintenance", - Short: "Section for maintenance commands", -} - -func init() { - RootCmd.AddCommand(zombie.Cmd) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go deleted file mode 100644 index 1b66889aa..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go +++ /dev/null @@ -1,70 +0,0 @@ -package zombie - -import ( - "crypto/ecdsa" - "fmt" - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" - commonCmd 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey { - keyDesc := viper.GetString(walletFlag) - if keyDesc == "" { - return &nodeconfig.Key(appCfg).PrivateKey - } - data, err := os.ReadFile(keyDesc) - commonCmd.ExitOnErr(cmd, "open wallet file: %w", err) - - priv, err := keys.NewPrivateKeyFromBytes(data) - if err != nil { - w, err := wallet.NewWalletFromFile(keyDesc) - commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err) - return fromWallet(cmd, w, viper.GetString(addressFlag)) - } - return &priv.PrivateKey -} - -func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey { - var ( - addr util.Uint160 - err error - ) - - if addrStr == "" { - addr = w.GetChangeAddress() - } else { - addr, err = flags.ParseAddress(addrStr) - commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err) - } - - acc := w.GetAccount(addr) - if acc == nil { - commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr)) - } - - pass, err := getPassword() - commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err) - - commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams())) - - return &acc.PrivateKey().PrivateKey -} - -func getPassword() (string, error) { - // this check allows empty passwords - if viper.IsSet("password") { - return viper.GetString("password"), nil - } - - return input.ReadPassword("Enter password > ") -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go 
b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go deleted file mode 100644 index f73f33db9..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go +++ /dev/null @@ -1,31 +0,0 @@ -package zombie - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -func list(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - storageEngine := newEngine(cmd, appCfg) - q := createQuarantine(cmd, storageEngine.DumpInfo()) - var containerID *cid.ID - if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" { - containerID = &cid.ID{} - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) - } - - commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error { - if containerID != nil && a.Container() != *containerID { - return nil - } - cmd.Println(a.EncodeToString()) - return nil - })) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go deleted file mode 100644 index cd3a64499..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go +++ /dev/null @@ -1,46 +0,0 @@ -package zombie - -import ( - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" - nodeconfig 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "github.com/spf13/cobra" -) - -func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client { - addresses := morphconfig.RPCEndpoint(appCfg) - if len(addresses) == 0 { - commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found")) - } - key := nodeconfig.Key(appCfg) - cli, err := client.New(cmd.Context(), - key, - client.WithDialTimeout(morphconfig.DialTimeout(appCfg)), - client.WithEndpoints(addresses...), - client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)), - ) - commonCmd.ExitOnErr(cmd, "create morph client: %w", err) - return cli -} - -func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client { - hs, err := morph.NNSContractAddress(client.NNSContainerContractName) - commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err) - cc, err := cntClient.NewFromMorph(morph, hs, 0) - commonCmd.ExitOnErr(cmd, "create morph container client: %w", err) - return cc -} - -func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client { - hs, err := morph.NNSContractAddress(client.NNSNetmapContractName) - commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err) - cli, err := netmapClient.NewFromMorph(morph, hs, 0) - commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err) - return cli -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go deleted file mode 100644 index 27f83aec7..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go +++ 
/dev/null @@ -1,154 +0,0 @@ -package zombie - -import ( - "context" - "fmt" - "math" - "os" - "path/filepath" - "strings" - "sync" - - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -type quarantine struct { - // mtx protects current field. - mtx sync.Mutex - current int - trees []*fstree.FSTree -} - -func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine { - var paths []string - for _, sh := range engineInfo.Shards { - var storagePaths []string - for _, st := range sh.BlobStorInfo.SubStorages { - storagePaths = append(storagePaths, st.Path) - } - if len(storagePaths) == 0 { - continue - } - paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine")) - } - q, err := newQuarantine(paths) - commonCmd.ExitOnErr(cmd, "create quarantine: %w", err) - return q -} - -func commonPath(paths []string) string { - if len(paths) == 0 { - return "" - } - if len(paths) == 1 { - return paths[0] - } - minLen := math.MaxInt - for _, p := range paths { - if len(p) < minLen { - minLen = len(p) - } - } - - var sb strings.Builder - for i := range minLen { - for _, path := range paths[1:] { - if paths[0][i] != path[i] { - return sb.String() - } - } - sb.WriteByte(paths[0][i]) - } - return sb.String() -} - -func newQuarantine(paths []string) (*quarantine, error) { - var q quarantine - for 
i := range paths { - f := fstree.New( - fstree.WithDepth(1), - fstree.WithDirNameLen(1), - fstree.WithPath(paths[i]), - fstree.WithPerm(os.ModePerm), - ) - if err := f.Open(mode.ComponentReadWrite); err != nil { - return nil, fmt.Errorf("open fstree %s: %w", paths[i], err) - } - if err := f.Init(); err != nil { - return nil, fmt.Errorf("init fstree %s: %w", paths[i], err) - } - q.trees = append(q.trees, f) - } - return &q, nil -} - -func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { - for i := range q.trees { - res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a}) - if err != nil { - continue - } - return res.Object, nil - } - return nil, &apistatus.ObjectNotFound{} -} - -func (q *quarantine) Delete(ctx context.Context, a oid.Address) error { - for i := range q.trees { - _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a}) - if err != nil { - continue - } - return nil - } - return &apistatus.ObjectNotFound{} -} - -func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error { - data, err := obj.Marshal() - if err != nil { - return err - } - - var prm common.PutPrm - prm.Address = objectcore.AddressOf(obj) - prm.Object = obj - prm.RawData = data - - q.mtx.Lock() - current := q.current - q.current = (q.current + 1) % len(q.trees) - q.mtx.Unlock() - - _, err = q.trees[current].Put(ctx, prm) - return err -} - -func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error { - var prm common.IteratePrm - prm.Handler = func(elem common.IterationElement) error { - return f(elem.Address) - } - for i := range q.trees { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - _, err := q.trees[i].Iterate(ctx, prm) - if err != nil { - return err - } - } - return nil -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go deleted file mode 100644 index 0b8f2f172..000000000 --- 
a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go +++ /dev/null @@ -1,55 +0,0 @@ -package zombie - -import ( - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -func remove(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - storageEngine := newEngine(cmd, appCfg) - q := createQuarantine(cmd, storageEngine.DumpInfo()) - - var containerID cid.ID - cidStr, _ := cmd.Flags().GetString(cidFlag) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) - - var objectID *oid.ID - oidStr, _ := cmd.Flags().GetString(oidFlag) - if oidStr != "" { - objectID = &oid.ID{} - commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) - } - - if objectID != nil { - var addr oid.Address - addr.SetContainer(containerID) - addr.SetObject(*objectID) - removeObject(cmd, q, addr) - } else { - commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { - if addr.Container() != containerID { - return nil - } - removeObject(cmd, q, addr) - return nil - })) - } -} - -func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) { - err := q.Delete(cmd.Context(), addr) - if errors.Is(err, new(apistatus.ObjectNotFound)) { - return - } - commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err) -} diff --git 
a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go deleted file mode 100644 index f179c7c2d..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go +++ /dev/null @@ -1,69 +0,0 @@ -package zombie - -import ( - "crypto/sha256" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" -) - -func restore(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - storageEngine := newEngine(cmd, appCfg) - q := createQuarantine(cmd, storageEngine.DumpInfo()) - morphClient := createMorphClient(cmd, appCfg) - cnrCli := createContainerClient(cmd, morphClient) - - var containerID cid.ID - cidStr, _ := cmd.Flags().GetString(cidFlag) - commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) - - var objectID *oid.ID - oidStr, _ := cmd.Flags().GetString(oidFlag) - if oidStr != "" { - objectID = &oid.ID{} - commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) - } - - if objectID != nil { - var addr oid.Address - addr.SetContainer(containerID) - addr.SetObject(*objectID) - restoreObject(cmd, storageEngine, q, addr, cnrCli) - } else { - commonCmd.ExitOnErr(cmd, "iterate over 
quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { - if addr.Container() != containerID { - return nil - } - restoreObject(cmd, storageEngine, q, addr, cnrCli) - return nil - })) - } -} - -func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) { - obj, err := q.Get(cmd.Context(), addr) - commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err) - rawCID := make([]byte, sha256.Size) - - cid := addr.Container() - cid.Encode(rawCID) - cnr, err := cnrCli.Get(cmd.Context(), rawCID) - commonCmd.ExitOnErr(cmd, "get container: %w", err) - - putPrm := engine.PutPrm{ - Object: obj, - IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value), - } - commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm)) - commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr)) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go deleted file mode 100644 index c8fd9e5e5..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go +++ /dev/null @@ -1,123 +0,0 @@ -package zombie - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -const ( - flagBatchSize = "batch-size" - flagBatchSizeUsage = "Objects iteration batch size" - cidFlag = "cid" - cidFlagUsage = "Container ID" - oidFlag = "oid" - oidFlagUsage = "Object ID" - walletFlag = "wallet" - walletFlagShorthand = "w" - walletFlagUsage = "Path to the wallet or binary key" - addressFlag = "address" - addressFlagUsage = "Address of wallet account" - moveFlag = "move" - moveFlagUsage = "Move objects from storage engine to quarantine" -) - -var ( - Cmd = &cobra.Command{ - Use: "zombie", - Short: "Zombie objects related commands", - } - scanCmd = 
&cobra.Command{ - Use: "scan", - Short: "Scan storage engine for zombie objects and move them to quarantine", - Long: "", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) - _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) - _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag)) - _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag)) - _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize)) - _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag)) - }, - Run: scan, - } - listCmd = &cobra.Command{ - Use: "list", - Short: "List zombie objects from quarantine", - Long: "", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) - _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) - _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) - }, - Run: list, - } - restoreCmd = &cobra.Command{ - Use: "restore", - Short: "Restore zombie objects from quarantine", - Long: "", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) - _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) - _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) - _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) - }, - Run: restore, - } - removeCmd = &cobra.Command{ - Use: "remove", - Short: "Remove zombie objects from quarantine", - Long: "", - PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) - _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) - _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) - _ = viper.BindPFlag(oidFlag, 
cmd.Flags().Lookup(oidFlag)) - }, - Run: remove, - } -) - -func init() { - initScanCmd() - initListCmd() - initRestoreCmd() - initRemoveCmd() -} - -func initScanCmd() { - Cmd.AddCommand(scanCmd) - - scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) - scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) - scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage) - scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage) - scanCmd.Flags().String(addressFlag, "", addressFlagUsage) - scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage) -} - -func initListCmd() { - Cmd.AddCommand(listCmd) - - listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) - listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) - listCmd.Flags().String(cidFlag, "", cidFlagUsage) -} - -func initRestoreCmd() { - Cmd.AddCommand(restoreCmd) - - restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) - restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) - restoreCmd.Flags().String(cidFlag, "", cidFlagUsage) - restoreCmd.Flags().String(oidFlag, "", oidFlagUsage) -} - -func initRemoveCmd() { - Cmd.AddCommand(removeCmd) - - removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) - removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) - removeCmd.Flags().String(cidFlag, "", cidFlagUsage) - removeCmd.Flags().String(oidFlag, "", oidFlagUsage) -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go deleted file mode 100644 index 268ec4911..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go +++ /dev/null 
@@ -1,281 +0,0 @@ -package zombie - -import ( - "context" - "crypto/ecdsa" - "crypto/sha256" - "errors" - "fmt" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" - netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" - clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/spf13/cobra" - "golang.org/x/sync/errgroup" -) - -func scan(cmd *cobra.Command, _ []string) { - configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) - configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) - appCfg := config.New(configFile, configDir, config.EnvPrefix) - batchSize, _ := cmd.Flags().GetUint32(flagBatchSize) - if batchSize == 0 { - commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value")) - } - move, _ := cmd.Flags().GetBool(moveFlag) - - storageEngine := newEngine(cmd, appCfg) - morphClient := createMorphClient(cmd, appCfg) - cnrCli := createContainerClient(cmd, morphClient) - nmCli := createNetmapClient(cmd, morphClient) - q := createQuarantine(cmd, 
storageEngine.DumpInfo()) - pk := getPrivateKey(cmd, appCfg) - - epoch, err := nmCli.Epoch(cmd.Context()) - commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err) - - nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch) - commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err) - - cmd.Printf("Epoch: %d\n", nm.Epoch()) - cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes())) - - ps := &processStatus{ - statusCount: make(map[status]uint64), - } - - stopCh := make(chan struct{}) - start := time.Now() - var wg sync.WaitGroup - wg.Add(2) - go func() { - defer wg.Done() - tick := time.NewTicker(time.Second) - defer tick.Stop() - for { - select { - case <-cmd.Context().Done(): - return - case <-stopCh: - return - case <-tick.C: - fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start)) - } - } - }() - go func() { - defer wg.Done() - err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move) - close(stopCh) - }() - wg.Wait() - commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err) - - cmd.Println() - cmd.Println("Status description:") - cmd.Println("undefined -- nothing is clear") - cmd.Println("found -- object is found in cluster") - cmd.Println("quarantine -- object is not found in cluster") - cmd.Println() - for status, count := range ps.statusCount { - cmd.Printf("Status: %s, Count: %d\n", status, count) - } -} - -type status string - -const ( - statusUndefined status = "undefined" - statusFound status = "found" - statusQuarantine status = "quarantine" -) - -func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) { - rawCID := make([]byte, sha256.Size) - cid := obj.Address.Container() - cid.Encode(rawCID) - - cnr, err := cnrCli.Get(ctx, rawCID) - if err != nil { - var errContainerNotFound *apistatus.ContainerNotFound - if errors.As(err, &errContainerNotFound) { - // Policer will deal with 
this object. - return statusFound, nil - } - return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err) - } - nm, err := nmCli.NetMap(ctx) - if err != nil { - return statusUndefined, fmt.Errorf("read netmap from morph: %w", err) - } - - nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID) - if err != nil { - // Not enough nodes, check all netmap nodes. - nodes = append([][]netmap.NodeInfo{}, nm.Nodes()) - } - - objID := obj.Address.Object() - cnrID := obj.Address.Container() - local := true - raw := false - if obj.ECInfo != nil { - objID = obj.ECInfo.ParentID - local = false - raw = true - } - prm := clientSDK.PrmObjectHead{ - ObjectID: &objID, - ContainerID: &cnrID, - Local: local, - Raw: raw, - } - - var ni clientCore.NodeInfo - for i := range nodes { - for j := range nodes[i] { - if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil { - return statusUndefined, fmt.Errorf("parse node info: %w", err) - } - c, err := cc.Get(ni) - if err != nil { - continue - } - res, err := c.ObjectHead(ctx, prm) - if err != nil { - var errECInfo *objectSDK.ECInfoError - if raw && errors.As(err, &errECInfo) { - return statusFound, nil - } - continue - } - if err := apistatus.ErrFromStatus(res.Status()); err != nil { - continue - } - return statusFound, nil - } - } - - if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 { - return statusFound, nil - } - return statusQuarantine, nil -} - -func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus, - appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool, -) error { - cc := cache.NewSDKClientCache(cache.ClientCacheOpts{ - DialTimeout: apiclientconfig.DialTimeout(appCfg), - StreamTimeout: apiclientconfig.StreamTimeout(appCfg), - ReconnectTimeout: 
apiclientconfig.ReconnectTimeout(appCfg), - Key: pk, - AllowExternal: apiclientconfig.AllowExternal(appCfg), - }) - ctx := cmd.Context() - - var cursor *engine.Cursor - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var prm engine.ListWithCursorPrm - prm.WithCursor(cursor) - prm.WithCount(batchSize) - - res, err := storageEngine.ListWithCursor(ctx, prm) - if err != nil { - if errors.Is(err, engine.ErrEndOfListing) { - return nil - } - return fmt.Errorf("list with cursor: %w", err) - } - - cursor = res.Cursor() - addrList := res.AddressList() - eg, egCtx := errgroup.WithContext(ctx) - eg.SetLimit(int(batchSize)) - - for i := range addrList { - addr := addrList[i] - eg.Go(func() error { - result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr) - if err != nil { - return fmt.Errorf("check object %s status: %w", addr.Address, err) - } - ps.add(result) - - if !move && result == statusQuarantine { - cmd.Println(addr) - return nil - } - - if result == statusQuarantine { - return moveToQuarantine(egCtx, storageEngine, q, addr.Address) - } - return nil - }) - } - if err := eg.Wait(); err != nil { - return fmt.Errorf("process objects batch: %w", err) - } - } -} - -func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error { - var getPrm engine.GetPrm - getPrm.WithAddress(addr) - res, err := storageEngine.Get(ctx, getPrm) - if err != nil { - return fmt.Errorf("get object %s from storage engine: %w", addr, err) - } - - if err := q.Put(ctx, res.Object()); err != nil { - return fmt.Errorf("put object %s to quarantine: %w", addr, err) - } - - var delPrm engine.DeletePrm - delPrm.WithForceRemoval() - delPrm.WithAddress(addr) - - if err = storageEngine.Delete(ctx, delPrm); err != nil { - return fmt.Errorf("delete object %s from storage engine: %w", addr, err) - } - return nil -} - -type processStatus struct { - guard sync.RWMutex - statusCount map[status]uint64 - count uint64 -} - -func (s 
*processStatus) add(st status) { - s.guard.Lock() - defer s.guard.Unlock() - s.statusCount[st]++ - s.count++ -} - -func (s *processStatus) total() uint64 { - s.guard.RLock() - defer s.guard.RUnlock() - return s.count -} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go deleted file mode 100644 index 5be34d502..000000000 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go +++ /dev/null @@ -1,201 +0,0 @@ -package zombie - -import ( - "context" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" - shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" - blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" - fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - "github.com/panjf2000/ants/v2" - "github.com/spf13/cobra" - "go.etcd.io/bbolt" - "go.uber.org/zap" -) - -func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine { - ngOpts := storageEngineOptions(c) - shardOpts := shardOptions(cmd, c) - e := engine.New(ngOpts...) - for _, opts := range shardOpts { - _, err := e.AddShard(cmd.Context(), opts...) - commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) - } - commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context())) - commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context())) - return e -} - -func storageEngineOptions(c *config.Config) []engine.Option { - return []engine.Option{ - engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)), - engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)), - } -} - -func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option { - var result [][]shard.Option - err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error { - result = append(result, getShardOpts(cmd, c, sh)) - return nil - }) - commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) - return result -} - -func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option { - wc, wcEnabled := getWriteCacheOpts(sh) - return []shard.Option{ - shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - shard.WithRefillMetabase(sh.RefillMetabase()), - shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()), - shard.WithMode(sh.Mode()), - shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...), - shard.WithMetaBaseOptions(getMetabaseOpts(sh)...), - shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...), - shard.WithWriteCache(wcEnabled), - 
shard.WithWriteCacheOptions(wc), - shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()), - shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()), - shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()), - shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()), - shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { - pool, err := ants.NewPool(sz) - commonCmd.ExitOnErr(cmd, "init GC pool: %w", err) - return pool - }), - shard.WithLimiter(qos.NewNoopLimiter()), - } -} - -func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) { - if wc := sh.WriteCache(); wc != nil && wc.Enabled() { - var result []writecache.Option - result = append(result, - writecache.WithPath(wc.Path()), - writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()), - writecache.WithMaxObjectSize(wc.MaxObjectSize()), - writecache.WithFlushWorkersCount(wc.WorkerCount()), - writecache.WithMaxCacheSize(wc.SizeLimit()), - writecache.WithMaxCacheCount(wc.CountLimit()), - writecache.WithNoSync(wc.NoSync()), - writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - writecache.WithQoSLimiter(qos.NewNoopLimiter()), - ) - return result, true - } - return nil, false -} - -func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option { - var piloramaOpts []pilorama.Option - if config.BoolSafe(c.Sub("tree"), "enabled") { - pr := sh.Pilorama() - piloramaOpts = append(piloramaOpts, - pilorama.WithPath(pr.Path()), - pilorama.WithPerm(pr.Perm()), - pilorama.WithNoSync(pr.NoSync()), - pilorama.WithMaxBatchSize(pr.MaxBatchSize()), - pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()), - ) - } - return piloramaOpts -} - -func getMetabaseOpts(sh *shardconfig.Config) []meta.Option { - return []meta.Option{ - meta.WithPath(sh.Metabase().Path()), - meta.WithPermissions(sh.Metabase().BoltDB().Perm()), - meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()), - 
meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()), - meta.WithBoltDBOptions(&bbolt.Options{ - Timeout: 100 * time.Millisecond, - }), - meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - meta.WithEpochState(&epochState{}), - } -} - -func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option { - result := []blobstor.Option{ - blobstor.WithCompression(sh.Compression()), - blobstor.WithStorages(getSubStorages(ctx, sh)), - blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - } - - return result -} - -func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage { - var ss []blobstor.SubStorage - for _, storage := range sh.BlobStor().Storages() { - switch storage.Type() { - case blobovniczatree.Type: - sub := blobovniczaconfig.From((*config.Config)(storage)) - blobTreeOpts := []blobovniczatree.Option{ - blobovniczatree.WithRootPath(storage.Path()), - blobovniczatree.WithPermissions(storage.Perm()), - blobovniczatree.WithBlobovniczaSize(sub.Size()), - blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()), - blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()), - blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()), - blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()), - blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()), - blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()), - blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()), - blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())), - blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())), - blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()), - } - - ss = append(ss, blobstor.SubStorage{ - Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...), - Policy: func(_ *objectSDK.Object, data []byte) bool { - return uint64(len(data)) < sh.SmallSizeLimit() - }, - }) - case fstree.Type: - sub := 
fstreeconfig.From((*config.Config)(storage)) - fstreeOpts := []fstree.Option{ - fstree.WithPath(storage.Path()), - fstree.WithPerm(storage.Perm()), - fstree.WithDepth(sub.Depth()), - fstree.WithNoSync(sub.NoSync()), - fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), - } - - ss = append(ss, blobstor.SubStorage{ - Storage: fstree.New(fstreeOpts...), - Policy: func(_ *objectSDK.Object, _ []byte) bool { - return true - }, - }) - default: - // should never happen, that has already - // been handled: when the config was read - } - } - return ss -} - -type epochState struct{} - -func (epochState) CurrentEpoch() uint64 { - return 0 -} diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go index 23dba14f4..be42f2aa5 100644 --- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go +++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -162,7 +161,9 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv helper.GetAlphabetNNSDomain(i), int64(nns.TXT)) } - assert.NoError(w.Err) + if w.Err != nil { + panic(w.Err) + } alphaRes, err := c.InvokeScript(w.Bytes(), nil) if err != nil { @@ -225,7 +226,9 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan for i := range accounts { emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash) } - assert.NoError(w.Err) + if w.Err != nil { + panic(w.Err) + } res, err := 
c.Run(w.Bytes()) if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) { diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go index c17fb62ff..f64cb4817 100644 --- a/cmd/frostfs-adm/internal/modules/morph/config/config.go +++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go @@ -63,7 +63,7 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig, netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig: nbuf := make([]byte, 8) - copy(nbuf, v) + copy(nbuf[:], v) n := binary.LittleEndian.Uint64(nbuf) _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n)) case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig: diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go index 79685f111..e72dc15e9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/io" @@ -236,7 +235,9 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd putContainer(bw, ch, cnt) - assert.NoError(bw.Err) + if bw.Err != nil { + panic(bw.Err) + } if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go index 543b5fcb3..5adb480da 100644 --- 
a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/cli/cmdargs" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/encoding/address" @@ -121,7 +120,9 @@ func deployContractCmd(cmd *cobra.Command, args []string) error { } } - assert.NoError(writer.Err, "can't create deployment script") + if writer.Err != nil { + panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) + } if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil { return err @@ -172,8 +173,9 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string domain, int64(nns.TXT), address.Uint160ToString(cs.Hash)) } - assert.NoError(bw.Err, "can't create deployment script") - if bw.Len() != start { + if bw.Err != nil { + panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) + } else if bw.Len() != start { writer.WriteBytes(bw.Bytes()) emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All) diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index fde58fd2b..fb7e4ff62 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -11,7 +11,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" @@ -237,17 +236,21 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu } else { sub.Reset() emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag) - assert.NoError(sub.Err, "can't create version script") + if sub.Err != nil { + panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) + } script := sub.Bytes() emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0}) - bw.WriteBytes(script) + bw.BinWriter.WriteBytes(script) emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1}) emit.Opcodes(bw.BinWriter, opcode.PUSH0) } } emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target - assert.NoError(bw.Err, "can't create version script") + if bw.Err != nil { + panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) + } res, err := c.InvokeScript(bw.Bytes(), nil) if err != nil { diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index 7f777db98..8ae606f1a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -1,7 +1,6 @@ package frostfsid import ( - "encoding/hex" "errors" "fmt" "math/big" @@ -605,7 +604,7 @@ func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclie cmd.Printf("Address: %s\n", address.Uint160ToString(addr)) pk := "" if subj.PrimaryKey != nil { - pk = hex.EncodeToString(subj.PrimaryKey.Bytes()) + pk = subj.PrimaryKey.String() } 
cmd.Printf("Primary key: %s\n", pk) cmd.Printf("Name: %s\n", subj.Name) @@ -615,7 +614,7 @@ func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclie for _, key := range subj.AdditionalKeys { k := "" if key != nil { - k = hex.EncodeToString(key.Bytes()) + k = key.String() } cmd.Printf("- %s\n", k) } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go index 50b5c1ec7..961ceba53 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go @@ -6,7 +6,6 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -14,7 +13,9 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" + nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" @@ -186,9 +187,19 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (* } func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) { - inv := invoker.New(c, nil) - reader := nns2.NewReader(inv, nnsHash) - return reader.IsAvailable(name) + switch c.(type) { + case *rpcclient.Client: + inv := invoker.New(c, nil) + reader := nns2.NewReader(inv, nnsHash) + return reader.IsAvailable(name) + default: + b, err := 
unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil)) + if err != nil { + return false, fmt.Errorf("`isAvailable`: invalid response: %w", err) + } + + return b, nil + } } func CheckNotaryEnabled(c Client) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index da5ffedae..8e5615baa 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -13,7 +13,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -22,7 +21,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/smartcontract/context" "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" @@ -30,6 +28,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/emit" "github.com/nspcc-dev/neo-go/pkg/vm/opcode" + "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -376,7 +375,9 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen } act, err = actor.New(c.Client, signers) } else { - assert.False(withConsensus, "BUG: should never happen") + if withConsensus { + panic("BUG: should 
never happen") + } act, err = c.CommitteeAct, nil } if err != nil { @@ -410,9 +411,11 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error { version, err := c.Client.GetVersion() - // error appears only if client - // has not been initialized - assert.NoError(err) + if err != nil { + // error appears only if client + // has not been initialized + panic(err) + } network := version.Protocol.Network // Use parameter context to avoid dealing with signature order. @@ -444,12 +447,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin for i := range tx.Signers { if tx.Signers[i].Account == h { - assert.True(i <= len(tx.Scripts), "BUG: invalid signing order") if i < len(tx.Scripts) { tx.Scripts[i] = *w - } - if i == len(tx.Scripts) { + } else if i == len(tx.Scripts) { tx.Scripts = append(tx.Scripts, *w) + } else { + panic("BUG: invalid signing order") } return nil } @@ -507,7 +510,9 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) emit.Opcodes(bw.BinWriter, opcode.ASSERT) - assert.NoError(bw.Err) + if bw.Err != nil { + panic(bw.Err) + } return bw.Bytes(), false, nil } @@ -519,8 +524,12 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U } func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) { - avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone)) - return !avail, err + res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone) + if err != nil { + return false, err + } + + return res.State == vmstate.Halt.String(), nil } func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go 
b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index 46611c177..d0a05d5c7 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/config" "github.com/nspcc-dev/neo-go/pkg/core" @@ -317,7 +316,9 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint func (l *LocalClient) putTransactions() error { // 1. Prepare new block. lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash()) - assert.NoError(err) + if err != nil { + panic(err) + } defer func() { l.transactions = l.transactions[:0] }() b := &block.Block{ @@ -358,7 +359,9 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s w := io.NewBufBinWriter() emit.Array(w.BinWriter, parameters...) 
emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All) - assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) + if w.Err != nil { + panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) + } return c.InvokeScript(w.Bytes(), signers) } diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go index 176356378..e127ca545 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -112,7 +111,9 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All) - assert.NoError(w.Err, "can't wrap register script") + if w.Err != nil { + panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err)) + } } func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go index 7b7597d91..4c6607f9a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go @@ -1,18 +1,21 @@ package 
initialize import ( + "errors" "fmt" "math/big" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/pkg/core/native" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/io" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/util" @@ -27,8 +30,7 @@ const ( ) func registerCandidateRange(c *helper.InitializeContext, start, end int) error { - reader := neo.NewReader(c.ReadOnlyInvoker) - regPrice, err := reader.GetRegisterPrice() + regPrice, err := getCandidateRegisterPrice(c) if err != nil { return fmt.Errorf("can't fetch registration price: %w", err) } @@ -40,7 +42,9 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error { emit.Opcodes(w.BinWriter, opcode.ASSERT) } emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice) - assert.NoError(w.Err) + if w.Err != nil { + panic(fmt.Sprintf("BUG: %v", w.Err)) + } signers := []actor.SignerAccount{{ Signer: c.GetSigner(false, c.CommitteeAcc), @@ -112,7 +116,7 @@ func registerCandidates(c *helper.InitializeContext) error { func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { neoHash := neo.Hash - ok, err := transferNEOFinished(c) + ok, err := transferNEOFinished(c, neoHash) if ok || err != nil { return err } @@ -135,8 +139,33 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { return c.AwaitTx() } -func 
transferNEOFinished(c *helper.InitializeContext) (bool, error) { - r := neo.NewReader(c.ReadOnlyInvoker) +func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) { + r := nep17.NewReader(c.ReadOnlyInvoker, neoHash) bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash()) return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err } + +var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response") + +func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) { + switch c.Client.(type) { + case *rpcclient.Client: + inv := invoker.New(c.Client, nil) + reader := neo.NewReader(inv) + return reader.GetRegisterPrice() + default: + neoHash := neo.Hash + res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil) + if err != nil { + return 0, err + } + if len(res.Stack) == 0 { + return 0, errGetPriceInvalid + } + bi, err := res.Stack[0].TryInteger() + if err != nil || !bi.IsInt64() { + return 0, errGetPriceInvalid + } + return bi.Int64(), nil + } +} diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go index cc8225c7a..defd898c8 100644 --- a/cmd/frostfs-adm/internal/modules/root.go +++ b/cmd/frostfs-adm/internal/modules/root.go @@ -5,9 +5,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete" utilConfig 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" @@ -41,8 +41,8 @@ func init() { rootCmd.AddCommand(config.RootCmd) rootCmd.AddCommand(morph.RootCmd) + rootCmd.AddCommand(storagecfg.RootCmd) rootCmd.AddCommand(metabase.RootCmd) - rootCmd.AddCommand(maintenance.RootCmd) rootCmd.AddCommand(autocomplete.Command("frostfs-adm")) rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{})) diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go new file mode 100644 index 000000000..67e3414c2 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/storagecfg/config.go @@ -0,0 +1,135 @@ +package storagecfg + +const configTemplate = `logger: + level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" + +node: + wallet: + path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented + address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented + password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented + addresses: # list of addresses announced by Storage node in the Network map + - {{ .AnnouncedAddress }} + attribute_0: UN-LOCODE:{{ .Attribute.Locode }} + relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map + +grpc: + num: 1 # total number of listener endpoints + 0: + endpoint: {{ .Endpoint }} # endpoint for gRPC server + tls:{{if .TLSCert}} + enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2) + certificate: {{ .TLSCert }} # path to TLS certificate + key: {{ .TLSKey }} # path to TLS key + {{- else }} + enabled: false # disable TLS for a gRPC connection + {{- end}} + +control: + authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service + {{- range .AuthorizedKeys }} + - {{.}}{{end}} + grpc: + endpoint: {{.ControlEndpoint}} # endpoint that is 
listened by the Control Service + +morph: + dial_timeout: 20s # timeout for side chain NEO RPC client connection + cache_ttl: 15s # use TTL cache for side chain GET operations + rpc_endpoint: # side chain N3 RPC endpoints + {{- range .MorphRPC }} + - address: wss://{{.}}/ws{{end}} +{{if not .Relay }} +storage: + shard: + default: # section with the default shard parameters + metabase: + perm: 0644 # permissions for metabase files(directories: +x for current user and group) + + blobstor: + perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + depth: 2 # max depth of object tree storage in FS + small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes + compress: true # turn on/off Zstandard compression (level 3) of stored objects + compression_exclude_content_types: + - audio/* + - video/* + + blobovnicza: + size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes + depth: 1 # max depth of object tree storage in key-value DB + width: 4 # max width of object tree storage in key-value DB + opened_cache_capacity: 50 # maximum number of opened database files + opened_cache_ttl: 5m # ttl for opened database file + opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's + + gc: + remover_batch_size: 200 # number of objects to be removed by the garbage collector + remover_sleep_interval: 5m # frequency of the garbage collector invocation + 0: + mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only" + + metabase: + path: {{ .MetabasePath }} # path to the metabase + + blobstor: + path: {{ .BlobstorPath }} # path to the blobstor +{{end}}` + +const ( + neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221" + balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55" + neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1" 
+ balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf" +) + +var n3config = map[string]struct { + MorphRPC []string + RPC []string + NeoFSContract string + BalanceContract string +}{ + "testnet": { + MorphRPC: []string{ + "rpc01.morph.testnet.fs.neo.org:51331", + "rpc02.morph.testnet.fs.neo.org:51331", + "rpc03.morph.testnet.fs.neo.org:51331", + "rpc04.morph.testnet.fs.neo.org:51331", + "rpc05.morph.testnet.fs.neo.org:51331", + "rpc06.morph.testnet.fs.neo.org:51331", + "rpc07.morph.testnet.fs.neo.org:51331", + }, + RPC: []string{ + "rpc01.testnet.n3.nspcc.ru:21331", + "rpc02.testnet.n3.nspcc.ru:21331", + "rpc03.testnet.n3.nspcc.ru:21331", + "rpc04.testnet.n3.nspcc.ru:21331", + "rpc05.testnet.n3.nspcc.ru:21331", + "rpc06.testnet.n3.nspcc.ru:21331", + "rpc07.testnet.n3.nspcc.ru:21331", + }, + NeoFSContract: neofsTestnetAddress, + BalanceContract: balanceTestnetAddress, + }, + "mainnet": { + MorphRPC: []string{ + "rpc1.morph.fs.neo.org:40341", + "rpc2.morph.fs.neo.org:40341", + "rpc3.morph.fs.neo.org:40341", + "rpc4.morph.fs.neo.org:40341", + "rpc5.morph.fs.neo.org:40341", + "rpc6.morph.fs.neo.org:40341", + "rpc7.morph.fs.neo.org:40341", + }, + RPC: []string{ + "rpc1.n3.nspcc.ru:10331", + "rpc2.n3.nspcc.ru:10331", + "rpc3.n3.nspcc.ru:10331", + "rpc4.n3.nspcc.ru:10331", + "rpc5.n3.nspcc.ru:10331", + "rpc6.n3.nspcc.ru:10331", + "rpc7.n3.nspcc.ru:10331", + }, + NeoFSContract: neofsMainnetAddress, + BalanceContract: balanceMainnetAddress, + }, +} diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go new file mode 100644 index 000000000..a5adea0da --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/storagecfg/root.go @@ -0,0 +1,432 @@ +package storagecfg + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "math/rand" + "net" + "net/url" + "os" + "path/filepath" + "slices" + "strconv" + "strings" + "text/template" + "time" + + netutil 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + "github.com/chzyer/readline" + "github.com/nspcc-dev/neo-go/cli/flags" + "github.com/nspcc-dev/neo-go/cli/input" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" + "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" + "github.com/spf13/cobra" +) + +const ( + walletFlag = "wallet" + accountFlag = "account" +) + +const ( + defaultControlEndpoint = "localhost:8090" + defaultDataEndpoint = "localhost" +) + +// RootCmd is a root command of config section. +var RootCmd = &cobra.Command{ + Use: "storage-config [-w wallet] [-a acccount] []", + Short: "Section for storage node configuration commands", + Run: storageConfig, +} + +func init() { + fs := RootCmd.Flags() + + fs.StringP(walletFlag, "w", "", "Path to wallet") + fs.StringP(accountFlag, "a", "", "Wallet account") +} + +type config struct { + AnnouncedAddress string + AuthorizedKeys []string + ControlEndpoint string + Endpoint string + TLSCert string + TLSKey string + MorphRPC []string + Attribute struct { + Locode string + } + Wallet struct { + Path string + Account string + Password string + } + Relay bool + BlobstorPath string + MetabasePath string +} + +func storageConfig(cmd *cobra.Command, args []string) { + outPath := getOutputPath(args) + + historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history") + readline.SetHistoryPath(historyPath) + + var c config + + c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag) + if c.Wallet.Path == "" { + c.Wallet.Path = getPath("Path to the storage node wallet: ") + } + + w, err := wallet.NewWalletFromFile(c.Wallet.Path) + 
fatalOnErr(err) + + fillWalletAccount(cmd, &c, w) + + accH, err := flags.ParseAddress(c.Wallet.Account) + fatalOnErr(err) + + acc := w.GetAccount(accH) + if acc == nil { + fatalOnErr(errors.New("can't find account in wallet")) + } + + c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account)) + fatalOnErr(err) + + err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams()) + fatalOnErr(err) + + c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes())) + + network := readNetwork(cmd) + + c.MorphRPC = n3config[network].MorphRPC + + depositGas(cmd, acc, network) + + c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ") + + endpoint := getDefaultEndpoint(cmd, &c) + c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint)) + if c.Endpoint == "" { + c.Endpoint = endpoint + } + + c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint)) + if c.ControlEndpoint == "" { + c.ControlEndpoint = defaultControlEndpoint + } + + c.TLSCert = getPath("TLS Certificate (optional): ") + if c.TLSCert != "" { + c.TLSKey = getPath("TLS Key: ") + } + + c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ") + if !c.Relay { + p := getPath("Path to the storage directory (all available storage will be used): ") + c.BlobstorPath = filepath.Join(p, "blob") + c.MetabasePath = filepath.Join(p, "meta") + } + + out := applyTemplate(c) + fatalOnErr(os.WriteFile(outPath, out, 0o644)) + + cmd.Println("Node is ready for work! Run `frostfs-node -config " + outPath + "`") +} + +func getDefaultEndpoint(cmd *cobra.Command, c *config) string { + var addr, port string + for { + c.AnnouncedAddress = getString("Publicly announced address: ") + validator := netutil.Address{} + err := validator.FromString(c.AnnouncedAddress) + if err != nil { + cmd.Println("Incorrect address format. 
See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.") + continue + } + uriAddr, err := url.Parse(validator.URIAddr()) + if err != nil { + panic(fmt.Errorf("unexpected error: %w", err)) + } + addr = uriAddr.Hostname() + port = uriAddr.Port() + ip, err := net.ResolveIPAddr("ip", addr) + if err != nil { + cmd.Printf("Can't resolve IP address %s: %v\n", addr, err) + continue + } + + if !ip.IP.IsGlobalUnicast() { + cmd.Println("IP must be global unicast.") + continue + } + cmd.Printf("Resolved IP address: %s\n", ip.String()) + + _, err = strconv.ParseUint(port, 10, 16) + if err != nil { + cmd.Println("Port must be an integer.") + continue + } + + break + } + return net.JoinHostPort(defaultDataEndpoint, port) +} + +func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) { + c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag) + if c.Wallet.Account == "" { + addr := address.Uint160ToString(w.GetChangeAddress()) + c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr)) + if c.Wallet.Account == "" { + c.Wallet.Account = addr + } + } +} + +func readNetwork(cmd *cobra.Command) string { + var network string + for { + network = getString("Choose network [mainnet]/testnet: ") + switch network { + case "": + network = "mainnet" + case "testnet", "mainnet": + default: + cmd.Println(`Network must be either "mainnet" or "testnet"`) + continue + } + break + } + return network +} + +func getOutputPath(args []string) string { + if len(args) != 0 { + return args[0] + } + outPath := getPath("File to write config at [./config.yml]: ") + if outPath == "" { + outPath = "./config.yml" + } + return outPath +} + +func getWalletAccount(w *wallet.Wallet, prompt string) string { + addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts)) + for i := range w.Accounts { + addrs[i] = readline.PcItem(w.Accounts[i].Address) + } + + 
readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...)) + defer readline.SetAutoComplete(nil) + + s, err := readline.Line(prompt) + fatalOnErr(err) + return strings.TrimSpace(s) // autocompleter can return a string with a trailing space +} + +func getString(prompt string) string { + s, err := readline.Line(prompt) + fatalOnErr(err) + if s != "" { + _ = readline.AddHistory(s) + } + return s +} + +type filenameCompleter struct{} + +func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) { + prefix := string(line[:pos]) + dir := filepath.Dir(prefix) + de, err := os.ReadDir(dir) + if err != nil { + return nil, 0 + } + + for i := range de { + name := filepath.Join(dir, de[i].Name()) + if strings.HasPrefix(name, prefix) { + tail := []rune(strings.TrimPrefix(name, prefix)) + if de[i].IsDir() { + tail = append(tail, filepath.Separator) + } + newLine = append(newLine, tail) + } + } + if pos != 0 { + return newLine, pos - len([]rune(dir)) + } + return newLine, 0 +} + +func getPath(prompt string) string { + readline.SetAutoComplete(filenameCompleter{}) + defer readline.SetAutoComplete(nil) + + p, err := readline.Line(prompt) + fatalOnErr(err) + + if p == "" { + return p + } + + _ = readline.AddHistory(p) + + abs, err := filepath.Abs(p) + if err != nil { + fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err)) + } + + return abs +} + +func getConfirmation(def bool, prompt string) bool { + for { + s, err := readline.Line(prompt) + fatalOnErr(err) + + switch strings.ToLower(s) { + case "y", "yes": + return true + case "n", "no": + return false + default: + if len(s) == 0 { + return def + } + } + } +} + +func applyTemplate(c config) []byte { + tmpl, err := template.New("config").Parse(configTemplate) + fatalOnErr(err) + + b := bytes.NewBuffer(nil) + fatalOnErr(tmpl.Execute(b, c)) + + return b.Bytes() +} + +func fatalOnErr(err error) { + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +func 
depositGas(cmd *cobra.Command, acc *wallet.Account, network string) { + sideClient := initClient(n3config[network].MorphRPC) + balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract) + + sideActor, err := actor.NewSimple(sideClient, acc) + if err != nil { + fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err)) + } + + sideGas := nep17.NewReader(sideActor, balanceHash) + accSH := acc.Contract.ScriptHash() + + balance, err := sideGas.BalanceOf(accSH) + if err != nil { + fatalOnErr(fmt.Errorf("side chain balance: %w", err)) + } + + ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ", + fixedn.ToString(balance, 12))) + if !ok { + return + } + + amountStr := getString("Enter amount in GAS: ") + amount, err := fixedn.FromString(amountStr, 8) + if err != nil { + fatalOnErr(fmt.Errorf("invalid amount: %w", err)) + } + + mainClient := initClient(n3config[network].RPC) + neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract) + + mainActor, err := actor.NewSimple(mainClient, acc) + if err != nil { + fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err)) + } + + mainGas := nep17.New(mainActor, gas.Hash) + + txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil) + if err != nil { + fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err)) + } + + cmd.Print("Waiting for transactions to persist.") + tick := time.NewTicker(time.Second / 2) + defer tick.Stop() + + timer := time.NewTimer(time.Second * 20) + defer timer.Stop() + + at := trigger.Application + +loop: + for { + select { + case <-tick.C: + _, err := mainClient.GetApplicationLog(txHash, &at) + if err == nil { + cmd.Print("\n") + break loop + } + cmd.Print(".") + case <-timer.C: + cmd.Printf("\nTimeout while waiting for transaction to persist.\n") + if getConfirmation(false, "Continue configuration? 
yes/[no]: ") { + return + } + os.Exit(1) + } + } +} + +func initClient(rpc []string) *rpcclient.Client { + var c *rpcclient.Client + var err error + + shuffled := slices.Clone(rpc) + rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) + + for _, endpoint := range shuffled { + c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{ + DialTimeout: time.Second * 2, + RequestTimeout: time.Second * 5, + }) + if err != nil { + continue + } + if err = c.Init(); err != nil { + continue + } + return c + } + + fatalOnErr(fmt.Errorf("can't create N3 client: %w", err)) + panic("unreachable") +} diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index 299d0a830..3f235f070 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -858,8 +858,6 @@ type PatchObjectPrm struct { ReplaceAttribute bool - NewSplitHeader *objectSDK.SplitHeader - PayloadPatches []PayloadPatch } @@ -890,11 +888,7 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) { return nil, fmt.Errorf("init payload reading: %w", err) } - if patcher.PatchHeader(ctx, client.PatchHeaderPrm{ - NewSplitHeader: prm.NewSplitHeader, - NewAttributes: prm.NewAttributes, - ReplaceAttributes: prm.ReplaceAttribute, - }) { + if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) { for _, pp := range prm.PayloadPatches { payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm) if err != nil { diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index 1eadfa2e1..2d9c45cbd 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey prmDial := client.PrmDial{ Endpoint: addr.URIAddr(), GRPCDialOptions: []grpc.DialOption{ - 
grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()), + grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()), grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), }, diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go index 6ed21e107..88321176f 100644 --- a/cmd/frostfs-cli/internal/commonflags/api.go +++ b/cmd/frostfs-cli/internal/commonflags/api.go @@ -9,7 +9,7 @@ const ( TTL = "ttl" TTLShorthand = "" TTLDefault = 2 - TTLUsage = "The maximum number of intermediate nodes in the request route" + TTLUsage = "TTL value in request meta header" XHeadersKey = "xhdr" XHeadersShorthand = "x" diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go index 0927788ba..a86506c37 100644 --- a/cmd/frostfs-cli/modules/bearer/create.go +++ b/cmd/frostfs-cli/modules/bearer/create.go @@ -44,7 +44,6 @@ is set to current epoch + n. 
_ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath)) _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account)) - _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC)) }, } @@ -82,7 +81,7 @@ func createToken(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err) if iatRelative || expRelative || nvbRelative { - endpoint := viper.GetString(commonflags.RPC) + endpoint, _ := cmd.Flags().GetString(commonflags.RPC) if len(endpoint) == 0 { commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC)) } diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go index fac6eb2cd..8c4ab14f8 100644 --- a/cmd/frostfs-cli/modules/container/get.go +++ b/cmd/frostfs-cli/modules/container/get.go @@ -93,9 +93,9 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod cmd.Println("created:", container.CreatedAt(cnr)) cmd.Println("attributes:") - for key, val := range cnr.Attributes() { + cnr.IterateAttributes(func(key, val string) { cmd.Printf("\t%s=%s\n", key, val) - } + }) cmd.Println("placement policy:") commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd))) diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go index e4a023d91..bbb8da840 100644 --- a/cmd/frostfs-cli/modules/container/list.go +++ b/cmd/frostfs-cli/modules/container/list.go @@ -102,9 +102,9 @@ func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, i cmd.Println(id.String()) if flagVarListPrintAttr { - for key, val := range cnr.Attributes() { + cnr.IterateUserAttributes(func(key, val string) { cmd.Printf(" %s: %s\n", key, val) - } + }) } } diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index 
cf4862b4a..dcd755510 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -5,9 +5,7 @@ import ( "encoding/json" "errors" "fmt" - "maps" "os" - "slices" "strings" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -21,9 +19,8 @@ import ( ) type policyPlaygroundREPL struct { - cmd *cobra.Command - nodes map[string]netmap.NodeInfo - console *readline.Instance + cmd *cobra.Command + nodes map[string]netmap.NodeInfo } func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL { @@ -40,10 +37,10 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error { i := 1 for id, node := range repl.nodes { var attrs []string - for k, v := range node.Attributes() { + node.IterateAttributes(func(k, v string) { attrs = append(attrs, fmt.Sprintf("%s:%q", k, v)) - } - fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) + }) + fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) i++ } return nil @@ -150,29 +147,12 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error { for _, node := range ns { ids = append(ids, hex.EncodeToString(node.PublicKey())) } - fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids) + fmt.Printf("\t%2d: %v\n", i+1, ids) } return nil } -func (repl *policyPlaygroundREPL) handleHelp(args []string) error { - if len(args) != 0 { - if _, ok := commands[args[0]]; !ok { - return fmt.Errorf("unknown command: %q", args[0]) - } - fmt.Fprintln(repl.console, commands[args[0]].usage) - return nil - } - - commandList := slices.Collect(maps.Keys(commands)) - slices.Sort(commandList) - for _, command := range commandList { - fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].descriprion) - } - return nil -} - func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { var nm netmap.NetMap var nodes []netmap.NodeInfo @@ -183,104 +163,15 @@ func (repl 
*policyPlaygroundREPL) netMap() netmap.NetMap { return nm } -type commandDescription struct { - descriprion string - usage string -} - -var commands = map[string]commandDescription{ - "list": { - descriprion: "Display all nodes in the netmap", - usage: `Display all nodes in the netmap -Example of usage: - list - 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} - 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} -`, - }, - - "ls": { - descriprion: "Display all nodes in the netmap", - usage: `Display all nodes in the netmap -Example of usage: - ls - 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} - 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} -`, - }, - - "add": { - descriprion: "Add a new node: add attr=value", - usage: `Add a new node -Example of usage: - add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`, - }, - - "load": { - descriprion: "Load netmap from file: load ", - usage: `Load netmap from file -Example of usage: - load "netmap.json" -File format (netmap.json): -{ - "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": { - "continent": "Europe", - "country": "Poland" - }, - "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": { - "continent": "Antarctica", - "country": "Heard Island" - } -}`, - }, - - "remove": { - descriprion: "Remove a node: remove ", - usage: `Remove a node -Example of usage: - remove 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, - }, - - "rm": { - descriprion: "Remove a node: rm ", - usage: `Remove a node -Example of usage: - rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, - }, - - "eval": { - descriprion: "Evaluate 
a policy: eval ", - usage: `Evaluate a policy -Example of usage: - eval REP 2`, - }, - - "help": { - descriprion: "Show available commands", - }, -} - -func (repl *policyPlaygroundREPL) handleCommand(args []string) error { - if len(args) == 0 { - return nil - } - - switch args[0] { - case "list", "ls": - return repl.handleLs(args[1:]) - case "add": - return repl.handleAdd(args[1:]) - case "load": - return repl.handleLoad(args[1:]) - case "remove", "rm": - return repl.handleRemove(args[1:]) - case "eval": - return repl.handleEval(args[1:]) - case "help": - return repl.handleHelp(args[1:]) - } - return fmt.Errorf("unknown command %q. See 'help' for assistance", args[0]) -} +var policyPlaygroundCompleter = readline.NewPrefixCompleter( + readline.PcItem("list"), + readline.PcItem("ls"), + readline.PcItem("add"), + readline.PcItem("load"), + readline.PcItem("remove"), + readline.PcItem("rm"), + readline.PcItem("eval"), +) func (repl *policyPlaygroundREPL) run() error { if len(viper.GetString(commonflags.RPC)) > 0 { @@ -299,32 +190,24 @@ func (repl *policyPlaygroundREPL) run() error { } } - if len(viper.GetString(netmapConfigPath)) > 0 { - err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)}) - commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err) + cmdHandlers := map[string]func([]string) error{ + "list": repl.handleLs, + "ls": repl.handleLs, + "add": repl.handleAdd, + "load": repl.handleLoad, + "remove": repl.handleRemove, + "rm": repl.handleRemove, + "eval": repl.handleEval, } - var cfgCompleter []readline.PrefixCompleterInterface - var helpSubItems []readline.PrefixCompleterInterface - - for name := range commands { - if name != "help" { - cfgCompleter = append(cfgCompleter, readline.PcItem(name)) - helpSubItems = append(helpSubItems, readline.PcItem(name)) - } - } - - cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...)) - completer := readline.NewPrefixCompleter(cfgCompleter...) 
rl, err := readline.NewEx(&readline.Config{ Prompt: "> ", InterruptPrompt: "^C", - AutoComplete: completer, + AutoComplete: policyPlaygroundCompleter, }) if err != nil { return fmt.Errorf("error initializing readline: %w", err) } - repl.console = rl defer rl.Close() var exit bool @@ -342,8 +225,17 @@ func (repl *policyPlaygroundREPL) run() error { } exit = false - if err := repl.handleCommand(strings.Fields(line)); err != nil { - fmt.Fprintf(repl.console, "error: %v\n", err) + parts := strings.Fields(line) + if len(parts) == 0 { + continue + } + cmd := parts[0] + if handler, exists := cmdHandlers[cmd]; exists { + if err := handler(parts[1:]); err != nil { + fmt.Printf("error: %v\n", err) + } + } else { + fmt.Printf("error: unknown command %q\n", cmd) } } } @@ -359,14 +251,6 @@ If a wallet and endpoint is provided, the initial netmap data will be loaded fro }, } -const ( - netmapConfigPath = "netmap-config" - netmapConfigUsage = "Path to the netmap configuration file" -) - func initContainerPolicyPlaygroundCmd() { commonflags.Init(policyPlaygroundCmd) - policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage) - - _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath)) } diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index b8d7eb046..8032bf09a 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -296,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft leftMinutes := int(leftSeconds / 60) - fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes) + sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes)) } func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -305,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp 
*control.GetShardEvacuationStatusR hour := int(duration.Seconds() / 3600) minute := int(duration.Seconds()/60) % 60 second := int(duration.Seconds()) % 60 - fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second) + sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second)) } } func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if resp.GetBody().GetStartedAt() != nil { startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC() - fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339)) + sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339))) } } func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if len(resp.GetBody().GetErrorMessage()) > 0 { - fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage()) + sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage())) } } @@ -332,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes default: status = "undefined" } - fmt.Fprintf(sb, " Status: %s.", status) + sb.WriteString(fmt.Sprintf(" Status: %s.", status)) } func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -350,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR } func appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", + sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", resp.GetBody().GetEvacuatedObjects(), resp.GetBody().GetTotalObjects(), resp.GetBody().GetFailedObjects(), resp.GetBody().GetSkippedObjects(), resp.GetBody().GetEvacuatedTrees(), resp.GetBody().GetTotalTrees(), - 
resp.GetBody().GetFailedTrees()) + resp.GetBody().GetFailedTrees())) } func initControlEvacuationShardCmd() { diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go index 5da66dcd9..ae4bb329a 100644 --- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go +++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go @@ -62,11 +62,11 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) { cmd.Println("state:", stateWord) - for s := range i.NetworkEndpoints() { + netmap.IterateNetworkEndpoints(i, func(s string) { cmd.Println("address:", s) - } + }) - for key, value := range i.Attributes() { + i.IterateAttributes(func(key, value string) { cmd.Printf("attribute: %s=%s\n", key, value) - } + }) } diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go index d67db9f0d..53dd01868 100644 --- a/cmd/frostfs-cli/modules/object/lock.go +++ b/cmd/frostfs-cli/modules/object/lock.go @@ -18,7 +18,6 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" - "github.com/spf13/viper" ) // object lock command. 
@@ -79,7 +78,7 @@ var objectLockCmd = &cobra.Command{ ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() - endpoint := viper.GetString(commonflags.RPC) + endpoint, _ := cmd.Flags().GetString(commonflags.RPC) currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint) commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index 476238651..bc34b370d 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -7,7 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "slices" "sync" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -49,12 +48,6 @@ type ecHeader struct { parent oid.ID } -type objectCounter struct { - sync.Mutex - total uint32 - isECcounted bool -} - type objectPlacement struct { requiredNodes []netmapSDK.NodeInfo confirmedNodes []netmapSDK.NodeInfo @@ -63,7 +56,6 @@ type objectPlacement struct { type objectNodesResult struct { errors []error placements map[oid.ID]objectPlacement - total uint32 } type ObjNodesDataObject struct { @@ -114,18 +106,18 @@ func objectNodes(cmd *cobra.Command, _ []string) { pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk) + objects := getPhyObjects(cmd, cnrID, objID, cli, pk) placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli) result := getRequiredPlacement(cmd, objects, placementPolicy, netmap) - getActualPlacement(cmd, netmap, pk, objects, count, result) + getActualPlacement(cmd, netmap, pk, objects, result) printPlacement(cmd, objID, objects, result) } -func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) { +func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk 
*ecdsa.PrivateKey) []phyObject { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -153,7 +145,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C parent: res.Header().ECHeader().Parent(), } } - return []phyObject{obj}, 1 + return []phyObject{obj} } var errSplitInfo *objectSDK.SplitInfoError @@ -163,34 +155,29 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { - return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1 + return getECObjectChunks(cmd, cnrID, objID, ecInfoError) } commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err) - return nil, 0 + return nil } -func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) { - members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) - return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total +func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject { + members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) + return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead) } -func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) { - var total int +func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID { splitInfo := errSplitInfo.SplitInfo() if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok { - if total = 
len(members); total > 0 { - total-- // linking object is not data object - } - return members, total + return members } if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok { - return members, len(members) + return members } - members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) - return members, len(members) + return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) } func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject { @@ -396,11 +383,8 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem } } -func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) { +func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) { resultMtx := &sync.Mutex{} - counter := &objectCounter{ - total: uint32(count), - } candidates := getNodesToCheckObjectExistance(cmd, netmap, result) @@ -417,7 +401,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. for _, object := range objects { eg.Go(func() error { - stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter) + stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk) resultMtx.Lock() defer resultMtx.Unlock() if err == nil && stored { @@ -436,7 +420,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. 
} commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait()) - result.total = counter.total } func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo { @@ -461,11 +444,17 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N var cli *client.Client var addresses []string if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal { - addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) + candidate.IterateNetworkEndpoints(func(s string) bool { + addresses = append(addresses, s) + return false + }) addresses = append(addresses, candidate.ExternalAddresses()...) } else { addresses = append(addresses, candidate.ExternalAddresses()...) - addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) + candidate.IterateNetworkEndpoints(func(s string) bool { + addresses = append(addresses, s) + return false + }) } var lastErr error @@ -489,7 +478,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N return cli, nil } -func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) { +func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -504,14 +493,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, res, err := internalclient.HeadObject(ctx, prmHead) if err == nil && res != nil { - if res.Header().ECHeader() != nil { - counter.Lock() - defer counter.Unlock() - if !counter.isECcounted { - counter.total *= res.Header().ECHeader().Total() - } - counter.isECcounted = true - } return true, nil } var notFound *apistatus.ObjectNotFound @@ -531,8 +512,7 @@ func 
printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul } func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total) - fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects)) + fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects)) for _, object := range objects { fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID) diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go index ebbde76a2..d98182679 100644 --- a/cmd/frostfs-cli/modules/object/patch.go +++ b/cmd/frostfs-cli/modules/object/patch.go @@ -2,7 +2,6 @@ package object import ( "fmt" - "os" "strconv" "strings" @@ -10,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -22,7 +20,6 @@ const ( replaceAttrsFlagName = "replace-attrs" rangeFlagName = "range" payloadFlagName = "payload" - splitHeaderFlagName = "split-header" ) var objectPatchCmd = &cobra.Command{ @@ -53,7 +50,6 @@ func initObjectPatchCmd() { flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.") flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. 
Format: offset:length") flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.") - flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header") } func patch(cmd *cobra.Command, _ []string) { @@ -88,8 +84,6 @@ func patch(cmd *cobra.Command, _ []string) { prm.NewAttributes = newAttrs prm.ReplaceAttribute = replaceAttrs - prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd) - for i := range ranges { prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{ Range: ranges[i], @@ -153,22 +147,3 @@ func patchPayloadPaths(cmd *cobra.Command) []string { v, _ := cmd.Flags().GetStringSlice(payloadFlagName) return v } - -func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader { - path, _ := cmd.Flags().GetString(splitHeaderFlagName) - if path == "" { - return nil - } - - data, err := os.ReadFile(path) - commonCmd.ExitOnErr(cmd, "read file error: %w", err) - - splitHdrV2 := new(objectV2.SplitHeader) - err = splitHdrV2.Unmarshal(data) - if err != nil { - err = splitHdrV2.UnmarshalJSON(data) - commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err) - } - - return objectSDK.NewSplitHeaderFromV2(splitHdrV2) -} diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go index 6ec508ae2..be4fee4cf 100644 --- a/cmd/frostfs-cli/modules/object/range.go +++ b/cmd/frostfs-cli/modules/object/range.go @@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool { if ok { toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) toProto, _ := cmd.Flags().GetBool("proto") - if !toJSON && !toProto { + if !(toJSON || toProto) { cmd.PrintErrln("Object is erasure-encoded, ec information received.") } printECInfo(cmd, errECInfo.ECInfo()) diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go index 8e4e8b287..3955f8ee1 100644 --- a/cmd/frostfs-cli/modules/object/util.go +++ b/cmd/frostfs-cli/modules/object/util.go @@ -262,8 +262,13 
@@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client if _, ok := dst.(*internal.DeleteObjectPrm); ok { common.PrintVerbose(cmd, "Collecting relatives of the removal object...") - objs = collectObjectRelatives(cmd, cli, cnr, *obj) - objs = append(objs, *obj) + rels := collectObjectRelatives(cmd, cli, cnr, *obj) + + if len(rels) == 0 { + objs = []oid.ID{*obj} + } else { + objs = append(rels, *obj) + } } } diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index d71a94b98..933378df6 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -2,19 +2,17 @@ package tree import ( "context" - "crypto/tls" "fmt" + "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" "github.com/spf13/viper" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -33,29 +31,22 @@ func _client() (tree.TreeServiceClient, error) { return nil, err } - host, isTLS, err := client.ParseURI(netAddr.URIAddr()) - if err != nil { - return nil, err - } - - creds := insecure.NewCredentials() - if isTLS { - creds = credentials.NewTLS(&tls.Config{}) - } - opts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( - tracing.NewUnaryClientInterceptor(), + tracing.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( tracing.NewStreamClientInterceptor(), ), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), grpc.WithDisableServiceConfig(), - grpc.WithTransportCredentials(creds), } - cc, err := grpc.NewClient(host, opts...) 
+ if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) return tree.NewTreeServiceClient(cc), err } diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 13a747ba6..09af08525 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -4,14 +4,11 @@ import ( "context" "os" "os/signal" - "strconv" "syscall" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "github.com/spf13/cast" "github.com/spf13/viper" "go.uber.org/zap" ) @@ -41,33 +38,13 @@ func reloadConfig() error { } cmode.Store(cfg.GetBool("node.kludge_compatibility_mode")) audit.Store(cfg.GetBool("audit.enabled")) - var logPrm logger.Prm err = logPrm.SetLevelString(cfg.GetString("logger.level")) if err != nil { return err } - err = logPrm.SetTags(loggerTags()) - if err != nil { - return err - } - logger.UpdateLevelForTags(logPrm) + logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") - return nil -} - -func loggerTags() [][]string { - var res [][]string - for i := 0; ; i++ { - var item []string - index := strconv.FormatInt(int64(i), 10) - names := cast.ToString(cfg.Get("logger.tags." 
+ index + ".names")) - if names == "" { - break - } - item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level"))) - res = append(res, item) - } - return res + return logPrm.Reload() } func watchForSignal(ctx context.Context, cancel func()) { diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index 799feb784..ade64ba84 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -31,6 +31,7 @@ const ( var ( wg = new(sync.WaitGroup) intErr = make(chan error) // internal inner ring errors + logPrm = new(logger.Prm) innerRing *innerring.Server pprofCmp *pprofComponent metricsCmp *httpComponent @@ -69,7 +70,6 @@ func main() { metrics := irMetrics.NewInnerRingMetrics() - var logPrm logger.Prm err = logPrm.SetLevelString( cfg.GetString("logger.level"), ) @@ -80,14 +80,10 @@ func main() { exitErr(err) logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook() logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") - err = logPrm.SetTags(loggerTags()) - exitErr(err) log, err = logger.NewLogger(logPrm) exitErr(err) - logger.UpdateLevelForTags(logPrm) - ctx, cancel := context.WithCancel(context.Background()) pprofCmp = newPprofComponent() diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go index 077a68785..9bad19032 100644 --- a/cmd/frostfs-lens/internal/schema/common/schema.go +++ b/cmd/frostfs-lens/internal/schema/common/schema.go @@ -3,8 +3,6 @@ package common import ( "errors" "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) type FilterResult byte @@ -73,7 +71,11 @@ func (fp FallbackParser) ToParser() Parser { func (p Parser) ToFallbackParser() FallbackParser { return func(key, value []byte) (SchemaEntry, Parser) { entry, next, err := p(key, value) - assert.NoError(err, "couldn't use that parser as a fallback parser") + if err != nil { + panic(fmt.Errorf( + "couldn't use that parser as a fallback parser, it returned an error: %w", err, + )) + } 
return entry, next } } diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go index 3bfe2608b..7d70b27b2 100644 --- a/cmd/frostfs-lens/internal/schema/writecache/parsers.go +++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go @@ -57,7 +57,7 @@ func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, r.addr.SetContainer(cnr) r.addr.SetObject(obj) - r.data = value + r.data = value[:] return &r, nil, nil } diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go index 471514e5d..90729c119 100644 --- a/cmd/frostfs-lens/internal/tui/input.go +++ b/cmd/frostfs-lens/internal/tui/input.go @@ -53,17 +53,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo f.historyPointer++ // Stop iterating over history. if f.historyPointer == len(f.history) { - f.SetText(f.currentContent) + f.InputField.SetText(f.currentContent) return } - f.SetText(f.history[f.historyPointer]) + f.InputField.SetText(f.history[f.historyPointer]) case tcell.KeyUp: if len(f.history) == 0 { return } // Start iterating over history. if f.historyPointer == len(f.history) { - f.currentContent = f.GetText() + f.currentContent = f.InputField.GetText() } // End of history. if f.historyPointer == 0 { @@ -71,7 +71,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo } // Iterate to least recent prompts. 
f.historyPointer-- - f.SetText(f.history[f.historyPointer]) + f.InputField.SetText(f.history[f.historyPointer]) default: f.InputField.InputHandler()(event, func(tview.Primitive) {}) } diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go index a4d392ab3..5f61df884 100644 --- a/cmd/frostfs-lens/internal/tui/records.go +++ b/cmd/frostfs-lens/internal/tui/records.go @@ -8,7 +8,6 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/gdamore/tcell/v2" "github.com/rivo/tview" ) @@ -95,7 +94,9 @@ func (v *RecordsView) Mount(ctx context.Context) error { } func (v *RecordsView) Unmount() { - assert.False(v.onUnmount == nil, "try to unmount not mounted component") + if v.onUnmount == nil { + panic("try to unmount not mounted component") + } v.onUnmount() v.onUnmount = nil } diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go index cc6b7859e..bcc082821 100644 --- a/cmd/frostfs-lens/internal/tui/ui.go +++ b/cmd/frostfs-lens/internal/tui/ui.go @@ -460,11 +460,11 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { return } - switch v := ui.mountedPage.(type) { + switch ui.mountedPage.(type) { case *BucketsView: ui.moveNextPage(NewBucketsView(ui, res)) case *RecordsView: - bucket := v.bucket + bucket := ui.mountedPage.(*RecordsView).bucket ui.moveNextPage(NewRecordsView(ui, bucket, res)) } @@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { ui.searchBar.InputHandler()(event, func(tview.Primitive) {}) } - ui.MouseHandler() + ui.Box.MouseHandler() } func (ui *UI) WithPrompt(prompt string) error { diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go index 513314712..e761a1b14 100644 --- a/cmd/frostfs-node/apemanager.go +++ b/cmd/frostfs-node/apemanager.go @@ -14,7 +14,7 @@ import ( func initAPEManagerService(c *cfg) { 
contractStorage := ape_contract.NewProxyVerificationContractStorage( morph.NewSwitchRPCGuardedActor(c.cfgMorph.client), - c.key, + c.shared.key, c.cfgMorph.proxyScriptHash, c.cfgObject.cfgAccessPolicyEngine.policyContractHash) diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go index ce8ae9662..64c3beba7 100644 --- a/cmd/frostfs-node/attributes.go +++ b/cmd/frostfs-node/attributes.go @@ -6,5 +6,9 @@ import ( ) func parseAttributes(c *cfg) { + if nodeconfig.Relay(c.appCfg) { + return + } + fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg))) } diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index e5df0a22d..0fe56d2b0 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -1,27 +1,20 @@ package main import ( - "bytes" - "cmp" "context" - "slices" "sync" - "sync/atomic" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + lru "github.com/hashicorp/golang-lru/v2" "github.com/hashicorp/golang-lru/v2/expirable" - "github.com/hashicorp/golang-lru/v2/simplelru" - "go.uber.org/zap" ) type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) @@ -117,6 +110,55 @@ func (c *ttlNetCache[K, V]) remove(key K) { hit = c.cache.Remove(key) } +// entity that provides LRU cache interface. 
+type lruNetCache struct { + cache *lru.Cache[uint64, *netmapSDK.NetMap] + + netRdr netValueReader[uint64, *netmapSDK.NetMap] + + metrics cacheMetrics +} + +// newNetworkLRUCache returns wrapper over netValueReader with LRU cache. +func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache { + cache, err := lru.New[uint64, *netmapSDK.NetMap](sz) + fatalOnErr(err) + + return &lruNetCache{ + cache: cache, + netRdr: netRdr, + metrics: metrics, + } +} + +// reads value by the key. +// +// updates the value from the network on cache miss. +// +// returned value should not be modified. +func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { + hit := false + startedAt := time.Now() + defer func() { + c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) + }() + + val, ok := c.cache.Get(key) + if ok { + hit = true + return val, nil + } + + val, err := c.netRdr(ctx, key) + if err != nil { + return nil, err + } + + c.cache.Add(key, val) + + return val, nil +} + // wrapper over TTL cache of values read from the network // that implements container storage. 
type ttlContainerStorage struct { @@ -158,222 +200,20 @@ func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*con type lruNetmapSource struct { netState netmap.State - client rawSource - cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]] - mtx sync.RWMutex - metrics cacheMetrics - log *logger.Logger - candidates atomic.Pointer[[]netmapSDK.NodeInfo] + cache *lruNetCache } -type rawSource interface { - GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error) - GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) -} - -func newCachedNetmapStorage(ctx context.Context, log *logger.Logger, - netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration, -) netmap.Source { +func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { const netmapCacheSize = 10 - cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil) - fatalOnErr(err) + lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { + return v.GetNetMapByEpoch(ctx, key) + }, metrics.NewCacheMetrics("netmap")) - src := &lruNetmapSource{ - netState: netState, - client: client, - cache: cache, - log: log, - metrics: metrics.NewCacheMetrics("netmap"), + return &lruNetmapSource{ + netState: s, + cache: lruNetmapCache, } - - wg.Add(1) - go func() { - defer wg.Done() - src.updateCandidates(ctx, d) - }() - - return src -} - -// updateCandidates routine to merge netmap in cache with candidates list. 
-func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) { - timer := time.NewTimer(d) - defer timer.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-timer.C: - newCandidates, err := s.client.GetCandidates(ctx) - if err != nil { - s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err)) - timer.Reset(d) - break - } - if len(newCandidates) == 0 { - s.candidates.Store(&newCandidates) - timer.Reset(d) - break - } - slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { - return cmp.Compare(n1.Hash(), n2.Hash()) - }) - - // Check once state changed - v := s.candidates.Load() - if v == nil { - s.candidates.Store(&newCandidates) - s.mergeCacheWithCandidates(newCandidates) - timer.Reset(d) - break - } - ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { - if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) || - uint32(n1.Status()) != uint32(n2.Status()) || - slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 { - return 1 - } - ne1 := slices.Collect(n1.NetworkEndpoints()) - ne2 := slices.Collect(n2.NetworkEndpoints()) - return slices.Compare(ne1, ne2) - }) - if ret != 0 { - s.candidates.Store(&newCandidates) - s.mergeCacheWithCandidates(newCandidates) - } - timer.Reset(d) - } - } -} - -func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) { - s.mtx.Lock() - tmp := s.cache.Values() - s.mtx.Unlock() - for _, pointer := range tmp { - nm := pointer.Load() - updates := getNetMapNodesToUpdate(nm, candidates) - if len(updates) > 0 { - nm = nm.Clone() - mergeNetmapWithCandidates(updates, nm) - pointer.Store(nm) - } - } -} - -// reads value by the key. -// -// updates the value from the network on cache miss. -// -// returned value should not be modified. 
-func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { - hit := false - startedAt := time.Now() - defer func() { - s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) - }() - - s.mtx.RLock() - val, ok := s.cache.Get(key) - s.mtx.RUnlock() - if ok { - hit = true - return val.Load(), nil - } - - s.mtx.Lock() - defer s.mtx.Unlock() - - val, ok = s.cache.Get(key) - if ok { - hit = true - return val.Load(), nil - } - - nm, err := s.client.GetNetMapByEpoch(ctx, key) - if err != nil { - return nil, err - } - v := s.candidates.Load() - if v != nil { - updates := getNetMapNodesToUpdate(nm, *v) - if len(updates) > 0 { - mergeNetmapWithCandidates(updates, nm) - } - } - - p := atomic.Pointer[netmapSDK.NetMap]{} - p.Store(nm) - s.cache.Add(key, &p) - - return nm, nil -} - -// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates. -func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) { - for _, v := range updates { - if v.status != netmapSDK.UnspecifiedState { - nm.Nodes()[v.netmapIndex].SetStatus(v.status) - } - if v.externalAddresses != nil { - nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...) - } - if v.endpoints != nil { - nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...) - } - } -} - -type nodeToUpdate struct { - netmapIndex int - status netmapSDK.NodeState - externalAddresses []string - endpoints []string -} - -// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates. 
-func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate { - var res []nodeToUpdate - for i := range nm.Nodes() { - for _, cnd := range candidates { - if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) { - var tmp nodeToUpdate - var update bool - - if cnd.Status() != nm.Nodes()[i].Status() && - (cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) { - update = true - tmp.status = cnd.Status() - } - - externalAddresses := cnd.ExternalAddresses() - if externalAddresses != nil && - slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 { - update = true - tmp.externalAddresses = externalAddresses - } - - nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints()) - nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints()) - candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints()) - candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints()) - if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 { - update = true - tmp.endpoints = candidateEndpoints - } - - if update { - tmp.netmapIndex = i - res = append(res, tmp) - } - - break - } - } - } - return res } func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { @@ -385,7 +225,7 @@ func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (* } func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - val, err := s.get(ctx, epoch) + val, err := s.cache.get(ctx, epoch) if err != nil { return nil, err } diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go index 24286826f..b1601aa67 100644 --- a/cmd/frostfs-node/cache_test.go +++ b/cmd/frostfs-node/cache_test.go @@ -3,11 +3,9 @@ package main import ( "context" "errors" - "sync" "testing" "time" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" 
"github.com/stretchr/testify/require" ) @@ -61,75 +59,3 @@ func testNetValueReader(_ context.Context, key string) (time.Time, error) { type noopCacheMetricts struct{} func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {} - -type rawSrc struct{} - -func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) { - node0 := netmapSDK.NodeInfo{} - node0.SetPublicKey([]byte{byte(1)}) - node0.SetStatus(netmapSDK.Online) - node0.SetExternalAddresses("1", "0") - node0.SetNetworkEndpoints("1", "0") - - node1 := netmapSDK.NodeInfo{} - node1.SetPublicKey([]byte{byte(1)}) - node1.SetStatus(netmapSDK.Online) - node1.SetExternalAddresses("1", "0") - node1.SetNetworkEndpoints("1", "0") - - return []netmapSDK.NodeInfo{node0, node1}, nil -} - -func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - nm := netmapSDK.NetMap{} - nm.SetEpoch(1) - - node0 := netmapSDK.NodeInfo{} - node0.SetPublicKey([]byte{byte(1)}) - node0.SetStatus(netmapSDK.Maintenance) - node0.SetExternalAddresses("0") - node0.SetNetworkEndpoints("0") - - node1 := netmapSDK.NodeInfo{} - node1.SetPublicKey([]byte{byte(1)}) - node1.SetStatus(netmapSDK.Maintenance) - node1.SetExternalAddresses("0") - node1.SetNetworkEndpoints("0") - - nm.SetNodes([]netmapSDK.NodeInfo{node0, node1}) - - return &nm, nil -} - -type st struct{} - -func (s *st) CurrentEpoch() uint64 { - return 1 -} - -func TestNetmapStorage(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - wg := sync.WaitGroup{} - cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50) - - nm, err := cache.GetNetMapByEpoch(ctx, 1) - require.NoError(t, err) - require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance) - require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1) - require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1) - - require.Eventually(t, func() bool { - nm, err := 
cache.GetNetMapByEpoch(ctx, 1) - require.NoError(t, err) - for _, node := range nm.Nodes() { - if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 && - node.NumberOfNetworkEndpoints() == 2) { - return false - } - } - return true - }, time.Second*5, time.Millisecond*10) - - cancel() - wg.Wait() -} diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 96274e625..2531e9173 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -30,7 +30,6 @@ import ( objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" - treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" @@ -41,7 +40,6 @@ import ( netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -110,8 +108,6 @@ type applicationConfiguration struct { level string destination string timestamp bool - options []zap.Option - tags [][]string } ObjectCfg struct { @@ -130,9 +126,12 @@ type applicationConfiguration struct { } type shardCfg struct { - compression compression.Config + compress bool + 
estimateCompressibility bool + estimateCompressibilityThreshold float64 smallSizeObjectLimit uint64 + uncompressableContentType []string refillMetabase bool refillMetabaseWorkersCount int mode shardmode.Mode @@ -233,29 +232,19 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { a.LoggerCfg.level = loggerconfig.Level(c) a.LoggerCfg.destination = loggerconfig.Destination(c) a.LoggerCfg.timestamp = loggerconfig.Timestamp(c) - var opts []zap.Option - if loggerconfig.ToLokiConfig(c).Enabled { - opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { - lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c)) - return lokiCore - })} - } - a.LoggerCfg.options = opts - a.LoggerCfg.tags = loggerconfig.Tags(c) // Object a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c) - locodeDBPath := nodeconfig.LocodeDBPath(c) - parser, err := placement.NewMetricsParser(locodeDBPath) - if err != nil { - return fmt.Errorf("metrics parser creation: %w", err) + var pm []placement.Metric + for _, raw := range objectconfig.Get(c).Priority() { + m, err := placement.ParseMetric(raw) + if err != nil { + return err + } + pm = append(pm, m) } - m, err := parser.ParseMetrics(objectconfig.Get(c).Priority()) - if err != nil { - return fmt.Errorf("parse metrics: %w", err) - } - a.ObjectCfg.priorityMetrics = m + a.ObjectCfg.priorityMetrics = pm // Storage Engine @@ -271,7 +260,10 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *s target.refillMetabase = source.RefillMetabase() target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() target.mode = source.Mode() - target.compression = source.Compression() + target.compress = source.Compress() + target.estimateCompressibility = source.EstimateCompressibility() + target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold() + target.uncompressableContentType = source.UncompressableContentTypes() target.smallSizeObjectLimit = 
source.SmallSizeLimit() a.setShardWriteCacheConfig(&target, source) @@ -382,11 +374,14 @@ func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardco } func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error { - limitsConfig := source.Limits().ToConfig() + limitsConfig := source.Limits() limiter, err := qos.NewLimiter(limitsConfig) if err != nil { return err } + if target.limiter != nil { + target.limiter.Close() + } target.limiter = limiter return nil } @@ -478,6 +473,7 @@ type shared struct { // dynamicConfiguration stores parameters of the // components that supports runtime reconfigurations. type dynamicConfiguration struct { + logger *logger.Prm pprof *httpComponent metrics *httpComponent } @@ -651,6 +647,7 @@ type cfgNetmap struct { state *networkState + needBootstrap bool reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime } @@ -709,18 +706,24 @@ func initCfg(appCfg *config.Config) *cfg { key := nodeconfig.Key(appCfg) + relayOnly := nodeconfig.Relay(appCfg) + netState := newNetworkState() - c.shared = initShared(appCfg, key, netState) + c.shared = initShared(appCfg, key, netState, relayOnly) netState.metrics = c.metricsCollector - logPrm, err := c.loggerPrm() - fatalOnErr(err) + logPrm := c.loggerPrm() logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() log, err := logger.NewLogger(logPrm) fatalOnErr(err) - logger.UpdateLevelForTags(logPrm) + if loggerconfig.ToLokiConfig(appCfg).Enabled { + log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { + lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg)) + return lokiCore + })) + } c.internals = initInternals(appCfg, log) @@ -731,7 +734,7 @@ func initCfg(appCfg *config.Config) *cfg { c.cfgFrostfsID = initFrostfsID(appCfg) - c.cfgNetmap = initNetmap(appCfg, netState) + c.cfgNetmap = initNetmap(appCfg, netState, relayOnly) c.cfgGRPC = initCfgGRPC() @@ -777,8 +780,12 @@ func 
initSdNotify(appCfg *config.Config) bool { return false } -func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState) shared { - netAddr := nodeconfig.BootstrapAddresses(appCfg) +func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState, relayOnly bool) shared { + var netAddr network.AddressGroup + + if !relayOnly { + netAddr = nodeconfig.BootstrapAddresses(appCfg) + } persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path()) fatalOnErr(err) @@ -829,15 +836,18 @@ func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) interna return result } -func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap { +func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap { netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize) fatalOnErr(err) + var reBootstrapTurnedOff atomic.Bool + reBootstrapTurnedOff.Store(relayOnly) return cfgNetmap{ scriptHash: contractsconfig.Netmap(appCfg), state: netState, workerPool: netmapWorkerPool, - reBoostrapTurnedOff: &atomic.Bool{}, + needBootstrap: !relayOnly, + reBoostrapTurnedOff: &reBootstrapTurnedOff, } } @@ -882,7 +892,7 @@ func (c *cfg) engineOpts() []engine.Option { opts = append(opts, engine.WithErrorThreshold(c.EngineCfg.errorThreshold), - engine.WithLogger(c.log.WithTag(logger.TagEngine)), + engine.WithLogger(c.log), engine.WithLowMemoryConsumption(c.EngineCfg.lowMem), ) @@ -919,7 +929,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option { writecache.WithMaxCacheSize(wcRead.sizeLimit), writecache.WithMaxCacheCount(wcRead.countLimit), writecache.WithNoSync(wcRead.noSync), - writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)), + writecache.WithLogger(c.log), writecache.WithQoSLimiter(shCfg.limiter), ) } @@ -959,8 +969,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. 
blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval), blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount), blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout), - blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)), - blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)), + blobovniczatree.WithLogger(c.log), blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit), } @@ -983,7 +992,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. fstree.WithPerm(sRead.perm), fstree.WithDepth(sRead.depth), fstree.WithNoSync(sRead.noSync), - fstree.WithLogger(c.log.WithTag(logger.TagFSTree)), + fstree.WithLogger(c.log), } if c.metricsCollector != nil { fstreeOpts = append(fstreeOpts, @@ -1013,9 +1022,12 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID ss := c.getSubstorageOpts(ctx, shCfg) blobstoreOpts := []blobstor.Option{ - blobstor.WithCompression(shCfg.compression), + blobstor.WithCompressObjects(shCfg.compress), + blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType), + blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility), + blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold), blobstor.WithStorages(ss), - blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)), + blobstor.WithLogger(c.log), } if c.metricsCollector != nil { blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore()))) @@ -1040,7 +1052,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID var sh shardOptsWithID sh.configID = shCfg.id() sh.shOpts = []shard.Option{ - shard.WithLogger(c.log.WithTag(logger.TagShard)), + shard.WithLogger(c.log), shard.WithRefillMetabase(shCfg.refillMetabase), shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount), shard.WithMode(shCfg.mode), @@ 
-1064,28 +1076,26 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID return sh } -func (c *cfg) loggerPrm() (logger.Prm, error) { - var prm logger.Prm - // (re)init read configuration - err := prm.SetLevelString(c.LoggerCfg.level) - if err != nil { - // not expected since validation should be performed before - return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level) - } - err = prm.SetDestination(c.LoggerCfg.destination) - if err != nil { - // not expected since validation should be performed before - return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination) - } - prm.PrependTimestamp = c.LoggerCfg.timestamp - prm.Options = c.LoggerCfg.options - err = prm.SetTags(c.LoggerCfg.tags) - if err != nil { - // not expected since validation should be performed before - return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination) +func (c *cfg) loggerPrm() *logger.Prm { + // check if it has been inited before + if c.dynamicConfiguration.logger == nil { + c.dynamicConfiguration.logger = new(logger.Prm) } - return prm, nil + // (re)init read configuration + err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level) + if err != nil { + // not expected since validation should be performed before + panic("incorrect log level format: " + c.LoggerCfg.level) + } + err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination) + if err != nil { + // not expected since validation should be performed before + panic("incorrect log destination format: " + c.LoggerCfg.destination) + } + c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp + + return c.dynamicConfiguration.logger } func (c *cfg) LocalAddress() network.AddressGroup { @@ -1246,6 +1256,11 @@ func (c *cfg) bootstrap(ctx context.Context) error { return bootstrapOnline(ctx, c) } +// needBootstrap checks if local node should be registered in network on 
bootup. +func (c *cfg) needBootstrap() bool { + return c.cfgNetmap.needBootstrap +} + type dCmp struct { name string reloadFunc func() error @@ -1320,7 +1335,11 @@ func (c *cfg) reloadConfig(ctx context.Context) { // all the components are expected to support // Logger's dynamic reconfiguration approach - components := c.getComponents(ctx) + // Logger + + logPrm := c.loggerPrm() + + components := c.getComponents(ctx, logPrm) // Object c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime) @@ -1358,17 +1377,10 @@ func (c *cfg) reloadConfig(ctx context.Context) { c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } -func (c *cfg) getComponents(ctx context.Context) []dCmp { +func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { var components []dCmp - components = append(components, dCmp{"logger", func() error { - prm, err := c.loggerPrm() - if err != nil { - return err - } - logger.UpdateLevelForTags(prm) - return nil - }}) + components = append(components, dCmp{"logger", logPrm.Reload}) components = append(components, dCmp{"runtime", func() error { setRuntimeParameters(ctx, c) return nil @@ -1389,12 +1401,6 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp { } return err }}) - if c.treeService != nil { - components = append(components, dCmp{"tree", func() error { - c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys()) - return nil - }}) - } if cmp, updated := metricsComponent(c); updated { if cmp.enabled { cmp.preReload = enableMetricsSvc diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index 401c54edc..eaf2a294e 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -11,11 +11,10 @@ import ( blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" fstreeconfig 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" + limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "github.com/stretchr/testify/require" ) @@ -101,11 +100,10 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 100, meta.BoltDB().MaxBatchSize()) require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, true, sc.Compression().Enabled) - require.Equal(t, compression.LevelFastest, sc.Compression().Level) - require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes) - require.Equal(t, true, sc.Compression().EstimateCompressibility) - require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold) + require.Equal(t, true, sc.Compress()) + require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes()) + require.Equal(t, true, sc.EstimateCompressibility()) + require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold()) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -137,8 +135,8 @@ func TestEngineSection(t *testing.T) { require.Equal(t, mode.ReadOnly, sc.Mode()) require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) - readLimits := limits.ToConfig().Read - writeLimits := limits.ToConfig().Write + 
readLimits := limits.Read() + writeLimits := limits.Write() require.Equal(t, 30*time.Second, readLimits.IdleTimeout) require.Equal(t, int64(10_000), readLimits.MaxRunningOps) require.Equal(t, int64(1_000), readLimits.MaxWaitingOps) @@ -146,7 +144,7 @@ func TestEngineSection(t *testing.T) { require.Equal(t, int64(1_000), writeLimits.MaxRunningOps) require.Equal(t, int64(100), writeLimits.MaxWaitingOps) require.ElementsMatch(t, readLimits.Tags, - []qos.IOTagConfig{ + []limitsconfig.IOTagConfig{ { Tag: "internal", Weight: toPtr(20), @@ -170,19 +168,13 @@ func TestEngineSection(t *testing.T) { LimitOps: toPtr(25000), }, { - Tag: "policer", - Weight: toPtr(5), - LimitOps: toPtr(25000), - Prohibited: true, - }, - { - Tag: "treesync", + Tag: "policer", Weight: toPtr(5), - LimitOps: toPtr(25), + LimitOps: toPtr(25000), }, }) require.ElementsMatch(t, writeLimits.Tags, - []qos.IOTagConfig{ + []limitsconfig.IOTagConfig{ { Tag: "internal", Weight: toPtr(200), @@ -210,11 +202,6 @@ func TestEngineSection(t *testing.T) { Weight: toPtr(50), LimitOps: toPtr(2500), }, - { - Tag: "treesync", - Weight: toPtr(50), - LimitOps: toPtr(100), - }, }) case 1: require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) @@ -238,9 +225,8 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 200, meta.BoltDB().MaxBatchSize()) require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, false, sc.Compression().Enabled) - require.Equal(t, compression.LevelDefault, sc.Compression().Level) - require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes) + require.Equal(t, false, sc.Compress()) + require.Equal(t, []string(nil), sc.UncompressableContentTypes()) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -272,14 +258,14 @@ func TestEngineSection(t *testing.T) { require.Equal(t, mode.ReadWrite, sc.Mode()) require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount()) - readLimits := 
limits.ToConfig().Read - writeLimits := limits.ToConfig().Write - require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout) - require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps) - require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps) - require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout) - require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps) - require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps) + readLimits := limits.Read() + writeLimits := limits.Write() + require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout) + require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps) + require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps) + require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout) + require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps) + require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps) require.Equal(t, 0, len(readLimits.Tags)) require.Equal(t, 0, len(writeLimits.Tags)) } diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go index d42646da7..e50d56b95 100644 --- a/cmd/frostfs-node/config/engine/shard/config.go +++ b/cmd/frostfs-node/config/engine/shard/config.go @@ -8,7 +8,6 @@ import ( metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) @@ -28,27 +27,42 @@ func From(c *config.Config) *Config { return (*Config)(c) } -func (x *Config) Compression() compression.Config { - cc := (*config.Config)(x).Sub("compression") - if cc == nil { - return 
compression.Config{} - } - return compression.Config{ - Enabled: config.BoolSafe(cc, "enabled"), - UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"), - Level: compression.Level(config.StringSafe(cc, "level")), - EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"), - EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc), - } +// Compress returns the value of "compress" config parameter. +// +// Returns false if the value is not a valid bool. +func (x *Config) Compress() bool { + return config.BoolSafe( + (*config.Config)(x), + "compress", + ) +} + +// UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter. +// +// Returns nil if a the value is missing or is invalid. +func (x *Config) UncompressableContentTypes() []string { + return config.StringSliceSafe( + (*config.Config)(x), + "compression_exclude_content_types") +} + +// EstimateCompressibility returns the value of "estimate_compressibility" config parameter. +// +// Returns false if the value is not a valid bool. +func (x *Config) EstimateCompressibility() bool { + return config.BoolSafe( + (*config.Config)(x), + "compression_estimate_compressibility", + ) } // EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter. // // Returns EstimateCompressibilityThresholdDefault if the value is not defined, not valid float or not in range [0.0; 1.0]. 
-func estimateCompressibilityThreshold(c *config.Config) float64 { +func (x *Config) EstimateCompressibilityThreshold() float64 { v := config.FloatOrDefault( - c, - "estimate_compressibility_threshold", + (*config.Config)(x), + "compression_estimate_compressibility_threshold", EstimateCompressibilityThresholdDefault) if v < 0.0 || v > 1.0 { return EstimateCompressibilityThresholdDefault diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go index ccd1e0000..b9b5c4382 100644 --- a/cmd/frostfs-node/config/engine/shard/limits/config.go +++ b/cmd/frostfs-node/config/engine/shard/limits/config.go @@ -1,13 +1,19 @@ package limits import ( + "math" "strconv" + "time" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "github.com/spf13/cast" ) +const ( + NoLimit int64 = math.MaxInt64 + DefaultIdleTimeout = 5 * time.Minute +) + // From wraps config section into Config. func From(c *config.Config) *Config { return (*Config)(c) @@ -17,43 +23,36 @@ func From(c *config.Config) *Config { // which provides access to Shard's limits configurations. type Config config.Config -func (x *Config) ToConfig() qos.LimiterConfig { - result := qos.LimiterConfig{ - Read: x.read(), - Write: x.write(), - } - panicOnErr(result.Validate()) - return result -} - -func (x *Config) read() qos.OpConfig { +// Read returns the value of "read" limits config section. +func (x *Config) Read() OpConfig { return x.parse("read") } -func (x *Config) write() qos.OpConfig { +// Write returns the value of "write" limits config section. 
+func (x *Config) Write() OpConfig { return x.parse("write") } -func (x *Config) parse(sub string) qos.OpConfig { +func (x *Config) parse(sub string) OpConfig { c := (*config.Config)(x).Sub(sub) - var result qos.OpConfig + var result OpConfig if s := config.Int(c, "max_waiting_ops"); s > 0 { result.MaxWaitingOps = s } else { - result.MaxWaitingOps = qos.NoLimit + result.MaxWaitingOps = NoLimit } if s := config.Int(c, "max_running_ops"); s > 0 { result.MaxRunningOps = s } else { - result.MaxRunningOps = qos.NoLimit + result.MaxRunningOps = NoLimit } if s := config.DurationSafe(c, "idle_timeout"); s > 0 { result.IdleTimeout = s } else { - result.IdleTimeout = qos.DefaultIdleTimeout + result.IdleTimeout = DefaultIdleTimeout } result.Tags = tags(c) @@ -61,16 +60,42 @@ func (x *Config) parse(sub string) qos.OpConfig { return result } -func tags(c *config.Config) []qos.IOTagConfig { +type OpConfig struct { + // MaxWaitingOps returns the value of "max_waiting_ops" config parameter. + // + // Equals NoLimit if the value is not a positive number. + MaxWaitingOps int64 + // MaxRunningOps returns the value of "max_running_ops" config parameter. + // + // Equals NoLimit if the value is not a positive number. + MaxRunningOps int64 + // IdleTimeout returns the value of "idle_timeout" config parameter. + // + // Equals DefaultIdleTimeout if the value is not a valid duration. + IdleTimeout time.Duration + // Tags returns the value of "tags" config parameter. + // + // Equals nil if the value is not a valid tags config slice. 
+ Tags []IOTagConfig +} + +type IOTagConfig struct { + Tag string + Weight *float64 + LimitOps *float64 + ReservedOps *float64 +} + +func tags(c *config.Config) []IOTagConfig { c = c.Sub("tags") - var result []qos.IOTagConfig + var result []IOTagConfig for i := 0; ; i++ { tag := config.String(c, strconv.Itoa(i)+".tag") if tag == "" { return result } - var tagConfig qos.IOTagConfig + var tagConfig IOTagConfig tagConfig.Tag = tag v := c.Value(strconv.Itoa(i) + ".weight") @@ -94,13 +119,6 @@ func tags(c *config.Config) []qos.IOTagConfig { tagConfig.ReservedOps = &r } - v = c.Value(strconv.Itoa(i) + ".prohibited") - if v != nil { - r, err := cast.ToBoolE(v) - panicOnErr(err) - tagConfig.Prohibited = r - } - result = append(result, tagConfig) } } diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go index 20f373184..ba9eeea2b 100644 --- a/cmd/frostfs-node/config/logger/config.go +++ b/cmd/frostfs-node/config/logger/config.go @@ -2,7 +2,6 @@ package loggerconfig import ( "os" - "strconv" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -61,21 +60,6 @@ func Timestamp(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "timestamp") } -// Tags returns the value of "tags" config parameter from "logger" section. -func Tags(c *config.Config) [][]string { - var res [][]string - sub := c.Sub(subsection).Sub("tags") - for i := 0; ; i++ { - s := sub.Sub(strconv.FormatInt(int64(i), 10)) - names := config.StringSafe(s, "names") - if names == "" { - break - } - res = append(res, []string{names, config.StringSafe(s, "level")}) - } - return res -} - // ToLokiConfig extracts loki config. 
func ToLokiConfig(c *config.Config) loki.Config { hostname, _ := os.Hostname() diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go index 796ad529e..ffe8ac693 100644 --- a/cmd/frostfs-node/config/logger/config_test.go +++ b/cmd/frostfs-node/config/logger/config_test.go @@ -22,9 +22,6 @@ func TestLoggerSection_Level(t *testing.T) { require.Equal(t, "debug", loggerconfig.Level(c)) require.Equal(t, "journald", loggerconfig.Destination(c)) require.Equal(t, true, loggerconfig.Timestamp(c)) - tags := loggerconfig.Tags(c) - require.Equal(t, "main, morph", tags[0][0]) - require.Equal(t, "debug", tags[0][1]) } configtest.ForEachFileType(path, fileConfigTest) diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go index a9f774d18..d089870ea 100644 --- a/cmd/frostfs-node/config/morph/config.go +++ b/cmd/frostfs-node/config/morph/config.go @@ -33,9 +33,6 @@ const ( // ContainerCacheSizeDefault represents the default size for the container cache. ContainerCacheSizeDefault = 100 - - // PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates. - PollCandidatesTimeoutDefault = 20 * time.Second ) var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section") @@ -157,17 +154,3 @@ func FrostfsIDCacheSize(c *config.Config) uint32 { } return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size") } - -// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter -// from "morph" section. -// -// Returns PollCandidatesTimeoutDefault if the value is not positive duration. -func NetmapCandidatesPollInterval(c *config.Config) time.Duration { - v := config.DurationSafe(c.Sub(subsection). 
- Sub("netmap").Sub("candidates"), "poll_interval") - if v > 0 { - return v - } - - return PollCandidatesTimeoutDefault -} diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index c50718c5f..969d77396 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -3,9 +3,7 @@ package nodeconfig import ( "fmt" "io/fs" - "iter" "os" - "slices" "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -90,8 +88,12 @@ func Wallet(c *config.Config) *keys.PrivateKey { type stringAddressGroup []string -func (x stringAddressGroup) Addresses() iter.Seq[string] { - return slices.Values(x) +func (x stringAddressGroup) IterateAddresses(f func(string) bool) { + for i := range x { + if f(x[i]) { + break + } + } } func (x stringAddressGroup) NumberOfAddresses() int { @@ -131,6 +133,14 @@ func Attributes(c *config.Config) (attrs []string) { return } +// Relay returns the value of "relay" config parameter +// from "node" section. +// +// Returns false if the value is not set. +func Relay(c *config.Config) bool { + return config.BoolSafe(c.Sub(subsection), "relay") +} + // PersistentSessions returns structure that provides access to "persistent_sessions" // subsection of "node" section. func PersistentSessions(c *config.Config) PersistentSessionsConfig { @@ -207,8 +217,3 @@ func (l PersistentPolicyRulesConfig) NoSync() bool { func CompatibilityMode(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode") } - -// LocodeDBPath returns path to LOCODE database. 
-func LocodeDBPath(c *config.Config) string { - return config.String(c.Sub(subsection), "locode_db_path") -} diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go index 9af1dc038..7b9adecf4 100644 --- a/cmd/frostfs-node/config/node/config_test.go +++ b/cmd/frostfs-node/config/node/config_test.go @@ -29,10 +29,12 @@ func TestNodeSection(t *testing.T) { ) attribute := Attributes(empty) + relay := Relay(empty) persisessionsPath := PersistentSessions(empty).Path() persistatePath := PersistentState(empty).Path() require.Empty(t, attribute) + require.Equal(t, false, relay) require.Equal(t, "", persisessionsPath) require.Equal(t, PersistentStatePathDefault, persistatePath) }) @@ -43,6 +45,7 @@ func TestNodeSection(t *testing.T) { key := Key(c) addrs := BootstrapAddresses(c) attributes := Attributes(c) + relay := Relay(c) wKey := Wallet(c) persisessionsPath := PersistentSessions(c).Path() persistatePath := PersistentState(c).Path() @@ -84,6 +87,8 @@ func TestNodeSection(t *testing.T) { return false }) + require.Equal(t, true, relay) + require.Len(t, attributes, 2) require.Equal(t, "Price:11", attributes[0]) require.Equal(t, "UN-LOCODE:RU MSK", attributes[1]) diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go index e0efdfde2..197990d07 100644 --- a/cmd/frostfs-node/config/rpc/config.go +++ b/cmd/frostfs-node/config/rpc/config.go @@ -31,11 +31,12 @@ func Limits(c *config.Config) []LimitConfig { break } - if sc.Value("max_ops") == nil { + maxOps := config.IntSafe(sc, "max_ops") + if maxOps == 0 { panic("no max operations for method group") } - limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")}) + limits = append(limits, LimitConfig{methods, maxOps}) } return limits diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go index a6365e19f..31a837cee 100644 --- a/cmd/frostfs-node/config/rpc/config_test.go +++ 
b/cmd/frostfs-node/config/rpc/config_test.go @@ -38,7 +38,7 @@ func TestRPCSection(t *testing.T) { }) t.Run("no max operations", func(t *testing.T) { - const path = "testdata/no_max_ops" + const path = "testdata/node" fileConfigTest := func(c *config.Config) { require.Panics(t, func() { _ = Limits(c) }) @@ -50,28 +50,4 @@ func TestRPCSection(t *testing.T) { configtest.ForEnvFileType(t, path, fileConfigTest) }) }) - - t.Run("zero max operations", func(t *testing.T) { - const path = "testdata/zero_max_ops" - - fileConfigTest := func(c *config.Config) { - limits := Limits(c) - require.Len(t, limits, 2) - - limit0 := limits[0] - limit1 := limits[1] - - require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) - require.Equal(t, limit0.MaxOps, int64(0)) - - require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) - require.Equal(t, limit1.MaxOps, int64(10000)) - } - - configtest.ForEachFileType(path, fileConfigTest) - - t.Run("ENV", func(t *testing.T) { - configtest.ForEnvFileType(t, path, fileConfigTest) - }) - }) } diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/node.env similarity index 100% rename from cmd/frostfs-node/config/rpc/testdata/no_max_ops.env rename to cmd/frostfs-node/config/rpc/testdata/node.env diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/node.json similarity index 100% rename from cmd/frostfs-node/config/rpc/testdata/no_max_ops.json rename to cmd/frostfs-node/config/rpc/testdata/node.json diff --git a/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/node.yaml similarity index 100% rename from cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml rename to cmd/frostfs-node/config/rpc/testdata/node.yaml diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env 
b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env deleted file mode 100644 index ce7302b0b..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env +++ /dev/null @@ -1,4 +0,0 @@ -FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" -FROSTFS_RPC_LIMITS_0_MAX_OPS=0 -FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" -FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json deleted file mode 100644 index 16a1c173f..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "rpc": { - "limits": [ - { - "methods": [ - "/neo.fs.v2.object.ObjectService/PutSingle", - "/neo.fs.v2.object.ObjectService/Put" - ], - "max_ops": 0 - }, - { - "methods": [ - "/neo.fs.v2.object.ObjectService/Get" - ], - "max_ops": 10000 - } - ] - } -} diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml deleted file mode 100644 index 525d768d4..000000000 --- a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml +++ /dev/null @@ -1,9 +0,0 @@ -rpc: - limits: - - methods: - - /neo.fs.v2.object.ObjectService/PutSingle - - /neo.fs.v2.object.ObjectService/Put - max_ops: 0 - - methods: - - /neo.fs.v2.object.ObjectService/Get - max_ops: 10000 diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index bdb280d87..012012297 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -32,7 +32,7 @@ func initContainerService(_ context.Context, c *cfg) { wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) fatalOnErr(err) - c.cnrClient = wrap + c.shared.cnrClient = wrap cnrSrc := cntClient.AsContainerSource(wrap) @@ -47,7 +47,7 @@ func initContainerService(_ context.Context, c *cfg) { frostfsIDSubjectProvider = 
newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) } - c.frostfsidClient = frostfsIDSubjectProvider + c.shared.frostfsidClient = frostfsIDSubjectProvider c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg) defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides( @@ -57,7 +57,7 @@ func initContainerService(_ context.Context, c *cfg) { service := containerService.NewSignService( &c.key.PrivateKey, containerService.NewAPEServer(defaultChainRouter, cnrRdr, - newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient, + newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient, containerService.NewSplitterService( c.cfgContainer.containerBatchSize, c.respSvc, containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)), diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go index d9ca01e70..19b4af51f 100644 --- a/cmd/frostfs-node/metrics.go +++ b/cmd/frostfs-node/metrics.go @@ -8,38 +8,38 @@ import ( func metricsComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.metrics == nil { - c.metrics = new(httpComponent) - c.metrics.cfg = c - c.metrics.name = "metrics" - c.metrics.handler = metrics.Handler() + if c.dynamicConfiguration.metrics == nil { + c.dynamicConfiguration.metrics = new(httpComponent) + c.dynamicConfiguration.metrics.cfg = c + c.dynamicConfiguration.metrics.name = "metrics" + c.dynamicConfiguration.metrics.handler = metrics.Handler() updated = true } // (re)init read configuration enabled := metricsconfig.Enabled(c.appCfg) - if enabled != c.metrics.enabled { - c.metrics.enabled = enabled + if enabled != c.dynamicConfiguration.metrics.enabled { + c.dynamicConfiguration.metrics.enabled = enabled updated = true } address := metricsconfig.Address(c.appCfg) - if address != c.metrics.address { - 
c.metrics.address = address + if address != c.dynamicConfiguration.metrics.address { + c.dynamicConfiguration.metrics.address = address updated = true } dur := metricsconfig.ShutdownTimeout(c.appCfg) - if dur != c.metrics.shutdownDur { - c.metrics.shutdownDur = dur + if dur != c.dynamicConfiguration.metrics.shutdownDur { + c.dynamicConfiguration.metrics.shutdownDur = dur updated = true } - return c.metrics, updated + return c.dynamicConfiguration.metrics, updated } func enableMetricsSvc(c *cfg) { - c.metricsSvc.Enable() + c.shared.metricsSvc.Enable() } func disableMetricsSvc(c *cfg) { - c.metricsSvc.Disable() + c.shared.metricsSvc.Disable() } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 917cf6fc0..657e22389 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -14,7 +14,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -61,11 +60,10 @@ func (c *cfg) initMorphComponents(ctx context.Context) { } if c.cfgMorph.cacheTTL < 0 { - netmapSource = newRawNetmapStorage(wrap) + netmapSource = wrap } else { // use RPC node as source of netmap (with caching) - netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg, - morphconfig.NetmapCandidatesPollInterval(c.appCfg)) + netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap) } c.netMapSource = netmapSource @@ -85,7 +83,7 @@ func initMorphClient(ctx context.Context, c *cfg) { cli, err := client.New(ctx, c.key, client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)), - client.WithLogger(c.log.WithTag(logger.TagMorph)), + client.WithLogger(c.log), 
client.WithMetrics(c.metricsCollector.MorphClientMetrics()), client.WithEndpoints(addresses...), client.WithConnLostCallback(func() { @@ -166,7 +164,6 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { err error subs subscriber.Subscriber ) - log := c.log.WithTag(logger.TagMorph) fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { @@ -175,14 +172,14 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { } subs, err = subscriber.New(ctx, &subscriber.Params{ - Log: log, + Log: c.log, StartFromBlock: fromSideChainBlock, Client: c.cfgMorph.client, }) fatalOnErr(err) lis, err := event.NewListener(event.ListenerParams{ - Logger: log, + Logger: c.log, Subscriber: subs, }) fatalOnErr(err) @@ -200,7 +197,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { res, err := netmapEvent.ParseNewEpoch(src) if err == nil { - log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, + c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), ) } @@ -211,11 +208,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) registerBlockHandler(lis, func(ctx context.Context, block *block.Block) { - log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) + c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) if err != nil { - log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, + c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", block.Index)) } diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 
7dfb4fe12..0e90e7707 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -8,7 +8,6 @@ import ( "net" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -105,7 +104,9 @@ func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) { v := s.nodeInfo.Load() if v != nil { res, ok = v.(netmapSDK.NodeInfo) - assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v)) + if !ok { + panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v)) + } } return @@ -123,11 +124,7 @@ func nodeKeyFromNetmap(c *cfg) []byte { func (c *cfg) iterateNetworkAddresses(f func(string) bool) { ni, ok := c.cfgNetmap.state.getNodeInfo() if ok { - for s := range ni.NetworkEndpoints() { - if f(s) { - return - } - } + ni.IterateNetworkEndpoints(f) } } @@ -187,7 +184,7 @@ func addNewEpochNotificationHandlers(c *cfg) { c.updateContractNodeInfo(ctx, e) - if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 + if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 return } @@ -209,12 +206,14 @@ func addNewEpochNotificationHandlers(c *cfg) { // bootstrapNode adds current node to the Network map. // Must be called after initNetmapService. 
func bootstrapNode(ctx context.Context, c *cfg) { - if c.IsMaintenance() { - c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) - return + if c.needBootstrap() { + if c.IsMaintenance() { + c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) + return + } + err := c.bootstrap(ctx) + fatalOnErrDetails("bootstrap error", err) } - err := c.bootstrap(ctx) - fatalOnErrDetails("bootstrap error", err) } func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) { @@ -350,6 +349,8 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) { ) } +var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode") + func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error { switch st { default: @@ -361,6 +362,10 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro c.stopMaintenance(ctx) + if !c.needBootstrap() { + return errRelayBootstrap + } + if st == control.NetmapStatus_ONLINE { c.cfgNetmap.reBoostrapTurnedOff.Store(false) return bootstrapOnline(ctx, c) diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go deleted file mode 100644 index e6be9cdf5..000000000 --- a/cmd/frostfs-node/netmap_source.go +++ /dev/null @@ -1,55 +0,0 @@ -package main - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" - netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -type rawNetmapSource struct { - client *netmapClient.Client -} - -func newRawNetmapStorage(client *netmapClient.Client) netmap.Source { - return &rawNetmapSource{ - client: client, - } -} - -func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { - nm, err := s.client.GetNetMap(ctx, diff) - if err != nil { - return nil, err - } - candidates, err := s.client.GetCandidates(ctx) - if 
err != nil { - return nil, err - } - updates := getNetMapNodesToUpdate(nm, candidates) - if len(updates) > 0 { - mergeNetmapWithCandidates(updates, nm) - } - return nm, nil -} - -func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - nm, err := s.client.GetNetMapByEpoch(ctx, epoch) - if err != nil { - return nil, err - } - candidates, err := s.client.GetCandidates(ctx) - if err != nil { - return nil, err - } - updates := getNetMapNodesToUpdate(nm, candidates) - if len(updates) > 0 { - mergeNetmapWithCandidates(updates, nm) - } - return nm, nil -} - -func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) { - return s.client.Epoch(ctx) -} diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index c33c02b3f..ad6f4140a 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -16,6 +16,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" + v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" @@ -31,7 +32,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -172,10 
+172,12 @@ func initObjectService(c *cfg) { splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) - apeSvc := createAPEService(c, &irFetcher, splitSvc) + apeSvc := createAPEService(c, splitSvc) + + aclSvc := createACLServiceV2(c, apeSvc, &irFetcher) var commonSvc objectService.Common - commonSvc.Init(&c.internals, apeSvc) + commonSvc.Init(&c.internals, aclSvc) respSvc := objectService.NewResponseService( &commonSvc, @@ -187,9 +189,9 @@ func initObjectService(c *cfg) { respSvc, ) - c.metricsSvc = objectService.NewMetricCollector( + c.shared.metricsSvc = objectService.NewMetricCollector( signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) - qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService) + qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService) auditSvc := objectService.NewAuditService(qosService, c.log, c.audit) server := objectTransportGRPC.New(auditSvc) @@ -218,8 +220,9 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl } remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) + pol := policer.New( - policer.WithLogger(c.log.WithTag(logger.TagPolicer)), + policer.WithLogger(c.log), policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}), policer.WithBuryFunc(buryFn), policer.WithContainerSource(c.cfgObject.cnrSource), @@ -281,7 +284,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl }) } -func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher { +func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher { return &innerRingFetcherWithNotary{ sidechain: c.cfgMorph.client, } @@ -291,7 +294,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa ls := c.cfgObject.cfgLocalStorage.localStorage return replicator.New( - replicator.WithLogger(c.log.WithTag(logger.TagReplicator)), + replicator.WithLogger(c.log), replicator.WithPutTimeout( 
replicatorconfig.PutTimeout(c.appCfg), ), @@ -348,7 +351,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav c.netMapSource, keyStorage, containerSource, - searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)), + searchsvc.WithLogger(c.log), ) } @@ -374,7 +377,7 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra ), coreConstructor, containerSource, - getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc))) + getsvc.WithLogger(c.log)) } func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service { @@ -385,7 +388,7 @@ func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorag c.netMapSource, c, c.cfgObject.cnrSource, - getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)), + getsvcV2.WithLogger(c.log), ) } @@ -402,7 +405,7 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi cfg: c, }, keyStorage, - deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)), + deletesvc.WithLogger(c.log), ) } @@ -426,19 +429,28 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi ) } -func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service { +func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service { + return v2.New( + apeSvc, + c.netMapSource, + irFetcher, + c.cfgObject.cnrSource, + v2.WithLogger(c.log), + ) +} + +func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service { return objectAPE.NewService( objectAPE.NewChecker( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc), - c.frostfsidClient, + c.shared.frostfsidClient, c.netMapSource, c.cfgNetmap.state, 
c.cfgObject.cnrSource, c.binPublicKey, ), - objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource), splitSvc, ) } diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go index e4da8119f..5b40c8a88 100644 --- a/cmd/frostfs-node/pprof.go +++ b/cmd/frostfs-node/pprof.go @@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) { func pprofComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.pprof == nil { - c.pprof = new(httpComponent) - c.pprof.cfg = c - c.pprof.name = "pprof" - c.pprof.handler = httputil.Handler() - c.pprof.preReload = tuneProfilers + if c.dynamicConfiguration.pprof == nil { + c.dynamicConfiguration.pprof = new(httpComponent) + c.dynamicConfiguration.pprof.cfg = c + c.dynamicConfiguration.pprof.name = "pprof" + c.dynamicConfiguration.pprof.handler = httputil.Handler() + c.dynamicConfiguration.pprof.preReload = tuneProfilers updated = true } // (re)init read configuration enabled := profilerconfig.Enabled(c.appCfg) - if enabled != c.pprof.enabled { - c.pprof.enabled = enabled + if enabled != c.dynamicConfiguration.pprof.enabled { + c.dynamicConfiguration.pprof.enabled = enabled updated = true } address := profilerconfig.Address(c.appCfg) - if address != c.pprof.address { - c.pprof.address = address + if address != c.dynamicConfiguration.pprof.address { + c.dynamicConfiguration.pprof.address = address updated = true } dur := profilerconfig.ShutdownTimeout(c.appCfg) - if dur != c.pprof.shutdownDur { - c.pprof.shutdownDur = dur + if dur != c.dynamicConfiguration.pprof.shutdownDur { + c.dynamicConfiguration.pprof.shutdownDur = dur updated = true } - return c.pprof, updated + return c.dynamicConfiguration.pprof, updated } func tuneProfilers(c *cfg) { diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go index 6394b668b..9663fc6ae 100644 --- a/cmd/frostfs-node/qos.go +++ b/cmd/frostfs-node/qos.go @@ -43,9 +43,6 @@ func 
initQoSService(c *cfg) { func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context { rawTag, defined := qosTagging.IOTagFromContext(ctx) if !defined { - if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { - return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String()) - } return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) } ioTag, err := qos.FromRawString(rawTag) @@ -76,8 +73,20 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) case qos.IOTagInternal: - if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { - return ctx + for _, pk := range s.allowedInternalPubs { + if bytes.Equal(pk, requestSignPublicKey) { + return ctx + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), requestSignPublicKey) { + return ctx + } } s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) @@ -86,23 +95,3 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) } } - -func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool { - for _, pk := range s.allowedInternalPubs { - if bytes.Equal(pk, publicKey) { - return true - } - } - nm, err := s.netmapSource.GetNetMap(ctx, 0) - if err != nil { - s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) - return false - } - for _, node := range nm.Nodes() { - if bytes.Equal(node.PublicKey(), publicKey) { - return true - } - } - - return false -} diff --git 
a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go deleted file mode 100644 index 971f9eebf..000000000 --- a/cmd/frostfs-node/qos_test.go +++ /dev/null @@ -1,226 +0,0 @@ -package main - -import ( - "context" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestQoSService_Client(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - t.Run("IO tag client defined", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { - ctx := 
tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Request) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) { - 
ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) -} - -func TestQoSService_Internal(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) - t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) - t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) - t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.Internal) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagInternal.String(), tag) - }) -} - -func TestQoSService_Critical(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagCritical.String(), tag) - }) - 
t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.Critical) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagCritical.String(), tag) - }) -} - -func TestQoSService_NetmapGetError(t *testing.T) { - t.Parallel() - s, pk := testQoSServicePrepare(t) - s.netmapSource = &utilTesting.TestNetmapSource{} - t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { - ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) - t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { - ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") - ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) - tag, ok := tagging.IOTagFromContext(ctx) - require.True(t, ok) - require.Equal(t, qos.IOTagClient.String(), tag) - }) -} - -func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) { - nmSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - reqSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - allowedCritSigner, 
err := keys.NewPrivateKey() - require.NoError(t, err) - - allowedIntSigner, err := keys.NewPrivateKey() - require.NoError(t, err) - - var node netmap.NodeInfo - node.SetPublicKey(nmSigner.PublicKey().Bytes()) - nm := &netmap.NetMap{} - nm.SetEpoch(100) - nm.SetNodes([]netmap.NodeInfo{node}) - - return &cfgQoSService{ - logger: test.NewLogger(t), - netmapSource: &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ - 100: nm, - }, - CurrentEpoch: 100, - }, - allowedCriticalPubs: [][]byte{ - allowedCritSigner.PublicKey().Bytes(), - }, - allowedInternalPubs: [][]byte{ - allowedIntSigner.PublicKey().Bytes(), - }, - }, - &testQoSServicePublicKeys{ - NetmapNode: nmSigner.PublicKey().Bytes(), - Request: reqSigner.PublicKey().Bytes(), - Internal: allowedIntSigner.PublicKey().Bytes(), - Critical: allowedCritSigner.PublicKey().Bytes(), - } -} - -type testQoSServicePublicKeys struct { - NetmapNode []byte - Request []byte - Internal []byte - Critical []byte -} diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go index fbfe3f5e6..2f3c9cbfe 100644 --- a/cmd/frostfs-node/session.go +++ b/cmd/frostfs-node/session.go @@ -14,7 +14,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -56,7 +55,7 @@ func initSessionService(c *cfg) { server := sessionTransportGRPC.New( sessionSvc.NewSignService( &c.key.PrivateKey, - sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)), + sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log), ), ) diff --git 
a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index 62af45389..65414f0ca 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -14,7 +14,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" "google.golang.org/grpc" @@ -52,12 +51,12 @@ func initTreeService(c *cfg) { c.treeService = tree.New( tree.WithContainerSource(cnrSource{ src: c.cfgObject.cnrSource, - cli: c.cnrClient, + cli: c.shared.cnrClient, }), - tree.WithFrostfsidSubjectProvider(c.frostfsidClient), + tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient), tree.WithNetmapSource(c.netMapSource), tree.WithPrivateKey(&c.key.PrivateKey), - tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)), + tree.WithLogger(c.log), tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage), tree.WithContainerCacheSize(treeConfig.CacheSize()), tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()), diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go index 22d2e0aa9..ae52b9e4a 100644 --- a/cmd/frostfs-node/validate.go +++ b/cmd/frostfs-node/validate.go @@ -30,11 +30,6 @@ func validateConfig(c *config.Config) error { return fmt.Errorf("invalid logger destination: %w", err) } - err = loggerPrm.SetTags(loggerconfig.Tags(c)) - if err != nil { - return fmt.Errorf("invalid list of allowed tags: %w", err) - } - // shard configuration validation shardNum := 0 diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go index 495365cf0..d9c0f167f 100644 --- a/cmd/frostfs-node/validate_test.go +++ b/cmd/frostfs-node/validate_test.go @@ -1,6 +1,7 @@ package main import ( + "os" "path/filepath" "testing" @@ -21,4 +22,17 @@ func TestValidate(t 
*testing.T) { require.NoError(t, err) }) }) + + t.Run("mainnet", func(t *testing.T) { + os.Clearenv() // ENVs have priority over config files, so we do this in tests + p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml") + c := config.New(p, "", config.EnvPrefix) + require.NoError(t, validateConfig(c)) + }) + t.Run("testnet", func(t *testing.T) { + os.Clearenv() // ENVs have priority over config files, so we do this in tests + p := filepath.Join(exampleConfigPrefix, "testnet/config.yml") + c := config.New(p, "", config.EnvPrefix) + require.NoError(t, validateConfig(c)) + }) } diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go index 13f447af4..b8acf0143 100644 --- a/cmd/internal/common/exit.go +++ b/cmd/internal/common/exit.go @@ -51,13 +51,8 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) { } cmd.PrintErrln(err) - for p := cmd; p != nil; p = p.Parent() { - if p.PersistentPostRun != nil { - p.PersistentPostRun(cmd, nil) - if !cobra.EnableTraverseRunHooks { - break - } - } + if cmd.PersistentPostRun != nil { + cmd.PersistentPostRun(cmd, nil) } os.Exit(code) } diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go index 5dd1a060e..f550552d2 100644 --- a/cmd/internal/common/netmap.go +++ b/cmd/internal/common/netmap.go @@ -27,15 +27,15 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo, cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState) - for endpoint := range node.NetworkEndpoints() { + netmap.IterateNetworkEndpoints(node, func(endpoint string) { cmd.Printf("%s ", endpoint) - } + }) cmd.Println() if !short { - for key, value := range node.Attributes() { + node.IterateAttributes(func(key, value string) { cmd.Printf("%s\t%s: %s\n", indent, key, value) - } + }) } } diff --git a/config/example/ir.env b/config/example/ir.env index c13044a6e..ebd91c243 100644 --- a/config/example/ir.env +++ b/config/example/ir.env @@ -1,7 +1,5 @@ 
FROSTFS_IR_LOGGER_LEVEL=info FROSTFS_IR_LOGGER_TIMESTAMP=true -FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph" -FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_IR_WALLET_PATH=/path/to/wallet.json FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX diff --git a/config/example/ir.yaml b/config/example/ir.yaml index ed53f014b..49f9fd324 100644 --- a/config/example/ir.yaml +++ b/config/example/ir.yaml @@ -3,9 +3,6 @@ logger: level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" timestamp: true - tags: - - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`. - level: debug wallet: path: /path/to/wallet.json # Path to NEP-6 NEO wallet file diff --git a/config/example/node.env b/config/example/node.env index 9a2426358..010b6840c 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -1,8 +1,6 @@ FROSTFS_LOGGER_LEVEL=debug FROSTFS_LOGGER_DESTINATION=journald FROSTFS_LOGGER_TIMESTAMP=true -FROSTFS_LOGGER_TAGS_0_NAMES="main, morph" -FROSTFS_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_PPROF_ENABLED=true FROSTFS_PPROF_ADDRESS=localhost:6060 @@ -22,9 +20,9 @@ FROSTFS_NODE_WALLET_PASSWORD=password FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083" FROSTFS_NODE_ATTRIBUTE_0=Price:11 FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK" +FROSTFS_NODE_RELAY=true FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions FROSTFS_NODE_PERSISTENT_STATE_PATH=/state -FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db # Tree service section FROSTFS_TREE_ENABLED=true @@ -123,8 +121,7 @@ FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms ### Blobstor config -FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true -FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest +FROSTFS_STORAGE_SHARD_0_COMPRESS=true 
FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*" FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7 @@ -183,10 +180,6 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000 FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5 -FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 @@ -204,9 +197,6 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50 -FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100 ## 1 shard ### Flag to refill Metabase from BlobStor diff --git a/config/example/node.json b/config/example/node.json index 6b7a9c2c6..b26c35d2c 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -2,13 +2,7 @@ "logger": { "level": "debug", "destination": "journald", - "timestamp": true, - "tags": [ - { - "names": "main, morph", - "level": "debug" - } - ] + "timestamp": true }, "pprof": { "enabled": true, @@ -37,13 +31,13 @@ ], "attribute_0": "Price:11", "attribute_1": "UN-LOCODE:RU MSK", + "relay": true, "persistent_sessions": { "path": "/sessions" }, "persistent_state": { "path": "/state" - }, - "locode_db_path": "/path/to/locode/db" + } }, "grpc": { "0": { @@ -188,15 +182,12 @@ 
"max_batch_size": 100, "max_batch_delay": "10ms" }, - "compression": { - "enabled": true, - "level": "fastest", - "exclude_content_types": [ - "audio/*", "video/*" - ], - "estimate_compressibility": true, - "estimate_compressibility_threshold": 0.7 - }, + "compress": true, + "compression_exclude_content_types": [ + "audio/*", "video/*" + ], + "compression_estimate_compressibility": true, + "compression_estimate_compressibility_threshold": 0.7, "small_object_size": 102400, "blobstor": [ { @@ -261,13 +252,7 @@ { "tag": "policer", "weight": 5, - "limit_ops": 25000, - "prohibited": true - }, - { - "tag": "treesync", - "weight": 5, - "limit_ops": 25 + "limit_ops": 25000 } ] }, @@ -302,11 +287,6 @@ "tag": "policer", "weight": 50, "limit_ops": 2500 - }, - { - "tag": "treesync", - "weight": 50, - "limit_ops": 100 } ] } @@ -330,9 +310,7 @@ "max_batch_size": 200, "max_batch_delay": "20ms" }, - "compression": { - "enabled": false - }, + "compress": false, "small_object_size": 102400, "blobstor": [ { diff --git a/config/example/node.yaml b/config/example/node.yaml index 2d4bc90fb..58b687d5c 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -2,9 +2,6 @@ logger: level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" destination: journald # logger destination: one of "stdout" (default), "journald" timestamp: true - tags: - - names: "main, morph" - level: debug systemdnotify: enabled: true @@ -34,11 +31,11 @@ node: - grpcs://localhost:8083 attribute_0: "Price:11" attribute_1: UN-LOCODE:RU MSK + relay: true # start Storage node in relay mode without bootstrapping into the Network map persistent_sessions: path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions) persistent_state: path: /state # path to persistent state file of Storage node - "locode_db_path": "/path/to/locode/db" grpc: - endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server @@ -98,9 +95,6 @@ 
morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 ape_chain_cache_size: 100000 - netmap: - candidates: - poll_interval: 20s apiclient: dial_timeout: 15s # timeout for FrostFS API client connection @@ -140,6 +134,7 @@ rpc: max_ops: 10000 storage: + # note: shard configuration can be omitted for relay node (see `node.relay`) shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors) shard: @@ -153,7 +148,7 @@ storage: flush_worker_count: 30 # number of write-cache flusher threads metabase: - perm: 0o644 # permissions for metabase files(directories: +x for current user and group) + perm: 0644 # permissions for metabase files(directories: +x for current user and group) max_batch_size: 200 max_batch_delay: 20ms @@ -161,19 +156,18 @@ storage: max_batch_delay: 5ms # maximum delay for a batch of operations to be executed max_batch_size: 100 # maximum amount of operations in a single batch - compression: - enabled: false # turn on/off zstd compression of stored objects + compress: false # turn on/off zstd(level 3) compression of stored objects small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes blobstor: - size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) + perm: 0644 # permissions for blobstor files(directories: +x for current user and group) depth: 1 # max depth of object tree storage in key-value DB width: 4 # max width of object tree storage in key-value DB opened_cache_capacity: 50 # maximum number of opened database files opened_cache_ttl: 5m # ttl for opened database file opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) + - perm: 0644 # permissions for 
blobstor files(directories: +x for current user and group) depth: 5 # max depth of object tree storage in FS gc: @@ -204,14 +198,12 @@ storage: max_batch_size: 100 max_batch_delay: 10ms - compression: - enabled: true # turn on/off zstd compression of stored objects - level: fastest - exclude_content_types: - - audio/* - - video/* - estimate_compressibility: true - estimate_compressibility_threshold: 0.7 + compress: true # turn on/off zstd(level 3) compression of stored objects + compression_exclude_content_types: + - audio/* + - video/* + compression_estimate_compressibility: true + compression_estimate_compressibility_threshold: 0.7 blobstor: - type: blobovnicza @@ -257,10 +249,6 @@ storage: - tag: policer weight: 5 limit_ops: 25000 - prohibited: true - - tag: treesync - weight: 5 - limit_ops: 25 write: max_running_ops: 1000 max_waiting_ops: 100 @@ -283,9 +271,6 @@ storage: - tag: policer weight: 50 limit_ops: 2500 - - tag: treesync - weight: 50 - limit_ops: 100 1: writecache: @@ -305,7 +290,7 @@ storage: pilorama: path: tmp/1/blob/pilorama.db no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted. - perm: 0o644 # permission to use for the database file and intermediate directories + perm: 0644 # permission to use for the database file and intermediate directories tracing: enabled: true diff --git a/config/mainnet/README.md b/config/mainnet/README.md new file mode 100644 index 000000000..717a9b0ff --- /dev/null +++ b/config/mainnet/README.md @@ -0,0 +1,28 @@ +# N3 Mainnet Storage node configuration + +Here is a template for simple storage node configuration in N3 Mainnet. +Make sure to specify correct values instead of `<...>` placeholders. +Do not change `contracts` section. Run the latest frostfs-node release with +the fixed config `frostfs-node -c config.yml` + +To use NeoFS in the Mainnet, you need to deposit assets to NeoFS contract. 
+The contract script hash is `2cafa46838e8b564468ebd868dcafdd99dce6221` +(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`) + +## Tips + +Use `grpcs://` scheme in the announced address if you enable TLS in grpc server. +```yaml +node: + addresses: + - grpcs://frostfs.my.org:8080 + +grpc: + num: 1 + 0: + endpoint: frostfs.my.org:8080 + tls: + enabled: true + certificate: /path/to/cert + key: /path/to/key +``` diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml new file mode 100644 index 000000000..d86ea451f --- /dev/null +++ b/config/mainnet/config.yml @@ -0,0 +1,70 @@ +node: + wallet: + path: + address: + password: + addresses: + - + attribute_0: UN-LOCODE: + attribute_1: Price:100000 + attribute_2: User-Agent:FrostFS\/0.9999 + +grpc: + num: 1 + 0: + endpoint: + tls: + enabled: false + +storage: + shard_num: 1 + shard: + 0: + metabase: + path: /storage/path/metabase + perm: 0600 + blobstor: + - path: /storage/path/blobovnicza + type: blobovnicza + perm: 0600 + opened_cache_capacity: 32 + depth: 1 + width: 1 + - path: /storage/path/fstree + type: fstree + perm: 0600 + depth: 4 + writecache: + enabled: false + gc: + remover_batch_size: 100 + remover_sleep_interval: 1m + +logger: + level: info + +prometheus: + enabled: true + address: localhost:9090 + shutdown_timeout: 15s + +object: + put: + remote_pool_size: 100 + local_pool_size: 100 + +morph: + rpc_endpoint: + - wss://rpc1.morph.frostfs.info:40341/ws + - wss://rpc2.morph.frostfs.info:40341/ws + - wss://rpc3.morph.frostfs.info:40341/ws + - wss://rpc4.morph.frostfs.info:40341/ws + - wss://rpc5.morph.frostfs.info:40341/ws + - wss://rpc6.morph.frostfs.info:40341/ws + - wss://rpc7.morph.frostfs.info:40341/ws + dial_timeout: 20s + +contracts: + balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55 + container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5 + netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1 diff --git a/config/testnet/README.md b/config/testnet/README.md new file mode 100644 index 000000000..e2cda33ec
--- /dev/null +++ b/config/testnet/README.md @@ -0,0 +1,129 @@ +# N3 Testnet Storage node configuration + +There is a prepared configuration for NeoFS Storage Node deployment in +N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared +docker image and run it with docker-compose. + +## Build image + +Prepared **frostfs-storage-testnet** image is available at Docker Hub. +However, if you need to rebuild it for some reason, run +`make image-storage-testnet` command. + +``` +$ make image-storage-testnet +... +Successfully built ab0557117b02 +Successfully tagged nspccdev/neofs-storage-testnet:0.25.1 +``` + +## Deploy node + +To run a storage node in N3 Testnet environment, you should deposit GAS assets, +update docker-compose file and start the node. + +### Deposit + +The Storage Node owner should deposit GAS to NeoFS smart contract. It generates a +bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send bootstrap tx. + +First, obtain GAS in N3 Testnet chain. You can do that with +[faucet](https://neowish.ngd.network) service. + +Then, make a deposit by transferring GAS to NeoFS contract in N3 Testnet. +You can provide scripthash in the `data` argument of transfer tx to make a +deposit to a specified account. Otherwise, deposit is made to the tx sender. + +NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`, +so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`. + +See a deposit example with `neo-go`. + +``` +neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \ +--from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \ +--to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \ +--token GAS \ +--amount 1 +``` + +### Configure + +Next, configure `node_config.env` file. Change endpoints values. Both +should contain your **public** IP. 
+ +``` +NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 +NEOFS_NODE_ADDRESSES=65.52.183.157:36512 +``` + +Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory) +attribute. + +``` +NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 +NEOFS_NODE_ADDRESSES=65.52.183.157:36512 +NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED +``` + +You can validate UN/LOCODE attribute in +[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0) +with frostfs-cli. + +``` +$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED' +Country: Russia +Location: Saint Petersburg (ex Leningrad) +Continent: Europe +Subdivision: [SPE] Sankt-Peterburg +Coordinates: 59.53, 30.15 +``` + +It is recommended to pass the node's key as a file. To do so, convert your wallet +WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file. + +``` +// Print WIF in a 32-byte hex format +$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s +PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56 +PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059 +WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s +Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ +ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc +ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf + +// Save 32-byte hex into a file +$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key +``` + +Then, specify the path to this file in `docker-compose.yml` +```yaml + volumes: + - frostfs_storage:/storage + - ./my_wallet.key:/node.key +``` + + +NeoFS objects will be stored on your machine. By default, docker-compose +is configured to store objects in named docker volume `frostfs_storage`. You can +specify a directory on the filesystem to store objects there. 
+ +```yaml + volumes: + - /home/username/frostfs/rc3/storage:/storage + - ./my_wallet.key:/node.key +``` + +### Start + +Run the node with `docker-compose up` command and stop it with `docker-compose down`. + +### Debug + +To print node logs, use `docker logs frostfs-testnet`. To print debug messages in +log, set up log level to debug with this env: + +```yaml + environment: + - NEOFS_LOGGER_LEVEL=debug +``` diff --git a/config/testnet/config.yml b/config/testnet/config.yml new file mode 100644 index 000000000..76b36cdf6 --- /dev/null +++ b/config/testnet/config.yml @@ -0,0 +1,52 @@ +logger: + level: info + +morph: + rpc_endpoint: + - wss://rpc01.morph.testnet.frostfs.info:51331/ws + - wss://rpc02.morph.testnet.frostfs.info:51331/ws + - wss://rpc03.morph.testnet.frostfs.info:51331/ws + - wss://rpc04.morph.testnet.frostfs.info:51331/ws + - wss://rpc05.morph.testnet.frostfs.info:51331/ws + - wss://rpc06.morph.testnet.frostfs.info:51331/ws + - wss://rpc07.morph.testnet.frostfs.info:51331/ws + dial_timeout: 20s + +contracts: + balance: e0420c216003747626670d1424569c17c79015bf + container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0 + netmap: d4b331639799e2958d4bc5b711b469d79de94e01 + +node: + key: /node.key + attribute_0: Deployed:SelfHosted + attribute_1: User-Agent:FrostFS\/0.9999 + +prometheus: + enabled: true + address: localhost:9090 + shutdown_timeout: 15s + +storage: + shard_num: 1 + shard: + 0: + metabase: + path: /storage/metabase + perm: 0777 + blobstor: + - path: /storage/path/blobovnicza + type: blobovnicza + perm: 0600 + opened_cache_capacity: 32 + depth: 1 + width: 1 + - path: /storage/path/fstree + type: fstree + perm: 0600 + depth: 4 + writecache: + enabled: false + gc: + remover_batch_size: 100 + remover_sleep_interval: 1m diff --git a/docs/shard-modes.md b/docs/shard-modes.md index 6cc4ab13c..3b459335b 100644 --- a/docs/shard-modes.md +++ b/docs/shard-modes.md @@ -51,7 +51,10 @@ However, all mode changing operations are idempotent. 
## Automatic mode changes -A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold. +Shard can automatically switch to a `degraded-read-only` mode in 3 cases: +1. If the metabase was not available or couldn't be opened/initialized during shard startup. +2. If shard error counter exceeds threshold. +3. If the metabase couldn't be reopened during SIGHUP handling. # Detach shard diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index da9fdfed0..51f0a9669 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -12,23 +12,22 @@ There are some custom types used for brevity: # Structure -| Section | Description | -|--------------|---------------------------------------------------------| -| `node` | [Node parameters](#node-section) | -| `logger` | [Logging parameters](#logger-section) | -| `pprof` | [PProf configuration](#pprof-section) | -| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | -| `control` | [Control service configuration](#control-section) | -| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | -| `morph` | [N3 blockchain client configuration](#morph-section) | -| `apiclient` | [FrostFS API client configuration](#apiclient-section) | -| `policer` | [Policer service configuration](#policer-section) | -| `replicator` | [Replicator service configuration](#replicator-section) | -| `storage` | [Storage engine configuration](#storage-section) | -| `runtime` | [Runtime configuration](#runtime-section) | -| `audit` | [Audit configuration](#audit-section) | -| `multinet` | [Multinet configuration](#multinet-section) | -| `qos` | [QoS configuration](#qos-section) | +| Section | Description | +|------------------------|---------------------------------------------------------------------| +| `logger` | [Logging parameters](#logger-section) | +| `pprof` | [PProf configuration](#pprof-section) | +| `prometheus` | 
[Prometheus metrics configuration](#prometheus-section) | +| `control` | [Control service configuration](#control-section) | +| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | +| `morph` | [N3 blockchain client configuration](#morph-section) | +| `apiclient` | [FrostFS API client configuration](#apiclient-section) | +| `policer` | [Policer service configuration](#policer-section) | +| `replicator` | [Replicator service configuration](#replicator-section) | +| `storage` | [Storage engine configuration](#storage-section) | +| `runtime` | [Runtime configuration](#runtime-section) | +| `audit` | [Audit configuration](#audit-section) | +| `multinet` | [Multinet configuration](#multinet-section) | +| `qos` | [QoS configuration](#qos-section) | # `control` section ```yaml @@ -112,21 +111,11 @@ Contains logger parameters. ```yaml logger: level: info - tags: - - names: "main, morph" - level: debug ``` -| Parameter | Type | Default value | Description | -|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------| -| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | -| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tags description. | - -## `tags` subsection -| Parameter | Type | Default value | Description | -|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `names` | `string` | | List of components divided by `,`.
Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. | -| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. | +| Parameter | Type | Default value | Description | +|-----------|----------|---------------|---------------------------------------------------------------------------------------------------| +| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | # `contracts` section Contains override values for FrostFS side-chain contract hashes. Most of the time contract @@ -159,19 +148,15 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 switch_interval: 2m - netmap: - candidates: - poll_interval: 20s ``` -| Parameter | Type | Default value | Description | -|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | -| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | -| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | -| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | -| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | -| `netmap.candidates.poll_interval` | `duration` | `20s` | Timeout to set up frequency of merge candidates to netmap with netmap in local cache. | +| Parameter | Type | Default value | Description | +| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | +| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | +| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | +| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | +| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | ## `rpc_endpoint` subsection | Parameter | Type | Default value | Description | @@ -195,41 +180,21 @@ Contains configuration for each shard. Keys must be consecutive numbers starting `default` subsection has the same format and specifies defaults for missing values. The following table describes configuration for each shard. -| Parameter | Type | Default value | Description | -| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- | -| `compression` | [Compression config](#compression-subsection) | | Compression config. | -| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | -| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | -| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | -| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | -| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | -| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | -| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | -| `gc` | [GC config](#gc-subsection) | | GC configuration. | -| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | - -### `compression` subsection - -Contains compression config. - -```yaml -compression: - enabled: true - level: smallest_size - exclude_content_types: - - audio/* - - video/* - estimate_compressibility: true - estimate_compressibility_threshold: 0.7 -``` - -| Parameter | Type | Default value | Description | -| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `enabled` | `bool` | `false` | Flag to enable compression. | -| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. | -| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | -| `estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. 
| -| `estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. | +| Parameter | Type | Default value | Description | +| ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `compress` | `bool` | `false` | Flag to enable compression. | +| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | +| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. | +| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. | +| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | +| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | +| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | +| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | +| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | +| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | +| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | +| `gc` | [GC config](#gc-subsection) | | GC configuration. | +| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | ### `blobstor` subsection @@ -244,7 +209,7 @@ blobstor: width: 4 - type: fstree path: /path/to/blobstor/blobovnicza - perm: 0o644 + perm: 0644 size: 4194304 depth: 1 width: 4 @@ -304,7 +269,7 @@ gc: ```yaml metabase: path: /path/to/meta.db - perm: 0o644 + perm: 0644 max_batch_size: 200 max_batch_delay: 20ms ``` @@ -394,7 +359,6 @@ limits: | `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. | | `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. | | `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. | -| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. 
| # `node` section @@ -410,22 +374,22 @@ node: - "Price:11" - "UN-LOCODE:RU MSK" - "key:value" + relay: false persistent_sessions: path: /sessions persistent_state: path: /state - locode_db_path: "/path/to/locode/db" ``` -| Parameter | Type | Default value | Description | -|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------| -| `key` | `string` | | Path to the binary-encoded private key. | -| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | -| `addresses` | `[]string` | | Addresses advertised in the netmap. | -| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | -| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | -| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | -| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. | +| Parameter | Type | Default value | Description | +|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------| +| `key` | `string` | | Path to the binary-encoded private key. | +| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | +| `addresses` | `[]string` | | Addresses advertised in the netmap. | +| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | +| `relay` | `bool` | | Enable relay mode. | +| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. 
| +| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | ## `wallet` subsection N3 wallet configuration. diff --git a/go.mod b/go.mod index fb45c3874..eeaca1645 100644 --- a/go.mod +++ b/go.mod @@ -1,18 +1,18 @@ module git.frostfs.info/TrueCloudLab/frostfs-node -go 1.23.0 +go 1.23 require ( code.gitea.io/sdk/gitea v0.17.1 git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 - git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa + git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d + git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 - git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 + git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 github.com/VictoriaMetrics/easyproto v0.1.4 @@ -28,7 +28,7 @@ require ( github.com/klauspost/compress v1.17.4 github.com/mailru/easyjson v0.7.7 github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.15.0 + github.com/multiformats/go-multiaddr v0.14.0 github.com/nspcc-dev/neo-go v0.106.3 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.9.0 @@ -44,9 +44,10 @@ require ( go.opentelemetry.io/otel v1.31.0 
go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/zap v1.27.0 - golang.org/x/sync v0.12.0 - golang.org/x/sys v0.31.0 - golang.org/x/term v0.30.0 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 + golang.org/x/term v0.27.0 google.golang.org/grpc v1.69.2 google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v3 v3.0.1 @@ -85,9 +86,9 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.5.0 // indirect + github.com/ipfs/go-cid v0.4.1 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/klauspost/reedsolomon v1.12.1 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -123,14 +124,13 @@ require ( go.opentelemetry.io/otel/sdk v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/text v0.21.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - lukechampine.com/blake3 v1.4.0 // indirect + lukechampine.com/blake3 v1.2.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index acc26af36..a8f7216a5 100644 --- a/go.sum +++ b/go.sum @@ -4,22 +4,22 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9 git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod 
h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 h1:CBreXSxGoYJAdZ1QdJPsDs1UCXGF5psinII0lxtohsc= +git.frostfs.info/TrueCloudLab/frostfs-qos 
v0.0.0-20250310135838-3e7ca9403529/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 h1:V0a7ia84ZpSM2YxpJq1SKLQfeYmsqFWqcxwweBHJIzc= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= @@ -145,14 +145,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: 
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= -github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -192,8 +192,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod 
h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= -github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= +github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= @@ -324,15 +324,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= -golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -353,8 +353,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -381,16 +381,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -398,15 +398,15 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text 
v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -445,7 +445,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= -lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= +lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= +lukechampine.com/blake3 
v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/internal/assert/cond.go b/internal/assert/cond.go index 113d2eba9..701036fa8 100644 --- a/internal/assert/cond.go +++ b/internal/assert/cond.go @@ -1,29 +1,9 @@ package assert -import ( - "fmt" - "strings" -) +import "strings" func True(cond bool, details ...string) { if !cond { panic(strings.Join(details, " ")) } } - -func False(cond bool, details ...string) { - if cond { - panic(strings.Join(details, " ")) - } -} - -func NoError(err error, details ...string) { - if err != nil { - content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " ")) - panic(content) - } -} - -func Fail(details ...string) { - panic(strings.Join(details, " ")) -} diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 626372f43..3503c922e 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -198,7 +198,6 @@ const ( EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" EngineInterruptGettingLockers = "can't get object's lockers" EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" - EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones" EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" @@ -513,9 +512,7 @@ const ( FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" FailedToParseIncomingIOTag = "failed to parse incoming IO tag" NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" - 
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag" + FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`" FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`" WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object" - FailedToUpdateNetmapCandidates = "update netmap candidates failed" - UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used" ) diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go index be6878142..17fb67a27 100644 --- a/internal/metrics/qos.go +++ b/internal/metrics/qos.go @@ -15,7 +15,7 @@ func newQoSMetrics() *QoSMetrics { Namespace: namespace, Subsystem: qosSubsystem, Name: "operations_total", - Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard", + Help: "Count of pending, in progress, completed and failed due to resource exhausted error operations for each shard", }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}), } } diff --git a/internal/qos/config.go b/internal/qos/config.go deleted file mode 100644 index d90b403b5..000000000 --- a/internal/qos/config.go +++ /dev/null @@ -1,31 +0,0 @@ -package qos - -import ( - "math" - "time" -) - -const ( - NoLimit int64 = math.MaxInt64 - DefaultIdleTimeout = 5 * time.Minute -) - -type LimiterConfig struct { - Read OpConfig - Write OpConfig -} - -type OpConfig struct { - MaxWaitingOps int64 - MaxRunningOps int64 - IdleTimeout time.Duration - Tags []IOTagConfig -} - -type IOTagConfig struct { - Tag string - Weight *float64 - LimitOps *float64 - ReservedOps *float64 - Prohibited bool -} diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go index 58cd9e52c..534a1f74b 100644 --- a/internal/qos/grpc.go +++ b/internal/qos/grpc.go @@ -26,7 +26,7 @@ func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor if err != nil {
tag = IOTagClient } - if tag.IsLocal() { + if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache { tag = IOTagInternal } ctx = tagging.ContextWithIOTag(ctx, tag.String()) @@ -44,7 +44,7 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientIntercepto if err != nil { tag = IOTagClient } - if tag.IsLocal() { + if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache { tag = IOTagInternal } ctx = tagging.ContextWithIOTag(ctx, tag.String()) diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go deleted file mode 100644 index 7d0826754..000000000 --- a/internal/qos/grpc_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package qos_test - -import ( - "context" - "errors" - "fmt" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" - "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -const ( - okKey = "ok" -) - -var ( - errTest = errors.New("mock") - errWrongTag = errors.New("wrong tag") - errNoTag = errors.New("failed to get tag from context") - errResExhausted *apistatus.ResourceExhausted - tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync} -) - -type mockGRPCServerStream struct { - grpc.ServerStream - - ctx context.Context -} - -func (m *mockGRPCServerStream) Context() context.Context { - return m.ctx -} - -type limiter struct { - acquired bool - released bool -} - -func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) { - l.acquired = true - if key != okKey { - return nil, false - } - return func() { l.released = true }, true -} - -func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { - interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim }) - 
handler := func(ctx context.Context, req any) (any, error) { - return nil, errTest - } - _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler) - return err -} - -func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { - interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim }) - handler := func(srv any, stream grpc.ServerStream) error { - return errTest - } - err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{ - FullMethod: methodName, - }, handler) - return err -} - -func Test_MaxActiveRPCLimiter(t *testing.T) { - // UnaryServerInterceptor - t.Run("unary fail", func(t *testing.T) { - var lim limiter - - err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "") - require.ErrorAs(t, err, &errResExhausted) - require.True(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("unary pass critical", func(t *testing.T) { - var lim limiter - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - - err := unaryMaxActiveRPCLimiter(ctx, &lim, "") - require.ErrorIs(t, err, errTest) - require.False(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("unary pass", func(t *testing.T) { - var lim limiter - - err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey) - require.ErrorIs(t, err, errTest) - require.True(t, lim.acquired) - require.True(t, lim.released) - }) - // StreamServerInterceptor - t.Run("stream fail", func(t *testing.T) { - var lim limiter - - err := streamMaxActiveRPCLimiter(context.Background(), &lim, "") - require.ErrorAs(t, err, &errResExhausted) - require.True(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("stream pass critical", func(t *testing.T) { - var lim limiter - ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) - - err := streamMaxActiveRPCLimiter(ctx, &lim, "") - require.ErrorIs(t, err, errTest) 
- require.False(t, lim.acquired) - require.False(t, lim.released) - }) - t.Run("stream pass", func(t *testing.T) { - var lim limiter - - err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey) - require.ErrorIs(t, err, errTest) - require.True(t, lim.acquired) - require.True(t, lim.released) - }) -} - -func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) { - interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor() - called := false - handler := func(ctx context.Context, req any) (any, error) { - called = true - if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() { - return nil, nil - } - return nil, errWrongTag - } - _, err := interceptor(context.Background(), nil, nil, handler) - require.NoError(t, err) - require.True(t, called) -} - -func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) { - interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor() - - // check context with no value - called := false - invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { - called = true - if _, ok := tagging.IOTagFromContext(ctx); ok { - return fmt.Errorf("%v: expected no IO tags", errWrongTag) - } - return nil - } - require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil)) - require.True(t, called) - - // check context for internal tag - targetTag := qos.IOTagInternal.String() - invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { - raw, ok := tagging.IOTagFromContext(ctx) - if !ok { - return errNoTag - } - if raw != targetTag { - return errWrongTag - } - return nil - } - for _, tag := range tags { - ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) - require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) - } - - // check context for client tag - ctx := tagging.ContextWithIOTag(context.Background(), "") 
- targetTag = qos.IOTagClient.String() - require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) -} - -func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) { - interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor() - - // check context with no value - called := false - streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { - called = true - if _, ok := tagging.IOTagFromContext(ctx); ok { - return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag) - } - return nil, nil - } - _, err := interceptor(context.Background(), nil, nil, "", streamer, nil) - require.True(t, called) - require.NoError(t, err) - - // check context for internal tag - targetTag := qos.IOTagInternal.String() - streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { - raw, ok := tagging.IOTagFromContext(ctx) - if !ok { - return nil, errNoTag - } - if raw != targetTag { - return nil, errWrongTag - } - return nil, nil - } - for _, tag := range tags { - ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) - _, err := interceptor(ctx, nil, nil, "", streamer, nil) - require.NoError(t, err) - } - - // check context for client tag - ctx := tagging.ContextWithIOTag(context.Background(), "") - targetTag = qos.IOTagClient.String() - _, err = interceptor(ctx, nil, nil, "", streamer, nil) - require.NoError(t, err) -} diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index 2d7de32fc..e92cef652 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -8,6 +8,7 @@ import ( "sync/atomic" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" apistatus 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -36,15 +37,15 @@ type scheduler interface { Close() } -func NewLimiter(c LimiterConfig) (Limiter, error) { - if err := c.Validate(); err != nil { +func NewLimiter(c *limits.Config) (Limiter, error) { + if err := validateConfig(c); err != nil { return nil, err } - readScheduler, err := createScheduler(c.Read) + readScheduler, err := createScheduler(c.Read()) if err != nil { return nil, fmt.Errorf("create read scheduler: %w", err) } - writeScheduler, err := createScheduler(c.Write) + writeScheduler, err := createScheduler(c.Write()) if err != nil { return nil, fmt.Errorf("create write scheduler: %w", err) } @@ -62,8 +63,8 @@ func NewLimiter(c LimiterConfig) (Limiter, error) { return l, nil } -func createScheduler(config OpConfig) (scheduler, error) { - if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit { +func createScheduler(config limits.OpConfig) (scheduler, error) { + if len(config.Tags) == 0 && config.MaxWaitingOps == limits.NoLimit { return newSemaphoreScheduler(config.MaxRunningOps), nil } return scheduling.NewMClock( @@ -71,9 +72,9 @@ func createScheduler(config OpConfig) (scheduler, error) { converToSchedulingTags(config.Tags), config.IdleTimeout) } -func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo { +func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo { result := make(map[string]scheduling.TagInfo) - for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} { + for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache} { result[tag.String()] = scheduling.TagInfo{ Share: defaultShare, } @@ -89,7 +90,6 @@ func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo if l.ReservedOps != nil && *l.ReservedOps != 0 { v.ReservedIOPS = l.ReservedOps } - v.Prohibited = l.Prohibited result[l.Tag] = v } 
return result @@ -149,11 +149,6 @@ func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { } func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } tag, ok := tagging.IOTagFromContext(ctx) if !ok { tag = IOTagClient.String() @@ -169,7 +164,8 @@ func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (R rel, err := s.RequestArrival(ctx, tag) stat.inProgress.Add(1) if err != nil { - if isResourceExhaustedErr(err) { + if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || + errors.Is(err, errSemaphoreLimitExceeded) { stat.resourceExhausted.Add(1) return nil, &apistatus.ResourceExhausted{} } @@ -238,9 +234,3 @@ func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation s metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh) } } - -func isResourceExhaustedErr(err error) bool { - return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || - errors.Is(err, errSemaphoreLimitExceeded) || - errors.Is(err, scheduling.ErrTagRequestsProhibited) -} diff --git a/internal/qos/stats.go b/internal/qos/stats.go index 3ecfad9f9..f077f552b 100644 --- a/internal/qos/stats.go +++ b/internal/qos/stats.go @@ -3,13 +3,12 @@ package qos const unknownStatsTag = "unknown" var statTags = map[string]struct{}{ - IOTagBackground.String(): {}, IOTagClient.String(): {}, - IOTagCritical.String(): {}, + IOTagBackground.String(): {}, IOTagInternal.String(): {}, IOTagPolicer.String(): {}, - IOTagTreeSync.String(): {}, IOTagWritecache.String(): {}, + IOTagCritical.String(): {}, unknownStatsTag: {}, } diff --git a/internal/qos/tags.go b/internal/qos/tags.go index e3f7cafd6..9db45f190 100644 --- a/internal/qos/tags.go +++ b/internal/qos/tags.go @@ -10,33 +10,30 @@ import ( type IOTag string const ( - IOTagBackground IOTag = "background" IOTagClient 
IOTag = "client" - IOTagCritical IOTag = "critical" IOTagInternal IOTag = "internal" - IOTagPolicer IOTag = "policer" - IOTagTreeSync IOTag = "treesync" + IOTagBackground IOTag = "background" IOTagWritecache IOTag = "writecache" + IOTagPolicer IOTag = "policer" + IOTagCritical IOTag = "critical" ioTagUnknown IOTag = "" ) func FromRawString(s string) (IOTag, error) { switch s { - case string(IOTagBackground): - return IOTagBackground, nil - case string(IOTagClient): - return IOTagClient, nil case string(IOTagCritical): return IOTagCritical, nil + case string(IOTagClient): + return IOTagClient, nil case string(IOTagInternal): return IOTagInternal, nil - case string(IOTagPolicer): - return IOTagPolicer, nil - case string(IOTagTreeSync): - return IOTagTreeSync, nil + case string(IOTagBackground): + return IOTagBackground, nil case string(IOTagWritecache): return IOTagWritecache, nil + case string(IOTagPolicer): + return IOTagPolicer, nil default: return ioTagUnknown, fmt.Errorf("unknown tag %s", s) } @@ -53,7 +50,3 @@ func IOTagFromContext(ctx context.Context) string { } return tag } - -func (t IOTag) IsLocal() bool { - return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync -} diff --git a/internal/qos/validate.go b/internal/qos/validate.go index 70f1f24e8..3fa4ebbd1 100644 --- a/internal/qos/validate.go +++ b/internal/qos/validate.go @@ -4,6 +4,8 @@ import ( "errors" "fmt" "math" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" ) var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any") @@ -12,17 +14,17 @@ type tagConfig struct { Shares, Limit, Reserved *float64 } -func (c *LimiterConfig) Validate() error { - if err := validateOpConfig(c.Read); err != nil { +func validateConfig(c *limits.Config) error { + if err := validateOpConfig(c.Read()); err != nil { return fmt.Errorf("limits 'read' section validation error: %w", 
err) } - if err := validateOpConfig(c.Write); err != nil { + if err := validateOpConfig(c.Write()); err != nil { return fmt.Errorf("limits 'write' section validation error: %w", err) } return nil } -func validateOpConfig(c OpConfig) error { +func validateOpConfig(c limits.OpConfig) error { if c.MaxRunningOps <= 0 { return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps) } @@ -38,14 +40,13 @@ func validateOpConfig(c OpConfig) error { return nil } -func validateTags(configTags []IOTagConfig) error { +func validateTags(configTags []limits.IOTagConfig) error { tags := map[IOTag]tagConfig{ - IOTagBackground: {}, IOTagClient: {}, IOTagInternal: {}, - IOTagPolicer: {}, - IOTagTreeSync: {}, + IOTagBackground: {}, IOTagWritecache: {}, + IOTagPolicer: {}, } for _, t := range configTags { tag, err := FromRawString(t.Tag) diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go index 91ee5c6c3..d4bc0cf68 100644 --- a/pkg/core/client/util.go +++ b/pkg/core/client/util.go @@ -3,7 +3,6 @@ package client import ( "bytes" "fmt" - "iter" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -20,7 +19,7 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro // Args must not be nil. func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface { PublicKey() []byte - Addresses() iter.Seq[string] + IterateAddresses(func(string) bool) NumberOfAddresses() int ExternalAddresses() []string }, diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go index 61c568052..a24b36944 100644 --- a/pkg/core/container/util.go +++ b/pkg/core/container/util.go @@ -26,10 +26,10 @@ func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { // IsIndexedContainer returns True if container attributes should be indexed. 
func IsIndexedContainer(cnr containerSDK.Container) bool { var isS3Container bool - for key := range cnr.Attributes() { + cnr.IterateAttributes(func(key, _ string) { if key == ".s3-location-constraint" { isS3Container = true } - } + }) return !isS3Container } diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go index e58e42634..b0c9e1f9e 100644 --- a/pkg/core/netmap/nodes.go +++ b/pkg/core/netmap/nodes.go @@ -1,10 +1,6 @@ package netmap -import ( - "iter" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) +import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" // Node is a named type of netmap.NodeInfo which provides interface needed // in the current repository. Node is expected to be used everywhere instead @@ -18,20 +14,10 @@ func (x Node) PublicKey() []byte { return (netmap.NodeInfo)(x).PublicKey() } -// Addresses returns an iterator over all announced network addresses. -func (x Node) Addresses() iter.Seq[string] { - return (netmap.NodeInfo)(x).NetworkEndpoints() -} - // IterateAddresses iterates over all announced network addresses // and passes them into f. Handler MUST NOT be nil. -// Deprecated: use [Node.Addresses] instead. func (x Node) IterateAddresses(f func(string) bool) { - for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { - if f(s) { - return - } - } + (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) } // NumberOfAddresses returns number of announced network addresses. 
diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index dc336eb34..239a9f389 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -411,11 +410,11 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ + &testNetmapSource{ + netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, }, - CurrentEpoch: curEpoch, + currentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -484,12 +483,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ + &testNetmapSource{ + netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - CurrentEpoch: curEpoch, + currentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -560,12 +559,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &utilTesting.TestNetmapSource{ - Netmaps: map[uint64]*netmap.NetMap{ + &testNetmapSource{ + netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - CurrentEpoch: curEpoch, + currentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -597,3 +596,26 @@ func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) 
(*container.DelInfo, error) { return nil, nil } + +type testNetmapSource struct { + netmaps map[uint64]*netmap.NetMap + currentEpoch uint64 +} + +func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { + if diff >= s.currentEpoch { + return nil, fmt.Errorf("invalid diff") + } + return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) +} + +func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { + if nm, found := s.netmaps[epoch]; found { + return nm, nil + } + return nil, fmt.Errorf("netmap not found") +} + +func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) { + return s.currentEpoch, nil +} diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go index aab12ebf9..67c9a3188 100644 --- a/pkg/core/object/info.go +++ b/pkg/core/object/info.go @@ -13,13 +13,6 @@ type ECInfo struct { Total uint32 } -func (v *ECInfo) String() string { - if v == nil { - return "" - } - return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total) -} - // Info groups object address with its FrostFS // object info. 
type Info struct { @@ -30,5 +23,5 @@ type Info struct { } func (v Info) String() string { - return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo) + return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject) } diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 3d236641e..f7b71dbe6 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -50,7 +50,7 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, var err error s.netmapProcessor, err = netmap.New(&netmap.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, NetmapClient: netmap.NewNetmapClient(s.netmapClient), @@ -159,7 +159,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli } else { // create governance processor governanceProcessor, err := governance.New(&governance.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, FrostFSClient: frostfsCli, AlphabetState: s, @@ -225,7 +225,7 @@ func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) er // create alphabet processor s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ ParsedWallets: parsedWallets, - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, AlphabetContracts: s.contracts.alphabet, @@ -247,7 +247,7 @@ func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, c s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize)) // container processor containerProcessor, err := cont.New(&cont.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, AlphabetState: s, @@ -268,7 +268,7 @@ func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, fro s.log.Debug(ctx, 
logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize)) // create balance processor balanceProcessor, err := balance.New(&balance.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, FrostFSClient: frostfsCli, @@ -291,7 +291,7 @@ func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Vip s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize)) frostfsProcessor, err := frostfs.New(&frostfs.Params{ - Log: s.log.WithTag(logger.TagProcessor), + Log: s.log, Metrics: s.irMetrics, PoolSize: poolSize, FrostFSContract: s.contracts.frostfs, @@ -342,7 +342,7 @@ func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logg controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient, controlsrv.WithAllowedKeys(authKeys), - ), log.WithTag(logger.TagGrpcSvc), audit) + ), log, audit) grpcControlSrv := grpc.NewServer() control.RegisterControlServiceServer(grpcControlSrv, controlSvc) @@ -458,7 +458,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- } morphChain := &chainParams{ - log: s.log.WithTag(logger.TagMorph), + log: s.log, cfg: cfg, key: s.key, name: morphPrefix, diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 3a5137261..ae5661905 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -339,7 +339,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan ) (*Server, error) { var err error server := &Server{ - log: log.WithTag(logger.TagIr), + log: log, irMetrics: metrics, cmode: cmode, } diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go index a6c40f9fa..08ef8b86c 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go @@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option { // 
WithLogger returns an option to specify Blobovnicza's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "Blobovnicza")) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index 3e8b9f07b..d9e99d0d1 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -158,11 +158,11 @@ func (b *Blobovniczas) Path() string { } // SetCompressor implements common.Storage. -func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) { +func (b *Blobovniczas) SetCompressor(cc *compression.Config) { b.compression = cc } -func (b *Blobovniczas) Compressor() *compression.Compressor { +func (b *Blobovniczas) Compressor() *compression.Config { return b.compression } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index f87f4a144..ec9743b57 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -19,8 +19,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { st := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(10), WithBlobovniczaShallowDepth(1), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go index df2b4ffe5..5414140f0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go @@ -19,8 +19,7 @@ func TestExistsInvalidStorageID(t *testing.T) { dir := 
t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go index 9244d765c..d390ecf1d 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go @@ -15,8 +15,7 @@ func TestGeneric(t *testing.T) { helper := func(t *testing.T, dir string) common.Storage { return NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), @@ -44,8 +43,7 @@ func TestControl(t *testing.T) { newTree := func(t *testing.T) common.Storage { return NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index 6438f715b..f2f9509ad 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -141,8 +141,8 @@ func (b *sharedDB) SystemPath() string { return b.path } -// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. -type levelDBManager struct { +// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. 
+type levelDbManager struct { dbMtx *sync.RWMutex databases map[uint64]*sharedDB @@ -157,8 +157,8 @@ type levelDBManager struct { func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string, readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger, -) *levelDBManager { - result := &levelDBManager{ +) *levelDbManager { + result := &levelDbManager{ databases: make(map[uint64]*sharedDB), dbMtx: &sync.RWMutex{}, @@ -173,7 +173,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st return result } -func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB { +func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB { res := m.getDBIfExists(idx) if res != nil { return res @@ -181,14 +181,14 @@ func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB { return m.getOrCreateDB(idx) } -func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB { +func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB { m.dbMtx.RLock() defer m.dbMtx.RUnlock() return m.databases[idx] } -func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB { +func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB { m.dbMtx.Lock() defer m.dbMtx.Unlock() @@ -202,7 +202,7 @@ func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB { return db } -func (m *levelDBManager) hasAnyDB() bool { +func (m *levelDbManager) hasAnyDB() bool { m.dbMtx.RLock() defer m.dbMtx.RUnlock() @@ -213,7 +213,7 @@ func (m *levelDBManager) hasAnyDB() bool { // // The blobovnicza opens at the first request, closes after the last request. 
type dbManager struct { - levelToManager map[string]*levelDBManager + levelToManager map[string]*levelDbManager levelToManagerGuard *sync.RWMutex closedFlag *atomic.Bool dbCounter *openDBCounter @@ -231,7 +231,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool, options: options, readOnly: readOnly, metrics: metrics, - levelToManager: make(map[string]*levelDBManager), + levelToManager: make(map[string]*levelDbManager), levelToManagerGuard: &sync.RWMutex{}, log: log, closedFlag: &atomic.Bool{}, @@ -266,7 +266,7 @@ func (m *dbManager) Close() { m.dbCounter.WaitUntilAllClosed() } -func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager { +func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager { result := m.getLevelManagerIfExists(lvlPath) if result != nil { return result @@ -274,14 +274,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager { return m.getOrCreateLevelManager(lvlPath) } -func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager { +func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager { m.levelToManagerGuard.RLock() defer m.levelToManagerGuard.RUnlock() return m.levelToManager[lvlPath] } -func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager { +func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager { m.levelToManagerGuard.Lock() defer m.levelToManagerGuard.Unlock() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go index 5f268b0f2..0e1b2022e 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go @@ -19,7 +19,7 @@ type cfg struct { openedCacheSize int blzShallowDepth uint64 blzShallowWidth uint64 - compression *compression.Compressor + compression *compression.Config blzOpts []blobovnicza.Option reportError func(context.Context, 
string, error) // reportError is the function called when encountering disk errors. metrics Metrics @@ -63,15 +63,10 @@ func initConfig(c *cfg) { } } -func WithBlobovniczaTreeLogger(log *logger.Logger) Option { +func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = log - } -} - -func WithBlobovniczaLogger(log *logger.Logger) Option { - return func(c *cfg) { - c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log)) + c.log = l + c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l)) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index a840275b8..7ef3317fd 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -226,7 +226,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) { sysPath := filepath.Join(b.rootPath, path) - sysPath += rebuildSuffix + sysPath = sysPath + rebuildSuffix _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm) if err != nil { return nil, err @@ -328,7 +328,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo return nil } -func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) { +func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) (bool, error) { select { case <-ctx.Done(): return false, ctx.Err() @@ -341,7 +341,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) b.dbFilesGuard.Lock() defer b.dbFilesGuard.Unlock() - if err := shDB.CloseAndRemoveFile(ctx); err != nil { + if err := shDb.CloseAndRemoveFile(ctx); err != nil { return false, err } b.commondbManager.CleanResources(path) diff --git 
a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go index 4146ef260..8832603c4 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go @@ -140,8 +140,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) { b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index a7a99fec3..9c971bfb6 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -50,8 +50,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -107,8 +106,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -162,8 +160,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b 
:= NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -234,8 +231,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -266,8 +262,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), WithBlobovniczaShallowDepth(1), @@ -309,8 +304,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), // 64KB object size limit WithBlobovniczaShallowWidth(5), WithBlobovniczaShallowDepth(2), // depth = 2 @@ -338,8 +332,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { b = NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(32*1024), // 32KB object size limit WithBlobovniczaShallowWidth(5), WithBlobovniczaShallowDepth(3), // depth = 3 @@ -381,8 +374,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta dir := t.TempDir() b := NewBlobovniczaTree( 
context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(sourceWidth), WithBlobovniczaShallowDepth(sourceDepth), @@ -423,8 +415,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta b = NewBlobovniczaTree( context.Background(), - WithBlobovniczaLogger(test.NewLogger(t)), - WithBlobovniczaTreeLogger(test.NewLogger(t)), + WithLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(targetWidth), WithBlobovniczaShallowDepth(targetDepth), diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index ceaf2538a..f850f48b4 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -41,7 +41,7 @@ type SubStorageInfo struct { type Option func(*cfg) type cfg struct { - compression compression.Compressor + compression compression.Config log *logger.Logger storage []SubStorage metrics Metrics @@ -91,13 +91,50 @@ func WithStorages(st []SubStorage) Option { // WithLogger returns option to specify BlobStor's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "BlobStor")) } } -func WithCompression(comp compression.Config) Option { +// WithCompressObjects returns option to toggle +// compression of the stored objects. +// +// If true, Zstandard algorithm is used for data compression. +// +// If compressor (decompressor) creation failed, +// the uncompressed option will be used, and the error +// is recorded in the provided log. +func WithCompressObjects(comp bool) Option { return func(c *cfg) { - c.compression.Config = comp + c.compression.Enabled = comp + } +} + +// WithCompressibilityEstimate returns an option to use +// normilized compressibility estimate to decide compress +// data or not. 
+// +// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5 +func WithCompressibilityEstimate(v bool) Option { + return func(c *cfg) { + c.compression.UseCompressEstimation = v + } +} + +// WithCompressibilityEstimateThreshold returns an option to set +// normilized compressibility estimate threshold. +// +// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5 +func WithCompressibilityEstimateThreshold(threshold float64) Option { + return func(c *cfg) { + c.compression.CompressEstimationThreshold = threshold + } +} + +// WithUncompressableContentTypes returns option to disable decompression +// for specific content types as seen by object.AttributeContentType attribute. +func WithUncompressableContentTypes(values []string) Option { + return func(c *cfg) { + c.compression.UncompressableContentTypes = values } } @@ -115,6 +152,6 @@ func WithMetrics(m Metrics) Option { } } -func (b *BlobStor) Compressor() *compression.Compressor { - return &b.compression +func (b *BlobStor) Compressor() *compression.Config { + return &b.cfg.compression } diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go index 6ddeb6f00..6cc56fa3b 100644 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ b/pkg/local_object_storage/blobstor/blobstor_test.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -52,9 +51,7 @@ func 
TestCompression(t *testing.T) { newBlobStor := func(t *testing.T, compress bool) *BlobStor { bs := New( - WithCompression(compression.Config{ - Enabled: compress, - }), + WithCompressObjects(compress), WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) require.NoError(t, bs.Init(context.Background())) @@ -116,10 +113,8 @@ func TestBlobstor_needsCompression(t *testing.T) { dir := t.TempDir() bs := New( - WithCompression(compression.Config{ - Enabled: compress, - UncompressableContentTypes: ct, - }), + WithCompressObjects(compress), + WithUncompressableContentTypes(ct), WithStorages([]SubStorage{ { Storage: blobovniczatree.NewBlobovniczaTree( diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go index e35c35e60..6ecef48cd 100644 --- a/pkg/local_object_storage/blobstor/common/storage.go +++ b/pkg/local_object_storage/blobstor/common/storage.go @@ -18,8 +18,8 @@ type Storage interface { Path() string ObjectsCount(ctx context.Context) (uint64, error) - SetCompressor(cc *compression.Compressor) - Compressor() *compression.Compressor + SetCompressor(cc *compression.Config) + Compressor() *compression.Config // SetReportErrorFunc allows to provide a function to be called on disk errors. // This function MUST be called before Open. 
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go index 445a0494b..9f70f8ec2 100644 --- a/pkg/local_object_storage/blobstor/compression/bench_test.go +++ b/pkg/local_object_storage/blobstor/compression/bench_test.go @@ -11,7 +11,7 @@ import ( ) func BenchmarkCompression(b *testing.B) { - c := Compressor{Config: Config{Enabled: true}} + c := Config{Enabled: true} require.NoError(b, c.Init()) for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} { @@ -33,7 +33,7 @@ func BenchmarkCompression(b *testing.B) { } } -func benchWith(b *testing.B, c Compressor, data []byte) { +func benchWith(b *testing.B, c Config, data []byte) { b.ResetTimer() b.ReportAllocs() for range b.N { @@ -56,10 +56,8 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) { b.Run("estimate", func(b *testing.B) { b.ResetTimer() - c := &Compressor{ - Config: Config{ - Enabled: true, - }, + c := &Config{ + Enabled: true, } require.NoError(b, c.Init()) @@ -78,10 +76,8 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) { b.Run("compress", func(b *testing.B) { b.ResetTimer() - c := &Compressor{ - Config: Config{ - Enabled: true, - }, + c := &Config{ + Enabled: true, } require.NoError(b, c.Init()) diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go index c76cec9a1..85ab47692 100644 --- a/pkg/local_object_storage/blobstor/compression/compress.go +++ b/pkg/local_object_storage/blobstor/compression/compress.go @@ -4,36 +4,21 @@ import ( "bytes" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/klauspost/compress" "github.com/klauspost/compress/zstd" ) -type Level string - -const ( - LevelDefault Level = "" - LevelOptimal Level = "optimal" - LevelFastest Level = "fastest" - LevelSmallestSize Level = "smallest_size" -) - -type 
Compressor struct { - Config - - encoder *zstd.Encoder - decoder *zstd.Decoder -} - // Config represents common compression-related configuration. type Config struct { Enabled bool UncompressableContentTypes []string - Level Level - EstimateCompressibility bool - EstimateCompressibilityThreshold float64 + UseCompressEstimation bool + CompressEstimationThreshold float64 + + encoder *zstd.Encoder + decoder *zstd.Decoder } // zstdFrameMagic contains first 4 bytes of any compressed object @@ -41,11 +26,11 @@ type Config struct { var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} // Init initializes compression routines. -func (c *Compressor) Init() error { +func (c *Config) Init() error { var err error if c.Enabled { - c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel())) + c.encoder, err = zstd.NewWriter(nil) if err != nil { return err } @@ -88,7 +73,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool { // Decompress decompresses data if it starts with the magic // and returns data untouched otherwise. -func (c *Compressor) Decompress(data []byte) ([]byte, error) { +func (c *Config) Decompress(data []byte) ([]byte, error) { if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) { return data, nil } @@ -97,13 +82,13 @@ func (c *Compressor) Decompress(data []byte) ([]byte, error) { // Compress compresses data if compression is enabled // and returns data untouched otherwise. 
-func (c *Compressor) Compress(data []byte) []byte { +func (c *Config) Compress(data []byte) []byte { if c == nil || !c.Enabled { return data } - if c.EstimateCompressibility { + if c.UseCompressEstimation { estimated := compress.Estimate(data) - if estimated >= c.EstimateCompressibilityThreshold { + if estimated >= c.CompressEstimationThreshold { return c.compress(data) } return data @@ -111,7 +96,7 @@ func (c *Compressor) Compress(data []byte) []byte { return c.compress(data) } -func (c *Compressor) compress(data []byte) []byte { +func (c *Config) compress(data []byte) []byte { maxSize := c.encoder.MaxEncodedSize(len(data)) compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize)) if len(data) < len(compressed) { @@ -121,7 +106,7 @@ func (c *Compressor) compress(data []byte) []byte { } // Close closes encoder and decoder, returns any error occurred. -func (c *Compressor) Close() error { +func (c *Config) Close() error { var err error if c.encoder != nil { err = c.encoder.Close() @@ -131,24 +116,3 @@ func (c *Compressor) Close() error { } return err } - -func (c *Config) HasValidCompressionLevel() bool { - return c.Level == LevelDefault || - c.Level == LevelOptimal || - c.Level == LevelFastest || - c.Level == LevelSmallestSize -} - -func (c *Compressor) compressionLevel() zstd.EncoderLevel { - switch c.Level { - case LevelDefault, LevelOptimal: - return zstd.SpeedDefault - case LevelFastest: - return zstd.SpeedFastest - case LevelSmallestSize: - return zstd.SpeedBestCompression - default: - assert.Fail("unknown compression level", string(c.Level)) - return zstd.SpeedDefault - } -} diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 0418eedd0..93316be02 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -6,7 +6,6 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "go.uber.org/zap" ) @@ -54,10 +53,6 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag func (b *BlobStor) Init(ctx context.Context) error { b.log.Debug(ctx, logs.BlobstorInitializing) - if !b.compression.HasValidCompressionLevel() { - b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level))) - b.compression.Level = compression.LevelDefault - } if err := b.compression.Init(); err != nil { return err } diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go index 3caee7ee1..b5dbc9e40 100644 --- a/pkg/local_object_storage/blobstor/fstree/counter.go +++ b/pkg/local_object_storage/blobstor/fstree/counter.go @@ -2,8 +2,6 @@ package fstree import ( "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) // FileCounter used to count files in FSTree. The implementation must be thread-safe. 
@@ -54,11 +52,16 @@ func (c *SimpleCounter) Dec(size uint64) { c.mtx.Lock() defer c.mtx.Unlock() - assert.True(c.count > 0, "fstree.SimpleCounter: invalid count") - c.count-- - - assert.True(c.size >= size, "fstree.SimpleCounter: invalid size") - c.size -= size + if c.count > 0 { + c.count-- + } else { + panic("fstree.SimpleCounter: invalid count") + } + if c.size >= size { + c.size -= size + } else { + panic("fstree.SimpleCounter: invalid size") + } } func (c *SimpleCounter) CountSize() (uint64, uint64) { diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index 112741ab4..031b385b2 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -45,7 +45,7 @@ type FSTree struct { log *logger.Logger - compressor *compression.Compressor + *compression.Config Depth uint64 DirNameLen int @@ -82,7 +82,7 @@ func New(opts ...Option) *FSTree { Permissions: 0o700, RootPath: "./", }, - compressor: nil, + Config: nil, Depth: 4, DirNameLen: DirNameLen, metrics: &noopMetrics{}, @@ -196,7 +196,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr } if err == nil { - data, err = t.compressor.Decompress(data) + data, err = t.Decompress(data) } if err != nil { if prm.IgnoreErrors { @@ -405,7 +405,7 @@ func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, err return common.PutRes{}, err } if !prm.DontCompress { - prm.RawData = t.compressor.Compress(prm.RawData) + prm.RawData = t.Compress(prm.RawData) } size = len(prm.RawData) @@ -448,7 +448,7 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err } } - data, err = t.compressor.Decompress(data) + data, err = t.Decompress(data) if err != nil { return common.GetRes{}, err } @@ -597,12 +597,12 @@ func (t *FSTree) Path() string { } // SetCompressor implements common.Storage. 
-func (t *FSTree) SetCompressor(cc *compression.Compressor) { - t.compressor = cc +func (t *FSTree) SetCompressor(cc *compression.Config) { + t.Config = cc } -func (t *FSTree) Compressor() *compression.Compressor { - return t.compressor +func (t *FSTree) Compressor() *compression.Config { + return t.Config } // SetReportErrorFunc implements common.Storage. diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go index 6d633dad6..07a618b0a 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go @@ -67,9 +67,12 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error { err := w.writeFile(tmpPath, data) if err != nil { var pe *fs.PathError - if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) { - err = common.ErrNoSpace - _ = os.RemoveAll(tmpPath) + if errors.As(err, &pe) { + switch pe.Err { + case syscall.ENOSPC: + err = common.ErrNoSpace + _ = os.RemoveAll(tmpPath) + } } return err } diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go index 6f2ac87e1..7155ddcbb 100644 --- a/pkg/local_object_storage/blobstor/fstree/option.go +++ b/pkg/local_object_storage/blobstor/fstree/option.go @@ -4,6 +4,7 @@ import ( "io/fs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "go.uber.org/zap" ) type Option func(*FSTree) @@ -52,6 +53,6 @@ func WithFileCounter(c FileCounter) Option { func WithLogger(l *logger.Logger) Option { return func(f *FSTree) { - f.log = l + f.log = l.With(zap.String("component", "FSTree")) } } diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go index d54c54f59..c11d0888b 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go +++ 
b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go @@ -50,7 +50,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc) _, err := s.Iterate(context.Background(), iterPrm) require.NoError(t, err) - require.Len(t, objects, len(seen)) + require.Equal(t, len(objects), len(seen)) for i := range objects { d, ok := seen[objects[i].addr.String()] require.True(t, ok) diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index 2786321a8..ccfa510fe 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -8,7 +8,6 @@ import ( "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -25,9 +24,7 @@ func TestIterateObjects(t *testing.T) { // create BlobStor instance blobStor := New( WithStorages(defaultStorages(p, smalSz)), - WithCompression(compression.Config{ - Enabled: true, - }), + WithCompressObjects(true), ) defer os.RemoveAll(p) diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go index 3df96a1c3..95a916662 100644 --- a/pkg/local_object_storage/blobstor/memstore/control.go +++ b/pkg/local_object_storage/blobstor/memstore/control.go @@ -16,7 +16,7 @@ func (s *memstoreImpl) Init() error func (s *memstoreImpl) Close(context.Context) error { return nil } func (s *memstoreImpl) Type() string { return Type } func (s *memstoreImpl) Path() string { return s.rootPath } -func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc } 
-func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression } +func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } +func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {} func (s *memstoreImpl) SetParentID(string) {} diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go index 7605af4e5..97a03993d 100644 --- a/pkg/local_object_storage/blobstor/memstore/option.go +++ b/pkg/local_object_storage/blobstor/memstore/option.go @@ -7,7 +7,7 @@ import ( type cfg struct { rootPath string readOnly bool - compression *compression.Compressor + compression *compression.Config } func defaultConfig() *cfg { diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go index 3a38ecf82..fb1188751 100644 --- a/pkg/local_object_storage/blobstor/teststore/option.go +++ b/pkg/local_object_storage/blobstor/teststore/option.go @@ -17,8 +17,8 @@ type cfg struct { Type func() string Path func() string - SetCompressor func(cc *compression.Compressor) - Compressor func() *compression.Compressor + SetCompressor func(cc *compression.Config) + Compressor func() *compression.Config SetReportErrorFunc func(f func(context.Context, string, error)) Get func(common.GetPrm) (common.GetRes, error) @@ -45,11 +45,11 @@ func WithClose(f func() error) Option { return func(c *cfg) { c func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } } func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } } -func WithSetCompressor(f func(*compression.Compressor)) Option { +func WithSetCompressor(f func(*compression.Config)) Option { return func(c *cfg) { c.overrides.SetCompressor = f } } -func WithCompressor(f func() *compression.Compressor) Option { +func WithCompressor(f 
func() *compression.Config) Option { return func(c *cfg) { c.overrides.Compressor = f } } diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go index 190b6a876..626ba0023 100644 --- a/pkg/local_object_storage/blobstor/teststore/teststore.go +++ b/pkg/local_object_storage/blobstor/teststore/teststore.go @@ -116,7 +116,7 @@ func (s *TestStore) Path() string { } } -func (s *TestStore) SetCompressor(cc *compression.Compressor) { +func (s *TestStore) SetCompressor(cc *compression.Config) { s.mu.RLock() defer s.mu.RUnlock() switch { @@ -129,7 +129,7 @@ func (s *TestStore) SetCompressor(cc *compression.Compressor) { } } -func (s *TestStore) Compressor() *compression.Compressor { +func (s *TestStore) Compressor() *compression.Config { s.mu.RLock() defer s.mu.RUnlock() switch { diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index e0617a832..3160d7f83 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -48,9 +48,8 @@ func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) defer elapsed("ContainerSize", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - var csErr error - res, csErr = e.containerSize(ctx, prm) - return csErr + res = e.containerSize(ctx, prm) + return nil }) return @@ -70,13 +69,12 @@ func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, er return res.Size(), nil } -func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { - var res ContainerSizeRes - err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { +func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { var csPrm shard.ContainerSizePrm 
csPrm.SetContainerID(prm.cnr) - csRes, err := sh.ContainerSize(ctx, csPrm) + csRes, err := sh.Shard.ContainerSize(ctx, csPrm) if err != nil { e.reportShardError(ctx, sh, "can't get container size", err, zap.Stringer("container_id", prm.cnr)) @@ -88,7 +86,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) return false }) - return res, err + return } // ListContainers returns a unique container IDs presented in the engine objects. @@ -98,9 +96,8 @@ func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) defer elapsed("ListContainers", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - var lcErr error - res, lcErr = e.listContainers(ctx) - return lcErr + res = e.listContainers(ctx) + return nil }) return @@ -118,11 +115,11 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) { return res.Containers(), nil } -func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) { +func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes { uniqueIDs := make(map[string]cid.ID) - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { - res, err := sh.ListContainers(ctx, shard.ListContainersPrm{}) + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{}) if err != nil { e.reportShardError(ctx, sh, "can't get list of containers", err) return false @@ -136,9 +133,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, } return false - }); err != nil { - return ListContainersRes{}, err - } + }) result := make([]cid.ID, 0, len(uniqueIDs)) for _, v := range uniqueIDs { @@ -147,5 +142,5 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, return ListContainersRes{ containers: result, - }, nil + } } diff --git a/pkg/local_object_storage/engine/control.go 
b/pkg/local_object_storage/engine/control.go index bf1649f6e..7caa515d4 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -22,6 +22,10 @@ type shardInitError struct { // Open opens all StorageEngine's components. func (e *StorageEngine) Open(ctx context.Context) error { + return e.open(ctx) +} + +func (e *StorageEngine) open(ctx context.Context) error { e.mtx.Lock() defer e.mtx.Unlock() @@ -73,7 +77,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { errCh := make(chan shardInitError, len(e.shards)) var eg errgroup.Group - if e.lowMem && e.anyShardRequiresRefill() { + if e.cfg.lowMem && e.anyShardRequiresRefill() { eg.SetLimit(1) } @@ -145,11 +149,11 @@ var errClosed = errors.New("storage engine is closed") func (e *StorageEngine) Close(ctx context.Context) error { close(e.closeCh) defer e.wg.Wait() - return e.closeEngine(ctx) + return e.setBlockExecErr(ctx, errClosed) } // closes all shards. Never returns an error, shard errors are logged. -func (e *StorageEngine) closeAllShards(ctx context.Context) error { +func (e *StorageEngine) close(ctx context.Context) error { e.mtx.RLock() defer e.mtx.RUnlock() @@ -172,23 +176,70 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error { e.blockExec.mtx.RLock() defer e.blockExec.mtx.RUnlock() - if e.blockExec.closed { - return errClosed + if e.blockExec.err != nil { + return e.blockExec.err } return op() } -func (e *StorageEngine) closeEngine(ctx context.Context) error { +// sets the flag of blocking execution of all data operations according to err: +// - err != nil, then blocks the execution. If exec wasn't blocked, calls close method +// (if err == errClosed => additionally releases pools and does not allow to resume executions). +// - otherwise, resumes execution. If exec was blocked, calls open method. +// +// Can be called concurrently with exec. In this case it waits for all executions to complete. 
+func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error { e.blockExec.mtx.Lock() defer e.blockExec.mtx.Unlock() - if e.blockExec.closed { + prevErr := e.blockExec.err + + wasClosed := errors.Is(prevErr, errClosed) + if wasClosed { return errClosed } - e.blockExec.closed = true - return e.closeAllShards(ctx) + e.blockExec.err = err + + if err == nil { + if prevErr != nil { // block -> ok + return e.open(ctx) + } + } else if prevErr == nil { // ok -> block + return e.close(ctx) + } + + // otherwise do nothing + + return nil +} + +// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err. +// To resume the execution, use ResumeExecution method. +// +// Сan be called regardless of the fact of the previous blocking. If execution wasn't blocked, releases all resources +// similar to Close. Can be called concurrently with Close and any data related method (waits for all executions +// to complete). Returns error if any Close has been called before. +// +// Must not be called concurrently with either Open or Init. +// +// Note: technically passing nil error will resume the execution, otherwise, it is recommended to call ResumeExecution +// for this. +func (e *StorageEngine) BlockExecution(err error) error { + return e.setBlockExecErr(context.Background(), err) +} + +// ResumeExecution resumes the execution of any data-related operation. +// To block the execution, use BlockExecution method. +// +// Сan be called regardless of the fact of the previous blocking. If execution was blocked, prepares all resources +// similar to Open. Can be called concurrently with Close and any data related method (waits for all executions +// to complete). Returns error if any Close has been called before. +// +// Must not be called concurrently with either Open or Init. 
+func (e *StorageEngine) ResumeExecution() error { + return e.setBlockExecErr(context.Background(), nil) } type ReConfiguration struct { diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index 4ff0ed5ec..a0e658aeb 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -2,6 +2,7 @@ package engine import ( "context" + "errors" "fmt" "io/fs" "os" @@ -11,14 +12,17 @@ import ( "testing" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" ) @@ -159,6 +163,42 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O require.Equal(t, 1, shardCount) } +func TestExecBlocks(t *testing.T) { + e := testNewEngine(t).setShardsNum(t, 2).prepare(t).engine // number doesn't matter in this test, 2 is several but not many + + // put some object + obj := testutil.GenerateObjectWithCID(cidtest.ID()) + + addr := object.AddressOf(obj) + + require.NoError(t, Put(context.Background(), e, obj, false)) + + // block executions + errBlock := errors.New("block exec err") + + 
require.NoError(t, e.BlockExecution(errBlock)) + + // try to exec some op + _, err := Head(context.Background(), e, addr) + require.ErrorIs(t, err, errBlock) + + // resume executions + require.NoError(t, e.ResumeExecution()) + + _, err = Head(context.Background(), e, addr) // can be any data-related op + require.NoError(t, err) + + // close + require.NoError(t, e.Close(context.Background())) + + // try exec after close + _, err = Head(context.Background(), e, addr) + require.Error(t, err) + + // try to resume + require.Error(t, e.ResumeExecution()) +} + func TestPersistentShardID(t *testing.T) { dir := t.TempDir() diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 223cdbc48..5e5f65fa2 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -71,7 +71,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { // Removal of a big object is done in multiple stages: // 1. Remove the parent object. If it is locked or already removed, return immediately. // 2. Otherwise, search for all objects with a particular SplitID and delete them too. - if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { var existsPrm shard.ExistsPrm existsPrm.Address = prm.addr @@ -116,22 +116,20 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { // If a parent object is removed we should set GC mark on each shard. 
return splitInfo == nil - }); err != nil { - return err - } + }) if locked.is { return new(apistatus.ObjectLocked) } if splitInfo != nil { - return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) + e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) } return nil } -func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error { +func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) { var fs objectSDK.SearchFilters fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID) @@ -144,7 +142,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo inhumePrm.ForceRemoval() } - return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Select(ctx, selectPrm) if err != nil { e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren, diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 376d545d3..a915c9bd6 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -33,8 +33,9 @@ type StorageEngine struct { wg sync.WaitGroup blockExec struct { - mtx sync.RWMutex - closed bool + mtx sync.RWMutex + + err error } evacuateLimiter *evacuationLimiter } @@ -211,18 +212,12 @@ func New(opts ...Option) *StorageEngine { opts[i](c) } - evLimMtx := &sync.RWMutex{} - evLimCond := sync.NewCond(evLimMtx) - return &StorageEngine{ - cfg: c, - shards: make(map[string]hashedShard), - closeCh: make(chan struct{}), - setModeCh: make(chan setModeRequest), - evacuateLimiter: &evacuationLimiter{ - guard: evLimMtx, - statusCond: evLimCond, - }, + cfg: c, + shards: make(map[string]hashedShard), + closeCh: make(chan struct{}), + setModeCh: make(chan setModeRequest), + evacuateLimiter: 
&evacuationLimiter{}, } } diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index fc6d9ee9c..6ef3846ee 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -2,11 +2,8 @@ package engine import ( "context" - "fmt" "path/filepath" - "runtime/debug" - "strings" - "sync" + "sync/atomic" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" @@ -116,8 +113,7 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1), blobovniczatree.WithPermissions(0o700), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))), + blobovniczatree.WithLogger(test.NewLogger(t))), Policy: func(_ *objectSDK.Object, data []byte) bool { return uint64(len(data)) < smallSize }, @@ -161,74 +157,26 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes var _ qos.Limiter = (*testQoSLimiter)(nil) type testQoSLimiter struct { - t testing.TB - quard sync.Mutex - id int64 - readStacks map[int64][]byte - writeStacks map[int64][]byte + t testing.TB + read atomic.Int64 + write atomic.Int64 } func (t *testQoSLimiter) SetMetrics(qos.Metrics) {} func (t *testQoSLimiter) Close() { - t.quard.Lock() - defer t.quard.Unlock() - - var sb strings.Builder - var seqN int - for _, stack := range t.readStacks { - seqN++ - sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack))) - } - for _, stack := range t.writeStacks { - seqN++ - sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack))) - } - require.True(t.t, seqN == 0, sb.String()) + require.Equal(t.t, int64(0), t.read.Load(), "read requests count after limiter close must be 0") + require.Equal(t.t, int64(0), t.write.Load(), "write 
requests count after limiter close must be 0") } func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) { - t.quard.Lock() - defer t.quard.Unlock() - - stack := debug.Stack() - - t.id++ - id := t.id - - if t.readStacks == nil { - t.readStacks = make(map[int64][]byte) - } - t.readStacks[id] = stack - - return func() { - t.quard.Lock() - defer t.quard.Unlock() - - delete(t.readStacks, id) - }, nil + t.read.Add(1) + return func() { t.read.Add(-1) }, nil } func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) { - t.quard.Lock() - defer t.quard.Unlock() - - stack := debug.Stack() - - t.id++ - id := t.id - - if t.writeStacks == nil { - t.writeStacks = make(map[int64][]byte) - } - t.writeStacks[id] = stack - - return func() { - t.quard.Lock() - defer t.quard.Unlock() - - delete(t.writeStacks, id) - }, nil + t.write.Add(1) + return func() { t.write.Add(-1) }, nil } func (t *testQoSLimiter) SetParentID(string) {} diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go index b75e8686d..c74134500 100644 --- a/pkg/local_object_storage/engine/evacuate_limiter.go +++ b/pkg/local_object_storage/engine/evacuate_limiter.go @@ -95,7 +95,8 @@ func (s *EvacuationState) StartedAt() *time.Time { if s == nil { return nil } - if s.startedAt.IsZero() { + defaultTime := time.Time{} + if s.startedAt == defaultTime { return nil } return &s.startedAt @@ -105,7 +106,8 @@ func (s *EvacuationState) FinishedAt() *time.Time { if s == nil { return nil } - if s.finishedAt.IsZero() { + defaultTime := time.Time{} + if s.finishedAt == defaultTime { return nil } return &s.finishedAt @@ -139,8 +141,7 @@ type evacuationLimiter struct { eg *errgroup.Group cancel context.CancelFunc - guard *sync.RWMutex - statusCond *sync.Cond // used in unit tests + guard sync.RWMutex } func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, 
context.Context, error) { @@ -166,7 +167,6 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res startedAt: time.Now().UTC(), result: result, } - l.statusCond.Broadcast() return l.eg, egCtx, nil } @@ -182,7 +182,6 @@ func (l *evacuationLimiter) Complete(err error) { l.state.processState = EvacuateProcessStateCompleted l.state.errMessage = errMsq l.state.finishedAt = time.Now().UTC() - l.statusCond.Broadcast() l.eg = nil } @@ -217,7 +216,6 @@ func (l *evacuationLimiter) ResetEvacuationStatus() error { l.state = EvacuationState{} l.eg = nil l.cancel = nil - l.statusCond.Broadcast() return nil } diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index f2ba7d994..bd5222b78 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -204,10 +204,11 @@ func TestEvacuateShardObjects(t *testing.T) { func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState { var st *EvacuationState var err error - e.evacuateLimiter.waitForCompleted() - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err) - require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) + require.Eventually(t, func() bool { + st, err = e.GetEvacuationState(context.Background()) + require.NoError(t, err) + return st.ProcessingStatus() == EvacuateProcessStateCompleted + }, 3*time.Second, 10*time.Millisecond) return st } @@ -816,12 +817,3 @@ func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { t.Logf("evacuate took %v\n", time.Since(start)) require.NoError(t, err) } - -func (l *evacuationLimiter) waitForCompleted() { - l.guard.Lock() - defer l.guard.Unlock() - - for l.state.processState != EvacuateProcessStateCompleted { - l.statusCond.Wait() - } -} diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index 7dac9eb97..9d2b1c1b7 100644 --- 
a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -18,7 +18,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool exists := false locked := false - if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(shPrm.Address, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Exists(ctx, shPrm) if err != nil { if client.IsErrObjectAlreadyRemoved(err) { @@ -50,9 +50,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool } return false - }); err != nil { - return false, false, err - } + }) if alreadyRemoved { return false, false, new(apistatus.ObjectAlreadyRemoved) diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 0694c53f3..74c64bbb6 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -78,9 +78,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { Engine: e, } - if err := it.tryGetWithMeta(ctx); err != nil { - return GetRes{}, err - } + it.tryGetWithMeta(ctx) if it.SplitInfo != nil { return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -99,9 +97,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { return GetRes{}, it.OutError } - if err := it.tryGetFromBlobstore(ctx); err != nil { - return GetRes{}, err - } + it.tryGetFromBlobstore(ctx) if it.Object == nil { return GetRes{}, it.OutError @@ -137,8 +133,8 @@ type getShardIterator struct { ecInfoErr *objectSDK.ECInfoError } -func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error { - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { + i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := 
sh.GetMode().NoMetabase() i.ShardPrm.SetIgnoreMeta(noMeta) @@ -191,13 +187,13 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error { }) } -func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error { +func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { + i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already visited. return false diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go index d436dd411..d6892f129 100644 --- a/pkg/local_object_storage/engine/head.go +++ b/pkg/local_object_storage/engine/head.go @@ -82,7 +82,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) shPrm.SetAddress(prm.addr) shPrm.SetRaw(prm.raw) - if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold res, err := sh.Head(ctx, shPrm) if err != nil { @@ -123,9 +123,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) } head = res.Object() return true - }); err != nil { - return HeadRes{}, err - } + }) if head != nil { return HeadRes{head: head}, nil diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index e5f7072e2..c8ee33b53 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -74,7 +74,7 @@ func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error 
{ } func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { - addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) + addrsPerShard, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) if err != nil { return err } @@ -84,6 +84,8 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { shPrm.ForceRemoval() } + var errLocked *apistatus.ObjectLocked + for shardID, addrs := range addrsPerShard { if prm.tombstone != nil { shPrm.SetTarget(*prm.tombstone, addrs...) @@ -101,107 +103,39 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { } if _, err := sh.Inhume(ctx, shPrm); err != nil { - e.reportInhumeError(ctx, err, sh) + switch { + case errors.As(err, &errLocked): + case errors.Is(err, shard.ErrLockObjectRemoval): + case errors.Is(err, shard.ErrReadOnlyMode): + case errors.Is(err, shard.ErrDegradedMode): + default: + e.reportShardError(ctx, sh, "couldn't inhume object in shard", err) + } return err } } - return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm) -} - -func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) { - if err == nil { - return - } - - var errLocked *apistatus.ObjectLocked - switch { - case errors.As(err, &errLocked): - case errors.Is(err, shard.ErrLockObjectRemoval): - case errors.Is(err, shard.ErrReadOnlyMode): - case errors.Is(err, shard.ErrDegradedMode): - default: - e.reportShardError(ctx, hs, "couldn't inhume object in shard", err) - } -} - -// inhumeNotFoundObjects removes object which are not found on any shard. -// -// Besides an object not being found on any shard, it is also important to -// remove it anyway in order to populate the metabase indexes because they are -// responsible for the correct object status, i.e., the status will be `object -// not found` without the indexes, the status will be `object is already -// removed` with the indexes. 
-// -// It is suggested to evenly remove those objects on each shard with the batch -// size equal to 1 + floor(number of objects / number of shards). -func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error { - if len(addrs) == 0 { - return nil - } - - var shPrm shard.InhumePrm - if prm.forceRemoval { - shPrm.ForceRemoval() - } - - numObjectsPerShard := 1 + len(addrs)/len(e.shards) - - var inhumeErr error - itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { - numObjects := min(numObjectsPerShard, len(addrs)) - - if numObjects == 0 { - return true - } - - if prm.tombstone != nil { - shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...) - } else { - shPrm.MarkAsGarbage(addrs[:numObjects]...) - } - addrs = addrs[numObjects:] - - _, inhumeErr = hs.Inhume(ctx, shPrm) - e.reportInhumeError(ctx, inhumeErr, hs) - return inhumeErr != nil - }) - if inhumeErr != nil { - return inhumeErr - } - return itErr + return nil } // groupObjectsByShard groups objects based on the shard(s) they are stored on. // // If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of // the objects are locked. -// -// Returns two sets of objects: found objects which are grouped per shard and -// not found object. Not found objects are objects which are not found on any -// shard. This can happen if a node is a container node but doesn't participate -// in a replica group of the object. 
-func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) { - groups = make(map[string][]oid.Address) +func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (map[string][]oid.Address, error) { + groups := make(map[string][]oid.Address) - var ids []string for _, addr := range addrs { - ids, err = e.findShards(ctx, addr, checkLocked) + ids, err := e.findShards(ctx, addr, checkLocked) if err != nil { - return + return nil, err } - - if len(ids) == 0 { - notFoundObjects = append(notFoundObjects, addr) - continue - } - for _, id := range ids { groups[id] = append(groups[id], addr) } } - return + return groups, nil } // findShards determines the shard(s) where the object is stored. @@ -224,7 +158,7 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL objectExists bool ) - if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { objectExists = false prm.Address = addr @@ -252,6 +186,10 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL default: } + if !objectExists { + return + } + if checkLocked { if isLocked, err := sh.IsLocked(ctx, addr); err != nil { e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, @@ -264,20 +202,11 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL } } - // This exit point must come after checking if the object is locked, - // since the locked index may be populated even if the object doesn't - // exist. - if !objectExists { - return - } - ids = append(ids, sh.ID().String()) // Continue if it's a root object. 
return !isRootObject - }); err != nil { - return nil, err - } + }) if retErr != nil { return nil, retErr @@ -297,8 +226,8 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e var err error var outErr error - if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { - locked, err = h.IsLocked(ctx, addr) + e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { + locked, err = h.Shard.IsLocked(ctx, addr) if err != nil { e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr)) outErr = err @@ -306,9 +235,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e } return locked - }); err != nil { - return false, err - } + }) if locked { return locked, nil @@ -328,17 +255,15 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I var allLocks []oid.ID var outErr error - if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { - locks, err := h.GetLocks(ctx, addr) + e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { + locks, err := h.Shard.GetLocks(ctx, addr) if err != nil { e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr)) outErr = err } allLocks = append(allLocks, locks...) 
return false - }); err != nil { - return nil, err - } + }) if len(allLocks) > 0 { return allLocks, nil } @@ -346,23 +271,20 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I } func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) { - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { sh.HandleExpiredTombstones(ctx, addrs) select { case <-ctx.Done(): - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err())) return true default: return false } - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err)) - } + }) } func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { sh.HandleExpiredLocks(ctx, epoch, lockers) select { @@ -372,13 +294,11 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l default: return false } - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err)) - } + }) } func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) { - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { sh.HandleDeletedLocks(ctx, lockers) select { @@ -388,25 +308,26 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A default: return false } - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err)) - } + }) } func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) { if len(ids) == 0 { return } + idMap, err := e.selectNonExistentIDs(ctx, ids) if err != 
nil { return } + if len(idMap) == 0 { return } + var failed bool var prm shard.ContainerSizePrm - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { + e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -433,15 +354,13 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return len(idMap) == 0 - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) - return - } + }) + if failed || len(idMap) == 0 { return } - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { + e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -459,13 +378,12 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return false - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) - return - } + }) + if failed { return } + for id := range idMap { e.metrics.DeleteContainerSize(id.EncodeToString()) } @@ -475,16 +393,19 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci if len(ids) == 0 { return } + idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } + if len(idMap) == 0 { return } + var failed bool var prm shard.ContainerCountPrm - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { + e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -511,15 +432,13 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return len(idMap) == 0 - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) - return - 
} + }) + if failed || len(idMap) == 0 { return } - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { + e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -537,13 +456,12 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return false - }); err != nil { - e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) - return - } + }) + if failed { return } + for id := range idMap { e.metrics.DeleteContainerCount(id.EncodeToString()) } diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 0e268cd23..10cebfb52 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -11,7 +11,6 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -243,100 +242,3 @@ func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { b.StopTimer() } } - -func TestInhumeIfObjectDoesntExist(t *testing.T) { - const numShards = 4 - - engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine - t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) }) - - t.Run("inhume without tombstone", func(t *testing.T) { - testInhumeIfObjectDoesntExist(t, engine, false, false) - }) - t.Run("inhume with tombstone", func(t *testing.T) { - testInhumeIfObjectDoesntExist(t, engine, true, false) - }) - 
t.Run("force inhume", func(t *testing.T) { - testInhumeIfObjectDoesntExist(t, engine, false, true) - }) - - t.Run("object is locked", func(t *testing.T) { - t.Run("inhume without tombstone", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, engine, false, false) - }) - t.Run("inhume with tombstone", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, engine, true, false) - }) - t.Run("force inhume", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, engine, false, true) - }) - }) -} - -func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { - t.Parallel() - - object := oidtest.Address() - require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce)) - - err := testHeadObject(e, object) - if withTombstone { - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - } else { - require.True(t, client.IsErrObjectNotFound(err)) - } -} - -func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { - t.Parallel() - - object := oidtest.Address() - require.NoError(t, testLockObject(e, object)) - - err := testInhumeObject(t, e, object, withTombstone, withForce) - if !withForce { - var errLocked *apistatus.ObjectLocked - require.ErrorAs(t, err, &errLocked) - return - } - require.NoError(t, err) - - err = testHeadObject(e, object) - if withTombstone { - require.True(t, client.IsErrObjectAlreadyRemoved(err)) - } else { - require.True(t, client.IsErrObjectNotFound(err)) - } -} - -func testLockObject(e *StorageEngine, obj oid.Address) error { - return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()}) -} - -func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error { - tombstone := oidtest.Address() - tombstone.SetContainer(obj.Container()) - - // Due to the tests design it is possible to set both the options, - // however removal with tombstone and force removal 
are exclusive. - require.False(t, withTombstone && withForce) - - var inhumePrm InhumePrm - if withTombstone { - inhumePrm.WithTarget(tombstone, obj) - } else { - inhumePrm.MarkAsGarbage(obj) - } - if withForce { - inhumePrm.WithForceRemoval() - } - return e.Inhume(context.Background(), inhumePrm) -} - -func testHeadObject(e *StorageEngine, obj oid.Address) error { - var headPrm HeadPrm - headPrm.WithAddress(obj) - - _, err := e.Head(context.Background(), headPrm) - return err -} diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index 3b0cf74f9..5d43e59df 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -41,19 +41,11 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { for i := range locked { - st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true) - if err != nil { - return err - } - switch st { + switch e.lockSingle(ctx, idCnr, locker, locked[i], true) { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: - st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false) - if err != nil { - return err - } - switch st { + switch e.lockSingle(ctx, idCnr, locker, locked[i], false) { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: @@ -69,13 +61,13 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l // - 0: fail // - 1: locking irregular object // - 2: ok -func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) { +func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) { // code is pretty similar to inhumeAddr, maybe unify? 
root := false var addrLocked oid.Address addrLocked.SetContainer(idCnr) addrLocked.SetObject(locked) - retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) { defer func() { // if object is root we continue since information about it // can be presented in other shards @@ -92,11 +84,17 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo var siErr *objectSDK.SplitInfoError var eiErr *objectSDK.ECInfoError if errors.As(err, &eiErr) { - eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr) - if !ok { - return false + eclocked := []oid.ID{locked} + for _, chunk := range eiErr.ECInfo().Chunks { + var objID oid.ID + err = objID.ReadFromV2(chunk.ID) + if err != nil { + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) + return false + } + eclocked = append(eclocked, objID) } - err = sh.Lock(ctx, idCnr, locker, eclocked) if err != nil { e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), @@ -139,18 +137,3 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo }) return } - -func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) { - eclocked := []oid.ID{locked} - for _, chunk := range eiErr.ECInfo().Chunks { - var objID oid.ID - err := objID.ReadFromV2(chunk.ID) - if err != nil { - e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), - zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) - return nil, false - } - eclocked = append(eclocked, objID) - } - return eclocked, true -} diff --git a/pkg/local_object_storage/engine/put.go 
b/pkg/local_object_storage/engine/put.go index 10cf5ffd5..b348d13a2 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -96,7 +96,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { } var shRes putToShardRes - if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { + e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { e.mtx.RLock() _, ok := e.shards[sh.ID().String()] e.mtx.RUnlock() @@ -106,9 +106,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { } shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer) return shRes.status != putToShardUnknown - }); err != nil { - return err - } + }) switch shRes.status { case putToShardUnknown: return errPutShard diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index 7ec4742d8..a468cf594 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -93,9 +93,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error Engine: e, } - if err := it.tryGetWithMeta(ctx); err != nil { - return RngRes{}, err - } + it.tryGetWithMeta(ctx) if it.SplitInfo != nil { return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -111,9 +109,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error return RngRes{}, it.OutError } - if err := it.tryGetFromBlobstor(ctx); err != nil { - return RngRes{}, err - } + it.tryGetFromBlobstor(ctx) if it.Object == nil { return RngRes{}, it.OutError @@ -161,8 +157,8 @@ type getRangeShardIterator struct { Engine *StorageEngine } -func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error { - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { + 
i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.HasDegraded = i.HasDegraded || noMeta i.ShardPrm.SetIgnoreMeta(noMeta) @@ -213,13 +209,13 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error { }) } -func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error { +func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { + i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already processed it without a metabase. return false diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 4243a5481..fc8b4a9a7 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -54,15 +54,14 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe defer elapsed("Select", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - var sErr error - res, sErr = e._select(ctx, prm) - return sErr + res = e._select(ctx, prm) + return nil }) return } -func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) { +func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) @@ -70,7 +69,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, shPrm.SetContainerID(prm.cnr, prm.indexedContainer) shPrm.SetFilters(prm.filters) - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) 
(stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.Select(ctx, shPrm) if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -85,13 +84,11 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, } return false - }); err != nil { - return SelectRes{}, err - } + }) return SelectRes{ addrList: addrList, - }, nil + } } // List returns `limit` available physically storage object addresses in engine. @@ -101,21 +98,20 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) { defer elapsed("List", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - var lErr error - res, lErr = e.list(ctx, limit) - return lErr + res = e.list(ctx, limit) + return nil }) return } -func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) { +func (e *StorageEngine) list(ctx context.Context, limit uint64) SelectRes { addrList := make([]oid.Address, 0, limit) uniqueMap := make(map[string]struct{}) ln := uint64(0) // consider iterating over shuffled shards - if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.List(ctx) // consider limit result of shard iterator if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -134,13 +130,11 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro } return false - }); err != nil { - return SelectRes{}, err - } + }) return SelectRes{ addrList: addrList, - }, nil + } } // Select selects objects from local storage using provided filters. 
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 69067c500..a38c85151 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -118,7 +118,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err) } - e.metrics.SetMode(sh.ID().String(), sh.GetMode()) + e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode()) return sh.ID(), nil } @@ -280,32 +280,20 @@ func (e *StorageEngine) unsortedShards() []hashedShard { return shards } -func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error { +func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) { for i, sh := range e.sortShards(addr) { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } if handler(i, sh) { break } } - return nil } -func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error { +func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) { for _, sh := range e.unsortedShards() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } if handler(sh) { break } } - return nil } // SetShardMode sets mode of the shard with provided identifier. @@ -330,6 +318,8 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M // HandleNewEpoch notifies every shard about NewEpoch event. 
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { + ev := shard.EventNewEpoch(epoch) + e.mtx.RLock() defer e.mtx.RUnlock() @@ -337,7 +327,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { select { case <-ctx.Done(): return - case sh.NotificationChannel() <- epoch: + case sh.NotificationChannel() <- ev: default: e.log.Debug(ctx, logs.ShardEventProcessingInProgress, zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID())) @@ -445,7 +435,7 @@ func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address var siErr *objectSDK.SplitInfoError var ecErr *objectSDK.ECInfoError - if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { + e.iterateOverUnsortedShards(func(hs hashedShard) (stop bool) { res, exErr := hs.Exists(ctx, prm) if exErr != nil { if client.IsErrObjectAlreadyRemoved(exErr) { @@ -475,8 +465,6 @@ func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address info = append(info, hs.DumpInfo()) } return false - }); itErr != nil { - return nil, itErr - } + }) return info, err } diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go index 52b199b0b..383c596af 100644 --- a/pkg/local_object_storage/internal/testutil/generators.go +++ b/pkg/local_object_storage/internal/testutil/generators.go @@ -1,9 +1,7 @@ package testutil import ( - cryptorand "crypto/rand" "encoding/binary" - "math/rand" "sync/atomic" "testing" @@ -11,6 +9,7 @@ import ( objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" ) // AddressGenerator is the interface of types that generate object addresses. 
@@ -62,7 +61,7 @@ var _ ObjectGenerator = &SeqObjGenerator{} func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object { data := make([]byte, sz) - _, _ = cryptorand.Read(data) + _, _ = rand.Read(data) obj := GenerateObjectWithCIDWithPayload(cid, data) obj.SetID(oid) return obj @@ -83,7 +82,7 @@ var _ ObjectGenerator = &RandObjGenerator{} func (g *RandObjGenerator) Next() *objectSDK.Object { var id oid.ID - _, _ = cryptorand.Read(id[:]) + _, _ = rand.Read(id[:]) return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize) } diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go index 1087e40be..60e9211d5 100644 --- a/pkg/local_object_storage/internal/testutil/object.go +++ b/pkg/local_object_storage/internal/testutil/object.go @@ -1,7 +1,6 @@ package testutil import ( - "crypto/rand" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -12,6 +11,7 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" + "golang.org/x/exp/rand" ) const defaultDataSize = 32 diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index 9a5a6e574..d338e228f 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -363,12 +363,12 @@ func (db *DB) deleteObject( func parentLength(tx *bbolt.Tx, addr oid.Address) int { bucketName := make([]byte, bucketKeySize) - bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName)) + bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:])) if bkt == nil { return 0 } - lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName))) + lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:]))) if err != nil { return 0 } @@ -376,12 +376,11 @@ func parentLength(tx *bbolt.Tx, 
addr oid.Address) int { return len(lst) } -func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { +func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) { bkt := tx.Bucket(item.name) if bkt != nil { - return bkt.Delete(item.key) + _ = bkt.Delete(item.key) // ignore error, best effort there } - return nil } func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -406,16 +405,19 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { // if list empty, remove the key from bucket if len(lst) == 0 { - return bkt.Delete(item.key) + _ = bkt.Delete(item.key) // ignore error, best effort there + + return nil } // if list is not empty, then update it encodedLst, err := encodeList(lst) if err != nil { - return err + return nil // ignore error, best effort there } - return bkt.Put(item.key, encodedLst) + _ = bkt.Put(item.key, encodedLst) // ignore error, best effort there + return nil } func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -478,47 +480,35 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error return ErrUnknownObjectType } - if err := delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: bucketName, key: objKey, - }); err != nil { - return err - } + }) } else { - if err := delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: parentBucketName(cnr, bucketName), key: objKey, - }); err != nil { - return err - } + }) } - if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index + delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index name: smallBucketName(cnr, bucketName), key: objKey, - }); err != nil { - return err - } - if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index + }) + delUniqueIndexItem(tx, namedBucketItem{ // remove from root index name: rootBucketName(cnr, bucketName), key: objKey, - }); err != nil { - return err - } + }) if 
expEpoch, ok := hasExpirationEpoch(obj); ok { - if err := delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: expEpochToObjectBucketName, key: expirationEpochKey(expEpoch, cnr, addr.Object()), - }); err != nil { - return err - } - if err := delUniqueIndexItem(tx, namedBucketItem{ + }) + delUniqueIndexItem(tx, namedBucketItem{ name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)), key: objKey, - }); err != nil { - return err - } + }) } return nil @@ -545,12 +535,10 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. // also drop EC parent root info if current EC chunk is the last one if !hasAnyChunks { - if err := delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(ech.Parent(), make([]byte, objectKeySize)), - }); err != nil { - return err - } + }) } if ech.ParentSplitParentID() == nil { @@ -584,10 +572,11 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. 
} // drop split info - return delUniqueIndexItem(tx, namedBucketItem{ + delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)), }) + return nil } func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool { diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index f4cb9e53b..f73c2b4f6 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -7,7 +7,6 @@ import ( "slices" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -64,7 +63,9 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid. return ErrReadOnlyMode } - assert.False(len(locked) == 0, "empty locked list") + if len(locked) == 0 { + panic("empty locked list") + } err := db.lockInternal(locked, cnr, locker) success = err == nil diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index 4ad83332b..80851f1c4 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -279,7 +278,9 @@ func objectKey(obj oid.ID, key []byte) []byte { // // firstIrregularObjectType(tx, cnr, obj) usage allows getting object type. 
func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type { - assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType") + if len(objs) == 0 { + panic("empty object list in firstIrregularObjectType") + } var keys [2][1 + cidSize]byte diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 897b37ea0..fc7cdaabc 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -1582,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error { func (t *boltForest) logFromBytes(lm *Move, data []byte) error { lm.Child = binary.LittleEndian.Uint64(data) lm.Parent = binary.LittleEndian.Uint64(data[8:]) - return lm.FromBytes(data[16:]) + return lm.Meta.FromBytes(data[16:]) } func (t *boltForest) logToBytes(lm *Move) []byte { w := io.NewBufBinWriter() - size := 8 + 8 + lm.Size() + 1 + size := 8 + 8 + lm.Meta.Size() + 1 // if lm.HasOld { // size += 8 + lm.Old.Meta.Size() // } @@ -1595,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte { w.Grow(size) w.WriteU64LE(lm.Child) w.WriteU64LE(lm.Parent) - lm.EncodeBinary(w.BinWriter) + lm.Meta.EncodeBinary(w.BinWriter) // w.WriteBool(lm.HasOld) // if lm.HasOld { // w.WriteU64LE(lm.Old.Parent) diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index ebfd0bcc0..b5320e42d 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -177,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI var res []NodeInfo for _, nodeID := range nodeIDs { - children := s.getChildren(nodeID) + children := s.tree.getChildren(nodeID) for _, childID := range children { var found bool for _, kv := range s.infoMap[childID].Meta.Items { @@ -222,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str return nil, 
ErrTreeNotFound } - children := s.getChildren(nodeID) + children := s.tree.getChildren(nodeID) res := make([]NodeInfo, 0, len(children)) for _, childID := range children { res = append(res, NodeInfo{ diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go index 28b7faec8..ce7b3db1e 100644 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ b/pkg/local_object_storage/pilorama/inmemory.go @@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree { // undo un-does op and changes s in-place. func (s *memoryTree) undo(op *move) { if op.HasOld { - s.infoMap[op.Child] = op.Old + s.tree.infoMap[op.Child] = op.Old } else { - delete(s.infoMap, op.Child) + delete(s.tree.infoMap, op.Child) } } @@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move { }, } - shouldPut := !s.isAncestor(op.Child, op.Parent) - p, ok := s.infoMap[op.Child] + shouldPut := !s.tree.isAncestor(op.Child, op.Parent) + p, ok := s.tree.infoMap[op.Child] if ok { lm.HasOld = true lm.Old = p @@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move { p.Meta = m p.Parent = op.Parent - s.infoMap[op.Child] = p + s.tree.infoMap[op.Child] = p return lm } @@ -192,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { } var nodes []Node - var lastTS Timestamp + var lastTs Timestamp children := t.getChildren(curNode) for i := range children { @@ -200,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { fileName := string(info.Meta.GetAttr(attr)) if fileName == path[len(path)-1] { if latest { - if info.Meta.Time >= lastTS { + if info.Meta.Time >= lastTs { nodes = append(nodes[:0], children[i]) } } else { diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index d489b8b0d..6dee2da3f 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -108,17 +108,19 @@ func (s *Shard) Init(ctx context.Context) error { 
s.updateMetrics(ctx) s.gc = &gc{ - gcCfg: &s.gcCfg, - remover: s.removeGarbage, - stopChannel: make(chan struct{}), - newEpochChan: make(chan uint64), - newEpochHandlers: &newEpochHandlers{ - cancelFunc: func() {}, - handlers: []newEpochHandler{ - s.collectExpiredLocks, - s.collectExpiredObjects, - s.collectExpiredTombstones, - s.collectExpiredMetrics, + gcCfg: &s.gcCfg, + remover: s.removeGarbage, + stopChannel: make(chan struct{}), + eventChan: make(chan Event), + mEventHandler: map[eventType]*eventHandlers{ + eventNewEpoch: { + cancelFunc: func() {}, + handlers: []eventHandler{ + s.collectExpiredLocks, + s.collectExpiredObjects, + s.collectExpiredTombstones, + s.collectExpiredMetrics, + }, }, }, } @@ -214,8 +216,8 @@ func (s *Shard) refillMetabase(ctx context.Context) error { } eg, egCtx := errgroup.WithContext(ctx) - if s.refillMetabaseWorkersCount > 0 { - eg.SetLimit(s.refillMetabaseWorkersCount) + if s.cfg.refillMetabaseWorkersCount > 0 { + eg.SetLimit(s.cfg.refillMetabaseWorkersCount) } var completedCount uint64 @@ -363,7 +365,6 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object // Close releases all Shard's components. func (s *Shard) Close(ctx context.Context) error { - unlock := s.lockExclusive() if s.rb != nil { s.rb.Stop(ctx, s.log) } @@ -389,19 +390,15 @@ func (s *Shard) Close(ctx context.Context) error { } } - if s.opsLimiter != nil { - s.opsLimiter.Close() - } - - unlock() - - // GC waits for handlers and remover to complete. Handlers may try to lock shard's lock. - // So to prevent deadlock GC stopping is outside of exclusive lock. // If Init/Open was unsuccessful gc can be nil. 
if s.gc != nil { s.gc.stop(ctx) } + if s.opsLimiter != nil { + s.opsLimiter.Close() + } + return lastErr } diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index a262a52cb..32a377cd5 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -33,14 +33,41 @@ type TombstoneSource interface { IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool } -type newEpochHandler func(context.Context, uint64) +// Event represents class of external events. +type Event interface { + typ() eventType +} -type newEpochHandlers struct { +type eventType int + +const ( + _ eventType = iota + eventNewEpoch +) + +type newEpoch struct { + epoch uint64 +} + +func (e newEpoch) typ() eventType { + return eventNewEpoch +} + +// EventNewEpoch returns new epoch event. +func EventNewEpoch(e uint64) Event { + return newEpoch{ + epoch: e, + } +} + +type eventHandler func(context.Context, Event) + +type eventHandlers struct { prevGroup sync.WaitGroup cancelFunc context.CancelFunc - handlers []newEpochHandler + handlers []eventHandler } type gcRunResult struct { @@ -82,10 +109,10 @@ type gc struct { remover func(context.Context) gcRunResult - // newEpochChan is used only for listening for the new epoch event. + // eventChan is used only for listening for the new epoch event. // It is ok to keep opened, we are listening for context done when writing in it. 
- newEpochChan chan uint64 - newEpochHandlers *newEpochHandlers + eventChan chan Event + mEventHandler map[eventType]*eventHandlers } type gcCfg struct { @@ -115,7 +142,15 @@ func defaultGCCfg() gcCfg { } func (gc *gc) init(ctx context.Context) { - gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers)) + sz := 0 + + for _, v := range gc.mEventHandler { + sz += len(v.handlers) + } + + if sz > 0 { + gc.workerPool = gc.workerPoolInit(sz) + } ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) gc.wg.Add(2) go gc.tickRemover(ctx) @@ -133,7 +168,7 @@ func (gc *gc) listenEvents(ctx context.Context) { case <-ctx.Done(): gc.log.Warn(ctx, logs.ShardStopEventListenerByContext) return - case event, ok := <-gc.newEpochChan: + case event, ok := <-gc.eventChan: if !ok { gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel) return @@ -144,33 +179,38 @@ func (gc *gc) listenEvents(ctx context.Context) { } } -func (gc *gc) handleEvent(ctx context.Context, epoch uint64) { - gc.newEpochHandlers.cancelFunc() - gc.newEpochHandlers.prevGroup.Wait() +func (gc *gc) handleEvent(ctx context.Context, event Event) { + v, ok := gc.mEventHandler[event.typ()] + if !ok { + return + } + + v.cancelFunc() + v.prevGroup.Wait() var runCtx context.Context - runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx) + runCtx, v.cancelFunc = context.WithCancel(ctx) - gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers)) + v.prevGroup.Add(len(v.handlers)) - for i := range gc.newEpochHandlers.handlers { + for i := range v.handlers { select { case <-ctx.Done(): return default: } - h := gc.newEpochHandlers.handlers[i] + h := v.handlers[i] err := gc.workerPool.Submit(func() { - defer gc.newEpochHandlers.prevGroup.Done() - h(runCtx, epoch) + defer v.prevGroup.Done() + h(runCtx, event) }) if err != nil { gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool, zap.Error(err), ) - gc.newEpochHandlers.prevGroup.Done() + v.prevGroup.Done() } } } @@ 
-227,9 +267,6 @@ func (gc *gc) stop(ctx context.Context) { gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() - - gc.newEpochHandlers.cancelFunc() - gc.newEpochHandlers.prevGroup.Wait() } // iterates over metabase and deletes objects @@ -320,12 +357,12 @@ func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) { } func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) { - workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount) - batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize) + workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount) + batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize) return } -func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { +func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { var err error startedAt := time.Now() @@ -333,8 +370,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular) }() - s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -343,7 +380,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock { batch 
= append(batch, o.Address()) @@ -391,16 +428,6 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) return } - s.handleExpiredObjectsUnsafe(ctx, expired) -} - -func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) { - select { - case <-ctx.Done(): - return - default: - } - expired, err := s.getExpiredWithLinked(ctx, expired) if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) @@ -459,7 +486,7 @@ func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeR return s.metaBase.Inhume(ctx, inhumePrm) } -func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { +func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { var err error startedAt := time.Now() @@ -467,6 +494,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone) }() + epoch := e.(newEpoch).epoch log := s.log.With(zap.Uint64("epoch", epoch)) log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling) @@ -499,8 +527,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { return } - var release qos.ReleaseFunc - release, err = s.opsLimiter.ReadRequest(ctx) + release, err := s.opsLimiter.ReadRequest(ctx) if err != nil { log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() @@ -538,7 +565,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { } } -func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { +func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { var err error startedAt := time.Now() @@ -546,8 +573,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock) }() - s.log.Debug(ctx, 
logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -557,14 +584,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { if o.Type() == objectSDK.TypeLock { batch = append(batch, o.Address()) if len(batch) == batchSize { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, epoch, expired) + s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) return egCtx.Err() }) batch = make([]oid.Address, 0, batchSize) @@ -578,7 +605,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { if len(batch) > 0 { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, epoch, expired) + s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) return egCtx.Err() }) } @@ -621,6 +648,13 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo } func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return nil, ErrDegradedMode + } + release, err := s.opsLimiter.ReadRequest(ctx) if err != nil { return nil, err @@ -670,10 +704,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston // HandleExpiredLocks unlocks all objects which were locked by lockers. 
// If successful, marks lockers themselves as garbage. func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { + if s.GetMode().NoMetabase() { return } @@ -731,15 +762,12 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc return } - s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked) + s.handleExpiredObjects(ctx, expiredUnlocked) } // HandleDeletedLocks unlocks all objects which were locked by lockers. func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { + if s.GetMode().NoMetabase() { return } @@ -756,15 +784,17 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { } } -// NotificationChannel returns channel for new epoch events. -func (s *Shard) NotificationChannel() chan<- uint64 { - return s.gc.newEpochChan +// NotificationChannel returns channel for shard events. 
+func (s *Shard) NotificationChannel() chan<- Event { + return s.gc.eventChan } -func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) { +func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics") defer span.End() + epoch := e.(newEpoch).epoch + s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go index 54d2f1510..9998bbae2 100644 --- a/pkg/local_object_storage/shard/gc_internal_test.go +++ b/pkg/local_object_storage/shard/gc_internal_test.go @@ -37,8 +37,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), + blobovniczatree.WithLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index f512a488a..e3670b441 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { require.NoError(t, err) epoch.Value = 105 - sh.gc.handleEvent(context.Background(), epoch.Value) + sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) var getPrm GetPrm getPrm.SetAddress(objectCore.AddressOf(obj)) @@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { require.True(t, errors.As(err, &splitInfoError), "split info must be 
provided") epoch.Value = 105 - sh.gc.handleEvent(context.Background(), epoch.Value) + sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) _, err = sh.Get(context.Background(), getPrm) require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires") diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index 7391adef2..b233b705c 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -45,7 +45,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { } shardID := s.info.ID.String() - s.metricsWriter.SetShardID(shardID) + s.cfg.metricsWriter.SetShardID(shardID) if s.writeCache != nil && s.writeCache.GetMetrics() != nil { s.writeCache.GetMetrics().SetShardID(shardID) } diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 3878a65cd..5caf3641f 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -28,10 +28,9 @@ func TestShard_Lock(t *testing.T) { var sh *Shard rootPath := t.TempDir() - l := logger.NewLoggerWrapper(zap.NewNop()) opts := []Option{ WithID(NewIDFromBytes([]byte{})), - WithLogger(l), + WithLogger(logger.NewLoggerWrapper(zap.NewNop())), WithBlobStorOptions( blobstor.WithStorages([]blobstor.SubStorage{ { diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go index 06fe9f511..146e834cc 100644 --- a/pkg/local_object_storage/shard/range_test.go +++ b/pkg/local_object_storage/shard/range_test.go @@ -79,8 +79,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), + blobovniczatree.WithLogger(test.NewLogger(t)), 
blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index d89b56266..b9ec05f01 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -205,7 +205,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option { func WithLogger(l *logger.Logger) Option { return func(c *cfg) { c.log = l - c.gcCfg.log = l.WithTag(logger.TagGC) + c.gcCfg.log = l } } @@ -218,7 +218,7 @@ func WithWriteCache(use bool) Option { // hasWriteCache returns bool if write cache exists on shards. func (s *Shard) hasWriteCache() bool { - return s.useWriteCache + return s.cfg.useWriteCache } // NeedRefillMetabase returns true if metabase is needed to be refilled. @@ -379,15 +379,15 @@ func WithLimiter(l qos.Limiter) Option { } func (s *Shard) fillInfo() { - s.info.MetaBaseInfo = s.metaBase.DumpInfo() - s.info.BlobStorInfo = s.blobStor.DumpInfo() - s.info.Mode = s.GetMode() + s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo() + s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo() + s.cfg.info.Mode = s.GetMode() - if s.useWriteCache { - s.info.WriteCacheInfo = s.writeCache.DumpInfo() + if s.cfg.useWriteCache { + s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo() } if s.pilorama != nil { - s.info.PiloramaInfo = s.pilorama.DumpInfo() + s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo() } } @@ -454,57 +454,57 @@ func (s *Shard) updateMetrics(ctx context.Context) { s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic) s.setContainerObjectsCount(contID.EncodeToString(), user, count.User) } - s.metricsWriter.SetMode(s.info.Mode) + s.cfg.metricsWriter.SetMode(s.info.Mode) } // incObjectCounter increment both physical and logical object // counters. 
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) { - s.metricsWriter.IncObjectCounter(physical) - s.metricsWriter.IncObjectCounter(logical) - s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) - s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) + s.cfg.metricsWriter.IncObjectCounter(physical) + s.cfg.metricsWriter.IncObjectCounter(logical) + s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) + s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) if isUser { - s.metricsWriter.IncObjectCounter(user) - s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) + s.cfg.metricsWriter.IncObjectCounter(user) + s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) } } func (s *Shard) decObjectCounterBy(typ string, v uint64) { if v > 0 { - s.metricsWriter.AddToObjectCounter(typ, -int(v)) + s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v)) } } func (s *Shard) setObjectCounterBy(typ string, v uint64) { if v > 0 { - s.metricsWriter.SetObjectCounter(typ, v) + s.cfg.metricsWriter.SetObjectCounter(typ, v) } } func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) { for cnrID, count := range byCnr { if count.Phy > 0 { - s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) + s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) } if count.Logic > 0 { - s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) + s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) } if count.User > 0 { - s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) + s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) } } } func (s *Shard) addToContainerSize(cnr string, size int64) { if size != 0 { - s.metricsWriter.AddToContainerSize(cnr, 
size) + s.cfg.metricsWriter.AddToContainerSize(cnr, size) } } func (s *Shard) addToPayloadSize(size int64) { if size != 0 { - s.metricsWriter.AddToPayloadSize(size) + s.cfg.metricsWriter.AddToPayloadSize(size) } } diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go index 84be71c4d..f9ee34488 100644 --- a/pkg/local_object_storage/shard/shard_test.go +++ b/pkg/local_object_storage/shard/shard_test.go @@ -60,8 +60,7 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), - blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), + blobovniczatree.WithLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go index 0e020b36e..ddc4101be 100644 --- a/pkg/local_object_storage/writecache/limiter.go +++ b/pkg/local_object_storage/writecache/limiter.go @@ -3,8 +3,6 @@ package writecache import ( "errors" "sync" - - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) var errLimiterClosed = errors.New("acquire failed: limiter closed") @@ -47,11 +45,17 @@ func (l *flushLimiter) release(size uint64) { l.cond.L.Lock() defer l.cond.L.Unlock() - assert.True(l.size >= size, "flushLimiter: invalid size") - l.size -= size + if l.size >= size { + l.size -= size + } else { + panic("flushLimiter: invalid size") + } - assert.True(l.count > 0, "flushLimiter: invalid count") - l.count-- + if l.count > 0 { + l.count-- + } else { + panic("flushLimiter: invalid count") + } l.cond.Broadcast() } diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index 
a4f98ad06..dbbe66c19 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -5,6 +5,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "go.uber.org/zap" ) // Option represents write-cache configuration option. @@ -45,7 +46,7 @@ type options struct { // WithLogger sets logger. func WithLogger(log *logger.Logger) Option { return func(o *options) { - o.log = log + o.log = log.With(zap.String("component", "WriteCache")) } } diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go index 7ed511318..70b17eb8e 100644 --- a/pkg/local_object_storage/writecache/writecache.go +++ b/pkg/local_object_storage/writecache/writecache.go @@ -52,7 +52,7 @@ type Cache interface { // MainStorage is the interface of the underlying storage of Cache implementations. type MainStorage interface { - Compressor() *compression.Compressor + Compressor() *compression.Config Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error) Put(context.Context, common.PutPrm) (common.PutRes, error) } diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index aab058d27..e63d926e0 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -9,7 +9,6 @@ import ( "sync/atomic" "time" - nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" @@ -61,9 +60,6 @@ type Client struct { rpcActor *actor.Actor // neo-go RPC actor gasToken *nep17.Token // neo-go GAS token wrapper rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper - nnsHash util.Uint160 // NNS contract hash - - nnsReader *nnsClient.ContractReader // NNS contract wrapper acc *wallet.Account 
// neo account accAddr util.Uint160 // account's address @@ -98,12 +94,27 @@ type Client struct { type cache struct { m sync.RWMutex + nnsHash *util.Uint160 gKey *keys.PublicKey txHeights *lru.Cache[util.Uint256, uint32] metrics metrics.MorphCacheMetrics } +func (c *cache) nns() *util.Uint160 { + c.m.RLock() + defer c.m.RUnlock() + + return c.nnsHash +} + +func (c *cache) setNNSHash(nnsHash util.Uint160) { + c.m.Lock() + defer c.m.Unlock() + + c.nnsHash = &nnsHash +} + func (c *cache) groupKey() *keys.PublicKey { c.m.RLock() defer c.m.RUnlock() @@ -122,6 +133,7 @@ func (c *cache) invalidate() { c.m.Lock() defer c.m.Unlock() + c.nnsHash = nil c.gKey = nil c.txHeights.Purge() } @@ -151,6 +163,20 @@ func (e *notHaltStateError) Error() string { ) } +// implementation of error interface for FrostFS-specific errors. +type frostfsError struct { + err error +} + +func (e frostfsError) Error() string { + return fmt.Sprintf("frostfs error: %v", e.err) +} + +// wraps FrostFS-specific error into frostfsError. Arg must not be nil. +func wrapFrostFSError(err error) error { + return frostfsError{err} +} + // Invoke invokes contract method by sending transaction into blockchain. // Returns valid until block value. // Supported args types: int64, string, util.Uint160, []byte and bool. 
@@ -214,7 +240,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int if err != nil { return err } else if val.State != HaltState { - return ¬HaltStateError{state: val.State, exception: val.FaultException} + return wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException}) } arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err) @@ -278,7 +304,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) ( } if val.State != HaltState { - return nil, ¬HaltStateError{state: val.State, exception: val.FaultException} + return nil, wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException}) } success = true @@ -565,7 +591,6 @@ func (c *Client) setActor(act *actor.Actor) { c.rpcActor = act c.gasToken = nep17.New(act, gas.Hash) c.rolemgmt = rolemgmt.New(act) - c.nnsReader = nnsClient.NewReader(act, c.nnsHash) } func (c *Client) GetActor() *actor.Actor { diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index e4dcd0db7..d061747bb 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -145,11 +145,6 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er if cli.client == nil { return nil, ErrNoHealthyEndpoint } - cs, err := cli.client.GetContractStateByID(nnsContractID) - if err != nil { - return nil, fmt.Errorf("resolve nns hash: %w", err) - } - cli.nnsHash = cs.Hash cli.setActor(act) go cli.closeWaiter(ctx) diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 3f6aed506..fcdb70e3f 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -2,6 +2,7 @@ package netmap import ( "context" + "errors" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -25,24 +26,44 @@ const ( // MaxObjectSize receives max object size configuration // value through the Netmap contract call. 
func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, MaxObjectSizeConfig) + objectSize, err := c.readUInt64Config(ctx, MaxObjectSizeConfig) + if err != nil { + return 0, err + } + + return objectSize, nil } // EpochDuration returns number of sidechain blocks per one FrostFS epoch. func (c *Client) EpochDuration(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, EpochDurationConfig) + epochDuration, err := c.readUInt64Config(ctx, EpochDurationConfig) + if err != nil { + return 0, err + } + + return epochDuration, nil } // ContainerFee returns fee paid by container owner to each alphabet node // for container registration. func (c *Client) ContainerFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, ContainerFeeConfig) + fee, err := c.readUInt64Config(ctx, ContainerFeeConfig) + if err != nil { + return 0, err + } + + return fee, nil } // ContainerAliasFee returns additional fee paid by container owner to each // alphabet node for container nice name registration. func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, ContainerAliasFeeConfig) + fee, err := c.readUInt64Config(ctx, ContainerAliasFeeConfig) + if err != nil { + return 0, err + } + + return fee, nil } // HomomorphicHashDisabled returns global configuration value of homomorphic hashing @@ -56,13 +77,23 @@ func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) { // InnerRingCandidateFee returns global configuration value of fee paid by // node to be in inner ring candidates list. func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, IrCandidateFeeConfig) + fee, err := c.readUInt64Config(ctx, IrCandidateFeeConfig) + if err != nil { + return 0, err + } + + return fee, nil } // WithdrawFee returns global configuration value of fee paid by user to // withdraw assets from FrostFS contract. 
func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) { - return c.readUInt64Config(ctx, WithdrawFeeConfig) + fee, err := c.readUInt64Config(ctx, WithdrawFeeConfig) + if err != nil { + return 0, err + } + + return fee, nil } // MaintenanceModeAllowed reads admission of "maintenance" state from the @@ -75,27 +106,29 @@ func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) { } func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) { - v, err := c.config(ctx, []byte(key)) + v, err := c.config(ctx, []byte(key), IntegerAssert) if err != nil { return 0, fmt.Errorf("read netconfig value '%s': %w", key, err) } - bi, err := v.TryInteger() - if err != nil { - return 0, err - } - return bi.Uint64(), nil + // IntegerAssert is guaranteed to return int64 if the error is nil. + return uint64(v.(int64)), nil } // reads boolean value by the given key from the FrostFS network configuration // stored in the Sidechain. Returns false if key is not presented. func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) { - v, err := c.config(ctx, []byte(key)) + v, err := c.config(ctx, []byte(key), BoolAssert) if err != nil { + if errors.Is(err, ErrConfigNotFound) { + return false, nil + } + return false, fmt.Errorf("read netconfig value '%s': %w", key, err) } - return v.TryBool() + // BoolAssert is guaranteed to return bool if the error is nil. + return v.(bool), nil } // SetConfigPrm groups parameters of SetConfig operation. @@ -244,11 +277,15 @@ func bytesToBool(val []byte) bool { return false } +// ErrConfigNotFound is returned when the requested key was not found +// in the network config (returned value is `Null`). +var ErrConfigNotFound = errors.New("config value not found") + // config performs the test invoke of get config value // method of FrostFS Netmap contract. // // Returns ErrConfigNotFound if config key is not found in the contract. 
-func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) { +func (c *Client) config(ctx context.Context, key []byte, assert func(stackitem.Item) (any, error)) (any, error) { prm := client.TestInvokePrm{} prm.SetMethod(configMethod) prm.SetArgs(key) @@ -264,7 +301,26 @@ func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) configMethod, ln) } - return items[0], nil + if _, ok := items[0].(stackitem.Null); ok { + return nil, ErrConfigNotFound + } + + return assert(items[0]) +} + +// IntegerAssert converts stack item to int64. +func IntegerAssert(item stackitem.Item) (any, error) { + return client.IntFromStackItem(item) +} + +// StringAssert converts stack item to string. +func StringAssert(item stackitem.Item) (any, error) { + return client.StringFromStackItem(item) +} + +// BoolAssert converts stack item to bool. +func BoolAssert(item stackitem.Item) (any, error) { + return client.BoolFromStackItem(item) } // iterateRecords iterates over all config records and passes them to f. diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go index bc00eb889..f292dccf1 100644 --- a/pkg/morph/client/nns.go +++ b/pkg/morph/client/nns.go @@ -8,12 +8,14 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" - nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" + "github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" + "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" ) const ( @@ -35,8 +37,12 @@ const ( NNSPolicyContractName = "policy.frostfs" ) -// ErrNNSRecordNotFound means that there is no such record in NNS contract. 
-var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") +var ( + // ErrNNSRecordNotFound means that there is no such record in NNS contract. + ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") + + errEmptyResultStack = errors.New("returned result stack is empty") +) // NNSAlphabetContractName returns contract name of the alphabet contract in NNS // based on alphabet index. @@ -55,36 +61,97 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) { return util.Uint160{}, ErrConnectionLost } - sh, err = nnsResolve(c.nnsReader, name) + nnsHash, err := c.NNSHash() + if err != nil { + return util.Uint160{}, err + } + + sh, err = nnsResolve(c.client, nnsHash, name) if err != nil { return sh, fmt.Errorf("NNS.resolve: %w", err) } return sh, nil } -func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) { - available, err := r.IsAvailable(domain) +// NNSHash returns NNS contract hash. 
+func (c *Client) NNSHash() (util.Uint160, error) { + c.switchLock.RLock() + defer c.switchLock.RUnlock() + + if c.inactive { + return util.Uint160{}, ErrConnectionLost + } + + success := false + startedAt := time.Now() + + defer func() { + c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt)) + }() + + nnsHash := c.cache.nns() + + if nnsHash == nil { + cs, err := c.client.GetContractStateByID(nnsContractID) + if err != nil { + return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err) + } + + c.cache.setNNSHash(cs.Hash) + nnsHash = &cs.Hash + } + success = true + return *nnsHash, nil +} + +func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) { + found, err := exists(c, nnsHash, domain) if err != nil { return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err) } - if available { + if !found { return nil, ErrNNSRecordNotFound } - return r.Resolve(domain, big.NewInt(int64(nns.TXT))) + result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{ + { + Type: smartcontract.StringType, + Value: domain, + }, + { + Type: smartcontract.IntegerType, + Value: big.NewInt(int64(nns.TXT)), + }, + }, nil) + if err != nil { + return nil, err + } + if result.State != vmstate.Halt.String() { + return nil, fmt.Errorf("invocation failed: %s", result.FaultException) + } + if len(result.Stack) == 0 { + return nil, errEmptyResultStack + } + return result.Stack[0], nil } -func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) { - arr, err := nnsResolveItem(r, domain) +func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) { + res, err := nnsResolveItem(c, nnsHash, domain) if err != nil { return util.Uint160{}, err } - if len(arr) == 0 { - return util.Uint160{}, errors.New("NNS record is missing") + // Parse the result of resolving NNS record. 
+ // It works with multiple formats (corresponding to multiple NNS versions). + // If array of hashes is provided, it returns only the first one. + if arr, ok := res.Value().([]stackitem.Item); ok { + if len(arr) == 0 { + return util.Uint160{}, errors.New("NNS record is missing") + } + res = arr[0] } - bs, err := arr[0].TryBytes() + bs, err := res.TryBytes() if err != nil { return util.Uint160{}, fmt.Errorf("malformed response: %w", err) } @@ -104,6 +171,33 @@ func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error return util.Uint160{}, errors.New("no valid hashes are found") } +func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) { + result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{ + { + Type: smartcontract.StringType, + Value: domain, + }, + }, nil) + if err != nil { + return false, err + } + + if len(result.Stack) == 0 { + return false, errEmptyResultStack + } + + res := result.Stack[0] + + available, err := res.TryBool() + if err != nil { + return false, fmt.Errorf("malformed response: %w", err) + } + + // not available means that it is taken + // and, therefore, exists + return !available, nil +} + // SetGroupSignerScope makes the default signer scope include all FrostFS contracts. // Should be called for side-chain client only. 
func (c *Client) SetGroupSignerScope() error { @@ -147,12 +241,18 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) { return gKey, nil } - arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName) + nnsHash, err := c.NNSHash() if err != nil { return nil, err } - if len(arr) == 0 { + item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName) + if err != nil { + return nil, err + } + + arr, ok := item.Value().([]stackitem.Item) + if !ok || len(arr) == 0 { return nil, errors.New("NNS record is missing") } diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 448702613..dbd58a53a 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -38,7 +38,8 @@ type ( alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness - proxy util.Uint160 + notary util.Uint160 + proxy util.Uint160 } notaryCfg struct { @@ -101,6 +102,7 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error { txValidTime: cfg.txValidTime, roundTime: cfg.roundTime, alphabetSource: cfg.alphabetSource, + notary: notary.Hash, } c.notary = notaryCfg @@ -186,7 +188,7 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { txHash, vub, err := c.gasToken.Transfer( c.accAddr, - notary.Hash, + c.notary.notary, big.NewInt(int64(amount)), []any{c.acc.PrivateKey().GetScriptHash(), till}) if err != nil { @@ -461,7 +463,7 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error { if r.State != vmstate.Halt.String() { - return ¬HaltStateError{state: r.State, exception: r.FaultException} + return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) } t.ValidUntilBlock = until @@ -608,7 +610,8 @@ 
func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey()) err := multisigAccount.ConvertMultisig(m, ir) if err != nil { - return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err) + // wrap error as FrostFS-specific since the call is not related to any client + return nil, wrapFrostFSError(fmt.Errorf("convert account to inner ring multisig wallet: %w", err)) } } else { // alphabet multisig redeem script is @@ -616,7 +619,8 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB // inner ring multiaddress witness multisigAccount, err = notary.FakeMultisigAccount(m, ir) if err != nil { - return nil, fmt.Errorf("make inner ring multisig wallet: %w", err) + // wrap error as FrostFS-specific since the call is not related to any client + return nil, wrapFrostFSError(fmt.Errorf("make inner ring multisig wallet: %w", err)) } } diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go index f7b6705a8..f68d39beb 100644 --- a/pkg/morph/client/util.go +++ b/pkg/morph/client/util.go @@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) { func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error { return func(r *result.Invoke, t *transaction.Transaction) error { if r.State != HaltState { - return ¬HaltStateError{state: r.State, exception: r.FaultException} + return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) } t.SystemFee += add diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go index 87fcf84b8..5b9d2cbe0 100644 --- a/pkg/morph/client/waiter.go +++ b/pkg/morph/client/waiter.go @@ -47,5 +47,5 @@ func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) err if res.VMState.HasFlag(vmstate.Halt) { return nil } - return ¬HaltStateError{state: res.VMState.String(), exception: res.FaultException} + 
return wrapFrostFSError(¬HaltStateError{state: res.VMState.String(), exception: res.FaultException}) } diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go index b11973646..40f5984a9 100644 --- a/pkg/morph/event/notary_preparator.go +++ b/pkg/morph/event/notary_preparator.go @@ -199,8 +199,8 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error { // neo-go API) // // this check prevents notary flow recursion - if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 && - !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version + if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 || + bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version return ErrTXAlreadyHandled } @@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu // the last one must be a placeholder for notary contract witness last := len(w) - 1 - if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981 - !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version + if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981 + bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version len(w[last].VerificationScript) != 0 { return errIncorrectNotaryPlaceholder } diff --git a/pkg/network/address.go b/pkg/network/address.go index 4643eef15..cb83a813d 100644 --- a/pkg/network/address.go +++ b/pkg/network/address.go @@ -2,11 +2,11 @@ package network import ( "errors" + "fmt" "net" "net/url" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" @@ 
-44,9 +44,11 @@ func (a Address) equal(addr Address) bool { // See also FromString. func (a Address) URIAddr() string { _, host, err := manet.DialArgs(a.ma) - // the only correct way to construct Address is AddressFromString - // which makes this error appear unexpected - assert.NoError(err, "could not get host addr") + if err != nil { + // the only correct way to construct Address is AddressFromString + // which makes this error appear unexpected + panic(fmt.Errorf("could not get host addr: %w", err)) + } if !a.IsTLSEnabled() { return host diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 54c1e18fb..e94fa580a 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -66,8 +66,8 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address grpc.WithChainUnaryInterceptor( qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInterceptor(), + tracing.NewUnaryClientInteceptor(), + tagging.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), diff --git a/pkg/network/group.go b/pkg/network/group.go index 0044fb2d4..5a71e530e 100644 --- a/pkg/network/group.go +++ b/pkg/network/group.go @@ -3,7 +3,6 @@ package network import ( "errors" "fmt" - "iter" "slices" "sort" @@ -69,8 +68,9 @@ func (x AddressGroup) Swap(i, j int) { // MultiAddressIterator is an interface of network address group. type MultiAddressIterator interface { - // Addresses must return an iterator over network addresses. - Addresses() iter.Seq[string] + // IterateAddresses must iterate over network addresses and pass each one + // to the handler until it returns true. + IterateAddresses(func(string) bool) // NumberOfAddresses must return number of addresses in group. 
NumberOfAddresses() int @@ -131,19 +131,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error { // iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f // until 1st parsing failure or f's error. func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) { - for s := range iter.Addresses() { + iter.IterateAddresses(func(s string) bool { var a Address err = a.FromString(s) if err != nil { - return fmt.Errorf("could not parse address from string: %w", err) + err = fmt.Errorf("could not parse address from string: %w", err) + return true } err = f(a) - if err != nil { - return err - } - } + + return err != nil + }) return } diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go index d08264533..5b335fa52 100644 --- a/pkg/network/group_test.go +++ b/pkg/network/group_test.go @@ -1,8 +1,6 @@ package network import ( - "iter" - "slices" "sort" "testing" @@ -60,8 +58,10 @@ func TestAddressGroup_FromIterator(t *testing.T) { type testIterator []string -func (t testIterator) Addresses() iter.Seq[string] { - return slices.Values(t) +func (t testIterator) IterateAddresses(f func(string) bool) { + for i := range t { + f(t[i]) + } } func (t testIterator) NumberOfAddresses() int { diff --git a/pkg/network/validation.go b/pkg/network/validation.go index b5157f28f..92f650119 100644 --- a/pkg/network/validation.go +++ b/pkg/network/validation.go @@ -2,7 +2,6 @@ package network import ( "errors" - "iter" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -35,8 +34,8 @@ var ( // MultiAddressIterator. 
type NodeEndpointsIterator netmap.NodeInfo -func (x NodeEndpointsIterator) Addresses() iter.Seq[string] { - return (netmap.NodeInfo)(x).NetworkEndpoints() +func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) { + (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) } func (x NodeEndpointsIterator) NumberOfAddresses() int { diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index fcd3efa44..c9b0b7363 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -20,6 +20,7 @@ import ( ) var ( + errInvalidTargetType = errors.New("bearer token defines non-container target override") errBearerExpired = errors.New("bearer token has expired") errBearerInvalidSignature = errors.New("bearer token has invalid signature") errBearerInvalidContainerID = errors.New("bearer token was created for another container") @@ -72,22 +73,14 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora // CheckAPE performs the common policy-engine check logic on a prepared request. 
func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error { var cr policyengine.ChainRouter - if prm.BearerToken != nil { + if prm.BearerToken != nil && !prm.BearerToken.Impersonate() { var err error if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil { return fmt.Errorf("bearer validation error: %w", err) } - if prm.BearerToken.Impersonate() { - cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) - } else { - override, isSet := prm.BearerToken.APEOverride() - if !isSet { - return errors.New("expected for override within bearer") - } - cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override) - if err != nil { - return fmt.Errorf("create chain router error: %w", err) - } + cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride()) + if err != nil { + return fmt.Errorf("create chain router error: %w", err) } } else { cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) @@ -133,19 +126,19 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe } // Check for ape overrides defined in the bearer token. 
- if apeOverride, isSet := token.APEOverride(); isSet { - switch apeOverride.Target.TargetType { - case ape.TargetTypeContainer: - var targetCnr cid.ID - err := targetCnr.DecodeString(apeOverride.Target.Name) - if err != nil { - return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) - } - if !cntID.Equals(targetCnr) { - return errBearerInvalidContainerID - } - default: - } + apeOverride := token.APEOverride() + if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer { + return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String()) + } + + // Then check if container is either empty or equal to the container in the request. + var targetCnr cid.ID + err := targetCnr.DecodeString(apeOverride.Target.Name) + if err != nil { + return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) + } + if !cntID.Equals(targetCnr) { + return errBearerInvalidContainerID } // Then check if container owner signed this token. 
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index 01bd825d7..e1fbe3960 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -655,8 +655,10 @@ func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) if err == nil { namespace = subject.Namespace - } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { - return "", fmt.Errorf("get subject error: %w", err) + } else { + if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { + return "", fmt.Errorf("get subject error: %w", err) + } } return namespace, nil } diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go index 0cfca71c1..c2a4f88a6 100644 --- a/pkg/services/control/ir/server/server.go +++ b/pkg/services/control/ir/server/server.go @@ -35,7 +35,8 @@ func panicOnPrmValue(n string, v any) { // the parameterized private key. 
func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server { // verify required parameters - if prm.healthChecker == nil { + switch { + case prm.healthChecker == nil: panicOnPrmValue("health checker", prm.healthChecker) } diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index f3ba9015e..7469ea74e 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -220,7 +220,7 @@ func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest TreeId: treeID, Operation: &tree.LogMove{ ParentId: op.Parent, - Meta: op.Bytes(), + Meta: op.Meta.Bytes(), ChildId: op.Child, }, }, diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index 1b92fdaad..44101a153 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" @@ -47,12 +46,10 @@ type NetworkInfo interface { } func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server { - // this should never happen, otherwise it's a programmer's bug - msg := "BUG: can't create netmap execution service" - assert.False(s == nil, msg, "node state is nil") - assert.False(netInfo == nil, msg, "network info is nil") - assert.False(respSvc == nil, msg, "response service is nil") - assert.True(version.IsValid(v), msg, "invalid version") + if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil { + // this should never happen, otherwise it's a programmer's bug + panic("can't create netmap execution service") + } res := &executorSvc{ state: s, diff --git 
a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go new file mode 100644 index 000000000..94e015abe --- /dev/null +++ b/pkg/services/object/acl/eacl/v2/eacl_test.go @@ -0,0 +1,166 @@ +package v2 + +import ( + "context" + "crypto/ecdsa" + "errors" + "testing" + + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +type testLocalStorage struct { + t *testing.T + + expAddr oid.Address + + obj *objectSDK.Object + + err error +} + +func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { + require.True(s.t, addr.Container().Equals(s.expAddr.Container())) + require.True(s.t, addr.Object().Equals(s.expAddr.Object())) + + return s.obj, s.err +} + +func testXHeaders(strs ...string) []session.XHeader { + res := make([]session.XHeader, len(strs)/2) + + for i := 0; i < len(strs); i += 2 { + res[i/2].SetKey(strs[i]) + res[i/2].SetValue(strs[i+1]) + } + + return res +} + +func TestHeadRequest(t *testing.T) { + req := new(objectV2.HeadRequest) + + meta := new(session.RequestMetaHeader) + req.SetMetaHeader(meta) + + body := new(objectV2.HeadRequestBody) + req.SetBody(body) + + addr := oidtest.Address() + + var addrV2 refs.Address + addr.WriteToV2(&addrV2) + + body.SetAddress(&addrV2) + + xKey := "x-key" + xVal := "x-val" + xHdrs := testXHeaders( + xKey, xVal, + ) + + meta.SetXHeaders(xHdrs) + + obj := objectSDK.New() + + attrKey := "attr_key" + attrVal := "attr_val" + var attr objectSDK.Attribute + 
attr.SetKey(attrKey) + attr.SetValue(attrVal) + obj.SetAttributes(attr) + + table := new(eaclSDK.Table) + + priv, err := keys.NewPrivateKey() + require.NoError(t, err) + senderKey := priv.PublicKey() + + r := eaclSDK.NewRecord() + r.SetOperation(eaclSDK.OperationHead) + r.SetAction(eaclSDK.ActionDeny) + r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal) + r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal) + eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) + + table.AddRecord(r) + + lStorage := &testLocalStorage{ + t: t, + expAddr: addr, + obj: obj, + } + + id := addr.Object() + + newSource := func(t *testing.T) eaclSDK.TypedHeaderSource { + hdrSrc, err := NewMessageHeaderSource( + lStorage, + NewRequestXHeaderSource(req), + addr.Container(), + WithOID(&id)) + require.NoError(t, err) + return hdrSrc + } + + cnr := addr.Container() + + unit := new(eaclSDK.ValidationUnit). + WithContainerID(&cnr). + WithOperation(eaclSDK.OperationHead). + WithSenderKey(senderKey.Bytes()). 
+ WithEACLTable(table) + + validator := eaclSDK.NewValidator() + + checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t))) + + meta.SetXHeaders(nil) + + checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) + + meta.SetXHeaders(xHdrs) + + obj.SetAttributes() + + checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) + + lStorage.err = errors.New("any error") + + checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) + + r.SetAction(eaclSDK.ActionAllow) + + rID := eaclSDK.NewRecord() + rID.SetOperation(eaclSDK.OperationHead) + rID.SetAction(eaclSDK.ActionDeny) + rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object()) + eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) + + table = eaclSDK.NewTable() + table.AddRecord(r) + table.AddRecord(rID) + + unit.WithEACLTable(table) + checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) +} + +func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { + actual, fromRule := v.CalculateAction(u) + require.True(t, fromRule) + require.Equal(t, expected, actual) +} + +func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { + actual, fromRule := v.CalculateAction(u) + require.False(t, fromRule) + require.Equal(t, eaclSDK.ActionAllow, actual) +} diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go new file mode 100644 index 000000000..ecb793df8 --- /dev/null +++ b/pkg/services/object/acl/eacl/v2/headers.go @@ -0,0 +1,246 @@ +package v2 + +import ( + "context" + "errors" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + cid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" +) + +type Option func(*cfg) + +type cfg struct { + storage ObjectStorage + + msg XHeaderSource + + cnr cid.ID + obj *oid.ID +} + +type ObjectStorage interface { + Head(context.Context, oid.Address) (*objectSDK.Object, error) +} + +type Request interface { + GetMetaHeader() *session.RequestMetaHeader +} + +type Response interface { + GetMetaHeader() *session.ResponseMetaHeader +} + +type headerSource struct { + requestHeaders []eaclSDK.Header + objectHeaders []eaclSDK.Header + + incompleteObjectHeaders bool +} + +func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) { + cfg := &cfg{ + storage: os, + cnr: cnrID, + msg: xhs, + } + + for i := range opts { + opts[i](cfg) + } + + if cfg.msg == nil { + return nil, errors.New("message is not provided") + } + + var res headerSource + + err := cfg.readObjectHeaders(&res) + if err != nil { + return nil, err + } + + res.requestHeaders = cfg.msg.GetXHeaders() + + return res, nil +} + +func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) { + switch typ { + default: + return nil, true + case eaclSDK.HeaderFromRequest: + return h.requestHeaders, true + case eaclSDK.HeaderFromObject: + return h.objectHeaders, !h.incompleteObjectHeaders + } +} + +type xHeader session.XHeader + +func (x xHeader) Key() string { + return (*session.XHeader)(&x).GetKey() +} + +func (x xHeader) Value() string { + return (*session.XHeader)(&x).GetValue() +} + +var errMissingOID = errors.New("object ID is missing") + +func (h *cfg) readObjectHeaders(dst *headerSource) error { + switch m := h.msg.(type) { + default: + 
panic(fmt.Sprintf("unexpected message type %T", h.msg)) + case requestXHeaderSource: + return h.readObjectHeadersFromRequestXHeaderSource(m, dst) + case responseXHeaderSource: + return h.readObjectHeadersResponseXHeaderSource(m, dst) + } +} + +func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error { + switch req := m.req.(type) { + case + *objectV2.GetRequest, + *objectV2.HeadRequest: + if h.obj == nil { + return errMissingOID + } + + objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) + + dst.objectHeaders = objHeaders + dst.incompleteObjectHeaders = !completed + case + *objectV2.GetRangeRequest, + *objectV2.GetRangeHashRequest, + *objectV2.DeleteRequest: + if h.obj == nil { + return errMissingOID + } + + dst.objectHeaders = addressHeaders(h.cnr, h.obj) + case *objectV2.PutRequest: + if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { + oV2 := new(objectV2.Object) + oV2.SetObjectID(v.GetObjectID()) + oV2.SetHeader(v.GetHeader()) + + dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) + } + case *objectV2.PutSingleRequest: + dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj) + case *objectV2.SearchRequest: + cnrV2 := req.GetBody().GetContainerID() + var cnr cid.ID + + if cnrV2 != nil { + if err := cnr.ReadFromV2(*cnrV2); err != nil { + return fmt.Errorf("can't parse container ID: %w", err) + } + } + + dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)} + } + return nil +} + +func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error { + switch resp := m.resp.(type) { + default: + objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) + + dst.objectHeaders = objectHeaders + dst.incompleteObjectHeaders = !completed + case *objectV2.GetResponse: + if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok { + oV2 := new(objectV2.Object) + 
oV2.SetObjectID(v.GetObjectID()) + oV2.SetHeader(v.GetHeader()) + + dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) + } + case *objectV2.HeadResponse: + oV2 := new(objectV2.Object) + + var hdr *objectV2.Header + + switch v := resp.GetBody().GetHeaderPart().(type) { + case *objectV2.ShortHeader: + hdr = new(objectV2.Header) + + var idV2 refsV2.ContainerID + h.cnr.WriteToV2(&idV2) + + hdr.SetContainerID(&idV2) + hdr.SetVersion(v.GetVersion()) + hdr.SetCreationEpoch(v.GetCreationEpoch()) + hdr.SetOwnerID(v.GetOwnerID()) + hdr.SetObjectType(v.GetObjectType()) + hdr.SetPayloadLength(v.GetPayloadLength()) + case *objectV2.HeaderWithSignature: + hdr = v.GetHeader() + } + + oV2.SetHeader(hdr) + + dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) + } + return nil +} + +func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) { + if idObj != nil { + var addr oid.Address + addr.SetContainer(cnr) + addr.SetObject(*idObj) + + obj, err := h.storage.Head(context.TODO(), addr) + if err == nil { + return headersFromObject(obj, cnr, idObj), true + } + } + + return addressHeaders(cnr, idObj), false +} + +func cidHeader(idCnr cid.ID) sysObjHdr { + return sysObjHdr{ + k: acl.FilterObjectContainerID, + v: idCnr.EncodeToString(), + } +} + +func oidHeader(obj oid.ID) sysObjHdr { + return sysObjHdr{ + k: acl.FilterObjectID, + v: obj.EncodeToString(), + } +} + +func ownerIDHeader(ownerID user.ID) sysObjHdr { + return sysObjHdr{ + k: acl.FilterObjectOwnerID, + v: ownerID.EncodeToString(), + } +} + +func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header { + hh := make([]eaclSDK.Header, 0, 2) + hh = append(hh, cidHeader(cnr)) + + if oid != nil { + hh = append(hh, oidHeader(*oid)) + } + + return hh +} diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go new file mode 100644 index 000000000..92570a3c5 --- /dev/null +++ 
b/pkg/services/object/acl/eacl/v2/object.go @@ -0,0 +1,92 @@ +package v2 + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +type sysObjHdr struct { + k, v string +} + +func (s sysObjHdr) Key() string { + return s.k +} + +func (s sysObjHdr) Value() string { + return s.v +} + +func u64Value(v uint64) string { + return strconv.FormatUint(v, 10) +} + +func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header { + var count int + for obj := obj; obj != nil; obj = obj.Parent() { + count += 9 + len(obj.Attributes()) + } + + res := make([]eaclSDK.Header, 0, count) + for ; obj != nil; obj = obj.Parent() { + res = append(res, + cidHeader(cnr), + // creation epoch + sysObjHdr{ + k: acl.FilterObjectCreationEpoch, + v: u64Value(obj.CreationEpoch()), + }, + // payload size + sysObjHdr{ + k: acl.FilterObjectPayloadLength, + v: u64Value(obj.PayloadSize()), + }, + // object version + sysObjHdr{ + k: acl.FilterObjectVersion, + v: obj.Version().String(), + }, + // object type + sysObjHdr{ + k: acl.FilterObjectType, + v: obj.Type().String(), + }, + ) + + if oid != nil { + res = append(res, oidHeader(*oid)) + } + + if idOwner := obj.OwnerID(); !idOwner.IsEmpty() { + res = append(res, ownerIDHeader(idOwner)) + } + + cs, ok := obj.PayloadChecksum() + if ok { + res = append(res, sysObjHdr{ + k: acl.FilterObjectPayloadHash, + v: cs.String(), + }) + } + + cs, ok = obj.PayloadHomomorphicHash() + if ok { + res = append(res, sysObjHdr{ + k: acl.FilterObjectHomomorphicHash, + v: cs.String(), + }) + } + + attrs := obj.Attributes() + for i := range attrs { + res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface + } + } + + return res +} diff 
--git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go new file mode 100644 index 000000000..d91a21c75 --- /dev/null +++ b/pkg/services/object/acl/eacl/v2/opts.go @@ -0,0 +1,11 @@ +package v2 + +import ( + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func WithOID(v *oid.ID) Option { + return func(c *cfg) { + c.obj = v + } +} diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go new file mode 100644 index 000000000..ce380c117 --- /dev/null +++ b/pkg/services/object/acl/eacl/v2/xheader.go @@ -0,0 +1,69 @@ +package v2 + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" +) + +type XHeaderSource interface { + GetXHeaders() []eaclSDK.Header +} + +type requestXHeaderSource struct { + req Request +} + +func NewRequestXHeaderSource(req Request) XHeaderSource { + return requestXHeaderSource{req: req} +} + +type responseXHeaderSource struct { + resp Response + + req Request +} + +func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource { + return responseXHeaderSource{resp: resp, req: req} +} + +func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header { + ln := 0 + + for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { + ln += len(meta.GetXHeaders()) + } + + res := make([]eaclSDK.Header, 0, ln) + for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { + x := meta.GetXHeaders() + for i := range x { + res = append(res, (xHeader)(x[i])) + } + } + + return res +} + +func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header { + ln := 0 + xHdrs := make([][]session.XHeader, 0) + + for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { + x := meta.GetXHeaders() + + ln += len(x) + + xHdrs = append(xHdrs, x) + } + + res := make([]eaclSDK.Header, 0, ln) + + for i := range xHdrs { + for j := range xHdrs[i] { + res = 
append(res, xHeader(xHdrs[i][j])) + } + } + + return res +} diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go new file mode 100644 index 000000000..cd2de174a --- /dev/null +++ b/pkg/services/object/acl/v2/errors.go @@ -0,0 +1,20 @@ +package v2 + +import ( + "fmt" +) + +const invalidRequestMessage = "malformed request" + +func malformedRequestError(reason string) error { + return fmt.Errorf("%s: %s", invalidRequestMessage, reason) +} + +var ( + errEmptyBody = malformedRequestError("empty body") + errEmptyVerificationHeader = malformedRequestError("empty verification header") + errEmptyBodySig = malformedRequestError("empty at body signature") + errInvalidSessionSig = malformedRequestError("invalid session token signature") + errInvalidSessionOwner = malformedRequestError("invalid session token owner") + errInvalidVerb = malformedRequestError("session token verb is invalid") +) diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go new file mode 100644 index 000000000..15fcce884 --- /dev/null +++ b/pkg/services/object/acl/v2/opts.go @@ -0,0 +1,12 @@ +package v2 + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" +) + +// WithLogger returns option to set logger. 
+func WithLogger(v *logger.Logger) Option { + return func(c *cfg) { + c.log = v + } +} diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go new file mode 100644 index 000000000..8bd34ccb3 --- /dev/null +++ b/pkg/services/object/acl/v2/request.go @@ -0,0 +1,152 @@ +package v2 + +import ( + "crypto/ecdsa" + "fmt" + + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +// RequestInfo groups parsed version-independent (from SDK library) +// request information and raw API request. +type RequestInfo struct { + basicACL acl.Basic + requestRole acl.Role + operation acl.Op // put, get, head, etc. + cnrOwner user.ID // container owner + + // cnrNamespace defined to which namespace a container is belonged. + cnrNamespace string + + idCnr cid.ID + + // optional for some request + // e.g. Put, Search + obj *oid.ID + + senderKey []byte + + bearer *bearer.Token // bearer token of request + + srcRequest any +} + +func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) { + r.basicACL = basicACL +} + +func (r *RequestInfo) SetRequestRole(requestRole acl.Role) { + r.requestRole = requestRole +} + +func (r *RequestInfo) SetSenderKey(senderKey []byte) { + r.senderKey = senderKey +} + +// Request returns raw API request. +func (r RequestInfo) Request() any { + return r.srcRequest +} + +// ContainerOwner returns owner if the container. +func (r RequestInfo) ContainerOwner() user.ID { + return r.cnrOwner +} + +func (r RequestInfo) ContainerNamespace() string { + return r.cnrNamespace +} + +// ObjectID return object ID. 
+func (r RequestInfo) ObjectID() *oid.ID { + return r.obj +} + +// ContainerID returns container ID. +func (r RequestInfo) ContainerID() cid.ID { + return r.idCnr +} + +// CleanBearer forces cleaning bearer token information. +func (r *RequestInfo) CleanBearer() { + r.bearer = nil +} + +// Bearer returns bearer token of the request. +func (r RequestInfo) Bearer() *bearer.Token { + return r.bearer +} + +// BasicACL returns basic ACL of the container. +func (r RequestInfo) BasicACL() acl.Basic { + return r.basicACL +} + +// SenderKey returns public key of the request's sender. +func (r RequestInfo) SenderKey() []byte { + return r.senderKey +} + +// Operation returns request's operation. +func (r RequestInfo) Operation() acl.Op { + return r.operation +} + +// RequestRole returns request sender's role. +func (r RequestInfo) RequestRole() acl.Role { + return r.requestRole +} + +// MetaWithToken groups session and bearer tokens, +// verification header and raw API request. +type MetaWithToken struct { + vheader *sessionV2.RequestVerificationHeader + token *sessionSDK.Object + bearer *bearer.Token + src any +} + +// RequestOwner returns ownerID and its public key +// according to internal meta information. 
+func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) { + if r.vheader == nil { + return nil, nil, errEmptyVerificationHeader + } + + if r.bearer != nil && r.bearer.Impersonate() { + return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes()) + } + + // if session token is presented, use it as truth source + if r.token != nil { + // verify signature of session token + return ownerFromToken(r.token) + } + + // otherwise get original body signature + bodySignature := originalBodySignature(r.vheader) + if bodySignature == nil { + return nil, nil, errEmptyBodySig + } + + return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) +} + +func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { + key, err := unmarshalPublicKey(rawKey) + if err != nil { + return nil, nil, fmt.Errorf("invalid signature key: %w", err) + } + + var idSender user.ID + user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) + + return &idSender, key, nil +} diff --git a/pkg/services/object/ape/metadata_test.go b/pkg/services/object/acl/v2/request_test.go similarity index 83% rename from pkg/services/object/ape/metadata_test.go rename to pkg/services/object/acl/v2/request_test.go index fd919008f..618af3469 100644 --- a/pkg/services/object/ape/metadata_test.go +++ b/pkg/services/object/acl/v2/request_test.go @@ -1,4 +1,4 @@ -package ape +package v2 import ( "testing" @@ -32,33 +32,33 @@ func TestRequestOwner(t *testing.T) { vh.SetBodySignature(&userSignature) t.Run("empty verification header", func(t *testing.T) { - req := Metadata{} + req := MetaWithToken{} checkOwner(t, req, nil, errEmptyVerificationHeader) }) t.Run("empty verification header signature", func(t *testing.T) { - req := Metadata{ - VerificationHeader: new(sessionV2.RequestVerificationHeader), + req := MetaWithToken{ + vheader: new(sessionV2.RequestVerificationHeader), } checkOwner(t, req, nil, errEmptyBodySig) }) t.Run("no tokens", func(t *testing.T) { - req := Metadata{ - 
VerificationHeader: vh, + req := MetaWithToken{ + vheader: vh, } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer without impersonate, no session", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - BearerToken: newBearer(t, containerOwner, userID, false), + req := MetaWithToken{ + vheader: vh, + bearer: newBearer(t, containerOwner, userID, false), } checkOwner(t, req, userPk.PublicKey(), nil) }) t.Run("bearer with impersonate, no session", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - BearerToken: newBearer(t, containerOwner, userID, true), + req := MetaWithToken{ + vheader: vh, + bearer: newBearer(t, containerOwner, userID, true), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -67,17 +67,17 @@ func TestRequestOwner(t *testing.T) { pk, err := keys.NewPrivateKey() require.NoError(t, err) - req := Metadata{ - VerificationHeader: vh, - BearerToken: newBearer(t, containerOwner, userID, true), - SessionToken: newSession(t, pk), + req := MetaWithToken{ + vheader: vh, + bearer: newBearer(t, containerOwner, userID, true), + token: newSession(t, pk), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) t.Run("with session", func(t *testing.T) { - req := Metadata{ - VerificationHeader: vh, - SessionToken: newSession(t, containerOwner), + req := MetaWithToken{ + vheader: vh, + token: newSession(t, containerOwner), } checkOwner(t, req, containerOwner.PublicKey(), nil) }) @@ -118,9 +118,9 @@ func TestRequestOwner(t *testing.T) { var tok sessionSDK.Object require.NoError(t, tok.ReadFromV2(tokV2)) - req := Metadata{ - VerificationHeader: vh, - SessionToken: &tok, + req := MetaWithToken{ + vheader: vh, + token: &tok, } checkOwner(t, req, nil, errInvalidSessionOwner) }) @@ -152,7 +152,7 @@ func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool return &tok } -func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) { +func checkOwner(t *testing.T, req 
MetaWithToken, expected *keys.PublicKey, expectedErr error) { _, actual, err := req.RequestOwner() if expectedErr != nil { require.ErrorIs(t, err, expectedErr) diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go new file mode 100644 index 000000000..86daec6cc --- /dev/null +++ b/pkg/services/object/acl/v2/service.go @@ -0,0 +1,779 @@ +package v2 + +import ( + "context" + "errors" + "fmt" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "go.uber.org/zap" +) + +// Service checks basic ACL rules. +type Service struct { + *cfg + + c objectCore.SenderClassifier +} + +type putStreamBasicChecker struct { + source *Service + next object.PutObjectStream +} + +type patchStreamBasicChecker struct { + source *Service + next object.PatchObjectStream + nonFirstSend bool +} + +// Option represents Service constructor option. +type Option func(*cfg) + +type cfg struct { + log *logger.Logger + + containers container.Source + + irFetcher InnerRingFetcher + + nm netmap.Source + + next object.ServiceServer +} + +// New is a constructor for object ACL checking service. 
+func New(next object.ServiceServer, + nm netmap.Source, + irf InnerRingFetcher, + cs container.Source, + opts ...Option, +) Service { + cfg := &cfg{ + log: logger.NewLoggerWrapper(zap.L()), + next: next, + nm: nm, + irFetcher: irf, + containers: cs, + } + + for i := range opts { + opts[i](cfg) + } + + return Service{ + cfg: cfg, + c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log), + } +} + +// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context. +// This allows to retrieve already calculated immutable request-specific values in next handler invocation. +type wrappedGetObjectStream struct { + object.GetObjectStream + + requestInfo RequestInfo +} + +func (w *wrappedGetObjectStream) Context() context.Context { + return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{ + Namespace: w.requestInfo.ContainerNamespace(), + ContainerOwner: w.requestInfo.ContainerOwner(), + SenderKey: w.requestInfo.SenderKey(), + Role: w.requestInfo.RequestRole(), + BearerToken: w.requestInfo.Bearer(), + }) +} + +func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream { + return &wrappedGetObjectStream{ + GetObjectStream: getObjectStream, + requestInfo: reqInfo, + } +} + +// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context. +// This allows to retrieve already calculated immutable request-specific values in next handler invocation. 
+type wrappedRangeStream struct { + object.GetObjectRangeStream + + requestInfo RequestInfo +} + +func (w *wrappedRangeStream) Context() context.Context { + return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{ + Namespace: w.requestInfo.ContainerNamespace(), + ContainerOwner: w.requestInfo.ContainerOwner(), + SenderKey: w.requestInfo.SenderKey(), + Role: w.requestInfo.RequestRole(), + BearerToken: w.requestInfo.Bearer(), + }) +} + +func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream { + return &wrappedRangeStream{ + GetObjectRangeStream: rangeStream, + requestInfo: reqInfo, + } +} + +// wrappedSearchStream propagates RequestContext into SearchStream's context. +// This allows to retrieve already calculated immutable request-specific values in next handler invocation. +type wrappedSearchStream struct { + object.SearchStream + + requestInfo RequestInfo +} + +func (w *wrappedSearchStream) Context() context.Context { + return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{ + Namespace: w.requestInfo.ContainerNamespace(), + ContainerOwner: w.requestInfo.ContainerOwner(), + SenderKey: w.requestInfo.SenderKey(), + Role: w.requestInfo.RequestRole(), + BearerToken: w.requestInfo.Bearer(), + }) +} + +func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream { + return &wrappedSearchStream{ + SearchStream: searchStream, + requestInfo: reqInfo, + } +} + +// Get implements ServiceServer interface, makes ACL checks and calls +// next Get method in the ServiceServer pipeline. 
+func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err != nil { + return err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectGet) + if err != nil { + return err + } + + reqInfo.obj = obj + + return b.next.Get(request, newWrappedGetObjectStreamStream(stream, reqInfo)) +} + +func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) { + streamer, err := b.next.Put(ctx) + + return putStreamBasicChecker{ + source: &b, + next: streamer, + }, err +} + +func (b Service) Patch(ctx context.Context) (object.PatchObjectStream, error) { + streamer, err := b.next.Patch(ctx) + + return &patchStreamBasicChecker{ + source: &b, + next: streamer, + }, err +} + +func (b Service) Head( + ctx context.Context, + request *objectV2.HeadRequest, +) (*objectV2.HeadResponse, error) { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return nil, err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return nil, err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err != nil { + return nil, err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + req := MetaWithToken{ + vheader: 
request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHead) + if err != nil { + return nil, err + } + + reqInfo.obj = obj + + return b.next.Head(requestContext(ctx, reqInfo), request) +} + +func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error { + id, err := getContainerIDFromRequest(request) + if err != nil { + return err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, id, nil) + if err != nil { + return err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(stream.Context(), req, id, acl.OpObjectSearch) + if err != nil { + return err + } + + return b.next.Search(request, newWrappedSearchStream(stream, reqInfo)) +} + +func (b Service) Delete( + ctx context.Context, + request *objectV2.DeleteRequest, +) (*objectV2.DeleteResponse, error) { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return nil, err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return nil, err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err != nil { + return nil, err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectDelete) + if err != nil { + return nil, err + } + + reqInfo.obj = obj + + return 
b.next.Delete(requestContext(ctx, reqInfo), request) +} + +func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err != nil { + return err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectRange) + if err != nil { + return err + } + + reqInfo.obj = obj + + return b.next.GetRange(request, newWrappedRangeStream(stream, reqInfo)) +} + +func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context { + return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{ + Namespace: reqInfo.ContainerNamespace(), + ContainerOwner: reqInfo.ContainerOwner(), + SenderKey: reqInfo.SenderKey(), + Role: reqInfo.RequestRole(), + BearerToken: reqInfo.Bearer(), + }) +} + +func (b Service) GetRangeHash( + ctx context.Context, + request *objectV2.GetRangeHashRequest, +) (*objectV2.GetRangeHashResponse, error) { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return nil, err + } + + obj, err := getObjectIDFromRequestBody(request.GetBody()) + if err != nil { + return nil, err + } + + sTok, err := originalSessionToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + if sTok != nil { + err = assertSessionRelation(*sTok, cnr, obj) + if err != nil { + return nil, err + } + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + 
+ req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHash) + if err != nil { + return nil, err + } + + reqInfo.obj = obj + + return b.next.GetRangeHash(requestContext(ctx, reqInfo), request) +} + +func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return nil, err + } + + idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID() + if idV2 == nil { + return nil, errors.New("missing object owner") + } + + var idOwner user.ID + + err = idOwner.ReadFromV2(*idV2) + if err != nil { + return nil, fmt.Errorf("invalid object owner: %w", err) + } + + obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID()) + if err != nil { + return nil, err + } + + var sTok *sessionSDK.Object + sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) + if err != nil { + return nil, err + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return nil, err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectPut) + if err != nil { + return nil, err + } + + reqInfo.obj = obj + + return b.next.PutSingle(requestContext(ctx, reqInfo), request) +} + +func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { + body := request.GetBody() + if body == nil { + return errEmptyBody + } + + part := body.GetObjectPart() + if part, ok := part.(*objectV2.PutObjectPartInit); ok { + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return err + } + + idV2 := part.GetHeader().GetOwnerID() + if idV2 == nil { + return errors.New("missing object owner") + } + + var idOwner 
user.ID + + err = idOwner.ReadFromV2(*idV2) + if err != nil { + return fmt.Errorf("invalid object owner: %w", err) + } + + objV2 := part.GetObjectID() + var obj *oid.ID + + if objV2 != nil { + obj = new(oid.ID) + + err = obj.ReadFromV2(*objV2) + if err != nil { + return err + } + } + + var sTok *sessionSDK.Object + sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) + if err != nil { + return err + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := p.source.findRequestInfo(ctx, req, cnr, acl.OpObjectPut) + if err != nil { + return err + } + + reqInfo.obj = obj + + ctx = requestContext(ctx, reqInfo) + } + + return p.next.Send(ctx, request) +} + +func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) { + var sTok *sessionSDK.Object + + if tokV2 != nil { + sTok = new(sessionSDK.Object) + + err := sTok.ReadFromV2(*tokV2) + if err != nil { + return nil, fmt.Errorf("invalid session token: %w", err) + } + + if sTok.AssertVerb(sessionSDK.VerbObjectDelete) { + // if session relates to object's removal, we don't check + // relation of the tombstone to the session here since user + // can't predict tomb's ID. 
+ err = assertSessionRelation(*sTok, cnr, nil) + } else { + err = assertSessionRelation(*sTok, cnr, obj) + } + + if err != nil { + return nil, err + } + } + + return sTok, nil +} + +func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) { + return p.next.CloseAndRecv(ctx) +} + +func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { + body := request.GetBody() + if body == nil { + return errEmptyBody + } + + if !p.nonFirstSend { + p.nonFirstSend = true + + cnr, err := getContainerIDFromRequest(request) + if err != nil { + return err + } + + objV2 := request.GetBody().GetAddress().GetObjectID() + if objV2 == nil { + return errors.New("missing oid") + } + obj := new(oid.ID) + err = obj.ReadFromV2(*objV2) + if err != nil { + return err + } + + var sTok *sessionSDK.Object + sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) + if err != nil { + return err + } + + bTok, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return err + } + + req := MetaWithToken{ + vheader: request.GetVerificationHeader(), + token: sTok, + bearer: bTok, + src: request, + } + + reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(ctx, req, cnr) + if err != nil { + return err + } + + reqInfo.obj = obj + + ctx = requestContext(ctx, reqInfo) + } + + return p.next.Send(ctx, request) +} + +func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { + return p.next.CloseAndRecv(ctx) +} + +func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) { + cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container + if err != nil { + return info, err + } + + if req.token != nil { + currentEpoch, err := b.nm.Epoch(ctx) + if err != nil { + return info, errors.New("can't fetch current epoch") + } + if req.token.ExpiredAt(currentEpoch) { + 
return info, new(apistatus.SessionTokenExpired) + } + if req.token.InvalidAt(currentEpoch) { + return info, fmt.Errorf("%s: token is invalid at %d epoch)", + invalidRequestMessage, currentEpoch) + } + + if !assertVerb(*req.token, op) { + return info, errInvalidVerb + } + } + + // find request role and key + ownerID, ownerKey, err := req.RequestOwner() + if err != nil { + return info, err + } + res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) + if err != nil { + return info, err + } + + info.basicACL = cnr.Value.BasicACL() + info.requestRole = res.Role + info.operation = op + info.cnrOwner = cnr.Value.Owner() + info.idCnr = idCnr + + cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") + if hasNamespace { + info.cnrNamespace = cnrNamespace + } + + // it is assumed that at the moment the key will be valid, + // otherwise the request would not pass validation + info.senderKey = res.Key + + // add bearer token if it is present in request + info.bearer = req.bearer + + info.srcRequest = req.src + + return info, nil +} + +// findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert. 
+func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) { + cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container + if err != nil { + return info, err + } + + if req.token != nil { + currentEpoch, err := b.nm.Epoch(ctx) + if err != nil { + return info, errors.New("can't fetch current epoch") + } + if req.token.ExpiredAt(currentEpoch) { + return info, new(apistatus.SessionTokenExpired) + } + if req.token.InvalidAt(currentEpoch) { + return info, fmt.Errorf("%s: token is invalid at %d epoch)", + invalidRequestMessage, currentEpoch) + } + } + + // find request role and key + ownerID, ownerKey, err := req.RequestOwner() + if err != nil { + return info, err + } + res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) + if err != nil { + return info, err + } + + info.basicACL = cnr.Value.BasicACL() + info.requestRole = res.Role + info.cnrOwner = cnr.Value.Owner() + info.idCnr = idCnr + + cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") + if hasNamespace { + info.cnrNamespace = cnrNamespace + } + + // it is assumed that at the moment the key will be valid, + // otherwise the request would not pass validation + info.senderKey = res.Key + + // add bearer token if it is present in request + info.bearer = req.bearer + + info.srcRequest = req.src + + return info, nil +} diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go new file mode 100644 index 000000000..3cf10eb56 --- /dev/null +++ b/pkg/services/object/acl/v2/types.go @@ -0,0 +1,11 @@ +package v2 + +import "context" + +// InnerRingFetcher is an interface that must provide +// Inner Ring information. +type InnerRingFetcher interface { + // InnerRingKeys must return list of public keys of + // the actual inner ring. 
+ InnerRingKeys(ctx context.Context) ([][]byte, error) +} diff --git a/pkg/services/object/ape/util.go b/pkg/services/object/acl/v2/util.go similarity index 58% rename from pkg/services/object/ape/util.go rename to pkg/services/object/acl/v2/util.go index 5cd2caa50..e02f70771 100644 --- a/pkg/services/object/ape/util.go +++ b/pkg/services/object/acl/v2/util.go @@ -1,4 +1,4 @@ -package ape +package v2 import ( "crypto/ecdsa" @@ -6,34 +6,57 @@ import ( "errors" "fmt" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) -func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { - if cidV2 != nil { - if err = cnrID.ReadFromV2(*cidV2); err != nil { - return +var errMissingContainerID = errors.New("missing container ID") + +func getContainerIDFromRequest(req any) (cid.ID, error) { + var idV2 *refsV2.ContainerID + var id cid.ID + + switch v := req.(type) { + case *objectV2.GetRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case *objectV2.PutRequest: + part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit) + if !ok { + return cid.ID{}, errors.New("can't get container ID in chunk") } - } else { - err = errMissingContainerID - return + + idV2 = part.GetHeader().GetContainerID() + case *objectV2.HeadRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case 
*objectV2.SearchRequest: + idV2 = v.GetBody().GetContainerID() + case *objectV2.DeleteRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case *objectV2.GetRangeRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case *objectV2.GetRangeHashRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + case *objectV2.PutSingleRequest: + idV2 = v.GetBody().GetObject().GetHeader().GetContainerID() + case *objectV2.PatchRequest: + idV2 = v.GetBody().GetAddress().GetContainerID() + default: + return cid.ID{}, errors.New("unknown request type") } - if objV2 != nil { - objID = new(oid.ID) - if err = objID.ReadFromV2(*objV2); err != nil { - return - } + if idV2 == nil { + return cid.ID{}, errMissingContainerID } - return + + return id, id.ReadFromV2(*idV2) } // originalBearerToken goes down to original request meta header and fetches @@ -52,6 +75,50 @@ func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, er return &tok, tok.ReadFromV2(*tokV2) } +// originalSessionToken goes down to original request meta header and fetches +// session token from there. +func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) { + for header.GetOrigin() != nil { + header = header.GetOrigin() + } + + tokV2 := header.GetSessionToken() + if tokV2 == nil { + return nil, nil + } + + var tok sessionSDK.Object + + err := tok.ReadFromV2(*tokV2) + if err != nil { + return nil, fmt.Errorf("invalid session token: %w", err) + } + + return &tok, nil +} + +// getObjectIDFromRequestBody decodes oid.ID from the common interface of the +// object reference's holders. Returns an error if object ID is missing in the request. 
+func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) { + idV2 := body.GetAddress().GetObjectID() + return getObjectIDFromRefObjectID(idV2) +} + +func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) { + if idV2 == nil { + return nil, errors.New("missing object ID") + } + + var id oid.ID + + err := id.ReadFromV2(*idV2) + if err != nil { + return nil, err + } + + return &id, nil +} + func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) { // 1. First check signature of session token. if !token.VerifySignature() { @@ -105,16 +172,16 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { return id2.Equals(id) } -// assertVerb checks that token verb corresponds to the method. -func assertVerb(tok sessionSDK.Object, method string) bool { - switch method { - case nativeschema.MethodPutObject: +// assertVerb checks that token verb corresponds to op. +func assertVerb(tok sessionSDK.Object, op acl.Op) bool { + switch op { + case acl.OpObjectPut: return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch) - case nativeschema.MethodDeleteObject: + case acl.OpObjectDelete: return tok.AssertVerb(sessionSDK.VerbObjectDelete) - case nativeschema.MethodGetObject: + case acl.OpObjectGet: return tok.AssertVerb(sessionSDK.VerbObjectGet) - case nativeschema.MethodHeadObject: + case acl.OpObjectHead: return tok.AssertVerb( sessionSDK.VerbObjectHead, sessionSDK.VerbObjectGet, @@ -123,15 +190,14 @@ func assertVerb(tok sessionSDK.Object, method string) bool { sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch, ) - case nativeschema.MethodSearchObject: + case acl.OpObjectSearch: return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete) - case nativeschema.MethodRangeObject: + case acl.OpObjectRange: return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch) - case 
nativeschema.MethodHashObject: + case acl.OpObjectHash: return tok.AssertVerb(sessionSDK.VerbObjectRangeHash) - case nativeschema.MethodPatchObject: - return tok.AssertVerb(sessionSDK.VerbObjectPatch) } + return false } @@ -155,15 +221,3 @@ func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error return nil } - -func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { - key, err := unmarshalPublicKey(rawKey) - if err != nil { - return nil, nil, fmt.Errorf("invalid signature key: %w", err) - } - - var idSender user.ID - user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) - - return &idSender, key, nil -} diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go new file mode 100644 index 000000000..40fce8877 --- /dev/null +++ b/pkg/services/object/acl/v2/util_test.go @@ -0,0 +1,131 @@ +package v2 + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "slices" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test" + aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test" + "github.com/stretchr/testify/require" +) + +func TestOriginalTokens(t *testing.T) { + sToken := sessiontest.ObjectSigned() + bToken := bearertest.Token() + + pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, bToken.Sign(*pk)) + + var bTokenV2 acl.BearerToken + bToken.WriteToV2(&bTokenV2) + // This line is needed because SDK uses some custom format for + // reserved filters, so `cid.ID` is not converted to string immediately. 
+ require.NoError(t, bToken.ReadFromV2(bTokenV2)) + + var sTokenV2 session.Token + sToken.WriteToV2(&sTokenV2) + + for i := range 10 { + metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2) + res, err := originalSessionToken(metaHeaders) + require.NoError(t, err) + require.Equal(t, sToken, res, i) + + bTok, err := originalBearerToken(metaHeaders) + require.NoError(t, err) + require.Equal(t, &bToken, bTok, i) + } +} + +func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader { + metaHeader := new(session.RequestMetaHeader) + metaHeader.SetBearerToken(b) + metaHeader.SetSessionToken(s) + + for range depth { + link := metaHeader + metaHeader = new(session.RequestMetaHeader) + metaHeader.SetOrigin(link) + } + + return metaHeader +} + +func TestIsVerbCompatible(t *testing.T) { + // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28 + table := map[aclsdk.Op][]sessionSDK.ObjectVerb{ + aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete}, + aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete}, + aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet}, + aclsdk.OpObjectHead: { + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + }, + aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash}, + aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash}, + aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, + } + + verbs := []sessionSDK.ObjectVerb{ + sessionSDK.VerbObjectPut, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectSearch, + } + + var tok sessionSDK.Object + + for op, list := range table { + for _, verb := range verbs { + contains := slices.Contains(list, verb) + + tok.ForVerb(verb) + + 
require.Equal(t, contains, assertVerb(tok, op), + "%v in token, %s executing", verb, op) + } + } +} + +func TestAssertSessionRelation(t *testing.T) { + var tok sessionSDK.Object + cnr := cidtest.ID() + cnrOther := cidtest.ID() + obj := oidtest.ID() + objOther := oidtest.ID() + + // make sure ids differ, otherwise test won't work correctly + require.False(t, cnrOther.Equals(cnr)) + require.False(t, objOther.Equals(obj)) + + // bind session to the container (required) + tok.BindContainer(cnr) + + // test container-global session + require.NoError(t, assertSessionRelation(tok, cnr, nil)) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnrOther, nil)) + require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) + + // limit the session to the particular object + tok.LimitByObjects(obj) + + // test fixed object session (here obj arg must be non-nil everywhere) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnr, &objOther)) +} diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index b96757def..ee46a6fe4 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -76,10 +76,9 @@ var errMissingOID = errors.New("object ID is not set") // CheckAPE prepares an APE-request and checks if it is permitted by policies. func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { // APE check is ignored for some inter-node requests. 
- switch prm.Role { - case nativeschema.PropertyValueContainerRoleContainer: + if prm.Role == nativeschema.PropertyValueContainerRoleContainer { return nil - case nativeschema.PropertyValueContainerRoleIR: + } else if prm.Role == nativeschema.PropertyValueContainerRoleIR { switch prm.Method { case nativeschema.MethodGetObject, nativeschema.MethodHeadObject, diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go index 82e660a7f..6e458b384 100644 --- a/pkg/services/object/ape/errors.go +++ b/pkg/services/object/ape/errors.go @@ -7,21 +7,6 @@ import ( apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) -var ( - errMissingContainerID = malformedRequestError("missing container ID") - errEmptyVerificationHeader = malformedRequestError("empty verification header") - errEmptyBodySig = malformedRequestError("empty at body signature") - errInvalidSessionSig = malformedRequestError("invalid session token signature") - errInvalidSessionOwner = malformedRequestError("invalid session token owner") - errInvalidVerb = malformedRequestError("session token verb is invalid") -) - -func malformedRequestError(reason string) error { - invalidArgErr := &apistatus.InvalidArgument{} - invalidArgErr.SetMessage(reason) - return invalidArgErr -} - func toStatusErr(err error) error { var chRouterErr *checkercore.ChainRouterError if !errors.As(err, &chRouterErr) { diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go deleted file mode 100644 index b37c3b6f8..000000000 --- a/pkg/services/object/ape/metadata.go +++ /dev/null @@ -1,172 +0,0 @@ -package ape - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -type Metadata struct { - Container cid.ID - Object *oid.ID - MetaHeader *session.RequestMetaHeader - VerificationHeader *session.RequestVerificationHeader - SessionToken *sessionSDK.Object - BearerToken *bearer.Token -} - -func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) { - if m.VerificationHeader == nil { - return nil, nil, errEmptyVerificationHeader - } - - if m.BearerToken != nil && m.BearerToken.Impersonate() { - return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes()) - } - - // if session token is presented, use it as truth source - if m.SessionToken != nil { - // verify signature of session token - return ownerFromToken(m.SessionToken) - } - - // otherwise get original body signature - bodySignature := originalBodySignature(m.VerificationHeader) - if bodySignature == nil { - return nil, nil, errEmptyBodySig - } - - return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) -} - -// RequestInfo contains request information extracted by request metadata. -type RequestInfo struct { - // Role defines under which role this request is executed. - // It must be represented only as a constant represented in native schema. - Role string - - ContainerOwner user.ID - - // Namespace defines to which namespace a container is belonged. - Namespace string - - // HEX-encoded sender key. 
- SenderKey string -} - -type RequestInfoExtractor interface { - GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error) -} - -type extractor struct { - containers container.Source - - nm netmap.Source - - classifier objectCore.SenderClassifier -} - -func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor { - return &extractor{ - containers: containers, - nm: nm, - classifier: objectCore.NewSenderClassifier(irFetcher, nm, log), - } -} - -func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error { - currentEpoch, err := e.nm.Epoch(ctx) - if err != nil { - return errors.New("can't fetch current epoch") - } - if sessionToken.ExpiredAt(currentEpoch) { - return new(apistatus.SessionTokenExpired) - } - if sessionToken.InvalidAt(currentEpoch) { - return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch) - } - if !assertVerb(*sessionToken, method) { - return errInvalidVerb - } - return nil -} - -func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) { - cnr, err := e.containers.Get(ctx, m.Container) - if err != nil { - return ri, err - } - - if m.SessionToken != nil { - if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil { - return ri, err - } - } - - ownerID, ownerKey, err := m.RequestOwner() - if err != nil { - return ri, err - } - res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value) - if err != nil { - return ri, err - } - - ri.Role = nativeSchemaRole(res.Role) - ri.ContainerOwner = cnr.Value.Owner() - - cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") - if hasNamespace { - ri.Namespace = cnrNamespace - } - - // it is assumed that at the moment the key will be valid, - // otherwise the request would not pass validation - ri.SenderKey = 
hex.EncodeToString(res.Key) - - return ri, nil -} - -func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) { - var sTok *sessionSDK.Object - - if tokV2 != nil { - sTok = new(sessionSDK.Object) - - err := sTok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - if sTok.AssertVerb(sessionSDK.VerbObjectDelete) { - // if session relates to object's removal, we don't check - // relation of the tombstone to the session here since user - // can't predict tomb's ID. - err = assertSessionRelation(*sTok, cnr, nil) - } else { - err = assertSessionRelation(*sTok, cnr, obj) - } - - if err != nil { - return nil, err - } - } - - return sTok, nil -} diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index e199e2638..d9594a3fc 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -2,6 +2,9 @@ package ape import ( "context" + "encoding/hex" + "errors" + "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" @@ -9,18 +12,19 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" ) +var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext") + type Service struct { apeChecker Checker - extractor RequestInfoExtractor - next 
objectSvc.ServiceServer } @@ -60,10 +64,9 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) } } -func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service { +func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service { return &Service{ apeChecker: apeChecker, - extractor: extractor, next: next, } } @@ -73,9 +76,15 @@ type getStreamBasicChecker struct { apeChecker Checker - metadata Metadata + namespace string - reqInfo RequestInfo + senderKey []byte + + containerOwner user.ID + + role string + + bearerToken *bearer.Token } func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { @@ -86,15 +95,15 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { } prm := Prm{ - Namespace: g.reqInfo.Namespace, + Namespace: g.namespace, Container: cnrID, Object: objID, Header: partInit.GetHeader(), Method: nativeschema.MethodGetObject, - SenderKey: g.reqInfo.SenderKey, - ContainerOwner: g.reqInfo.ContainerOwner, - Role: g.reqInfo.Role, - BearerToken: g.metadata.BearerToken, + SenderKey: hex.EncodeToString(g.senderKey), + ContainerOwner: g.containerOwner, + Role: g.role, + BearerToken: g.bearerToken, XHeaders: resp.GetMetaHeader().GetXHeaders(), } @@ -105,53 +114,69 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { return g.GetObjectStream.Send(resp) } +func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) { + untyped := ctx.Value(objectSvc.RequestContextKey) + if untyped == nil { + return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey) + } + rc, ok := untyped.(*objectSvc.RequestContext) + if !ok { + return nil, errFailedToCastToRequestContext + } + return rc, nil +} + func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), 
request.GetBody().GetAddress().GetObjectID()) + reqCtx, err := requestContext(stream.Context()) if err != nil { - return err - } - reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject) - if err != nil { - return err + return toStatusErr(err) } + return c.next.Get(request, &getStreamBasicChecker{ GetObjectStream: stream, apeChecker: c.apeChecker, - metadata: md, - reqInfo: reqInfo, + namespace: reqCtx.Namespace, + senderKey: reqCtx.SenderKey, + containerOwner: reqCtx.ContainerOwner, + role: nativeSchemaRole(reqCtx.Role), + bearerToken: reqCtx.BearerToken, }) } type putStreamBasicChecker struct { apeChecker Checker - extractor RequestInfoExtractor - next objectSvc.PutObjectStream } func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) + reqCtx, err := requestContext(ctx) if err != nil { - return err + return toStatusErr(err) } - reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) + + cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) if err != nil { - return err + return toStatusErr(err) } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, Header: partInit.GetHeader(), Method: nativeschema.MethodPutObject, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - Role: reqInfo.Role, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + Role: 
nativeSchemaRole(reqCtx.Role), + BearerToken: reqCtx.BearerToken, + XHeaders: meta.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -171,7 +196,6 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { return &putStreamBasicChecker{ apeChecker: c.apeChecker, - extractor: c.extractor, next: streamer, }, err } @@ -179,36 +203,40 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { type patchStreamBasicChecker struct { apeChecker Checker - extractor RequestInfoExtractor - next objectSvc.PatchObjectStream nonFirstSend bool } func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + if !p.nonFirstSend { p.nonFirstSend = true - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + reqCtx, err := requestContext(ctx) if err != nil { - return err + return toStatusErr(err) } - reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject) + + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return err + return toStatusErr(err) } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, Method: nativeschema.MethodPatchObject, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - Role: reqInfo.Role, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + Role: nativeSchemaRole(reqCtx.Role), + BearerToken: reqCtx.BearerToken, + XHeaders: meta.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, 
prm); err != nil { @@ -228,17 +256,22 @@ func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error return &patchStreamBasicChecker{ apeChecker: c.apeChecker, - extractor: c.extractor, next: streamer, }, err } func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject) + + reqCtx, err := requestContext(ctx) if err != nil { return nil, err } @@ -252,7 +285,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj switch headerPart := resp.GetBody().GetHeaderPart().(type) { case *objectV2.ShortHeader: cidV2 := new(refs.ContainerID) - md.Container.WriteToV2(cidV2) + cnrID.WriteToV2(cidV2) header.SetContainerID(cidV2) header.SetVersion(headerPart.GetVersion()) header.SetCreationEpoch(headerPart.GetCreationEpoch()) @@ -268,16 +301,16 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, Header: header, Method: nativeschema.MethodHeadObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: 
reqCtx.BearerToken, + XHeaders: meta.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -286,24 +319,32 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error { - md, err := newMetadata(request, request.GetBody().GetContainerID(), nil) - if err != nil { - return err + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin } - reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject) + + var cnrID cid.ID + if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil { + if err := cnrID.ReadFromV2(*cnrV2); err != nil { + return toStatusErr(err) + } + } + + reqCtx, err := requestContext(stream.Context()) if err != nil { - return err + return toStatusErr(err) } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, + Namespace: reqCtx.Namespace, + Container: cnrID, Method: nativeschema.MethodSearchObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: meta.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -313,25 +354,31 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc } func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = 
origin + } + + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject) + + reqCtx, err := requestContext(ctx) if err != nil { return nil, err } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, Method: nativeschema.MethodDeleteObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: meta.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -346,25 +393,31 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) ( } func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) - if err != nil { - return err + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin } - reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject) + + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return err + return toStatusErr(err) + } + + reqCtx, err := requestContext(stream.Context()) + if err != nil { + return toStatusErr(err) } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - 
Object: md.Object, + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, Method: nativeschema.MethodRangeObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: meta.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -374,25 +427,31 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G } func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject) + + reqCtx, err := requestContext(ctx) if err != nil { return nil, err } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, Method: nativeschema.MethodHashObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: meta.GetXHeaders(), } resp, err := c.next.GetRangeHash(ctx, request) @@ -407,26 +466,32 
@@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa } func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - md, err := newMetadata(request, request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) if err != nil { return nil, err } - reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) + + reqCtx, err := requestContext(ctx) if err != nil { return nil, err } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, + Namespace: reqCtx.Namespace, + Container: cnrID, + Object: objID, Header: request.GetBody().GetObject().GetHeader(), Method: nativeschema.MethodPutObject, - Role: reqInfo.Role, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Role: nativeSchemaRole(reqCtx.Role), + SenderKey: hex.EncodeToString(reqCtx.SenderKey), + ContainerOwner: reqCtx.ContainerOwner, + BearerToken: reqCtx.BearerToken, + XHeaders: meta.GetXHeaders(), } if err = c.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -436,36 +501,18 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ return c.next.PutSingle(ctx, request) } -type request interface { - GetMetaHeader() *session.RequestMetaHeader - GetVerificationHeader() *session.RequestVerificationHeader -} - -func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta 
= origin +func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { + if cidV2 != nil { + if err = cnrID.ReadFromV2(*cidV2); err != nil { + return + } } - cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2) - if err != nil { - return - } - session, err := readSessionToken(cnrID, objID, meta.GetSessionToken()) - if err != nil { - return - } - bearer, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return - } - - md = Metadata{ - Container: cnrID, - Object: objID, - VerificationHeader: request.GetVerificationHeader(), - SessionToken: session, - BearerToken: bearer, + if objV2 != nil { + objID = new(oid.ID) + if err = objID.ReadFromV2(*objV2); err != nil { + return + } } return } diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go index 97dbfa658..46e55360d 100644 --- a/pkg/services/object/ape/types.go +++ b/pkg/services/object/ape/types.go @@ -7,11 +7,3 @@ import "context" type Checker interface { CheckAPE(context.Context, Prm) error } - -// InnerRingFetcher is an interface that must provide -// Inner Ring information. -type InnerRingFetcher interface { - // InnerRingKeys must return list of public keys of - // the actual inner ring. 
- InnerRingKeys(ctx context.Context) ([][]byte, error) -} diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go deleted file mode 100644 index 916bce427..000000000 --- a/pkg/services/object/ape/util_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package ape - -import ( - "slices" - "testing" - - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" - "github.com/stretchr/testify/require" -) - -func TestIsVerbCompatible(t *testing.T) { - table := map[string][]sessionSDK.ObjectVerb{ - nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch}, - nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete}, - nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet}, - nativeschema.MethodHeadObject: { - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectPatch, - }, - nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch}, - nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash}, - nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, - nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch}, - } - - verbs := []sessionSDK.ObjectVerb{ - sessionSDK.VerbObjectPut, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectSearch, - sessionSDK.VerbObjectPatch, - } - - var tok sessionSDK.Object - - for op, list := range table { - for _, verb := range verbs { - contains := slices.Contains(list, verb) - - 
tok.ForVerb(verb) - - require.Equal(t, contains, assertVerb(tok, op), - "%v in token, %s executing", verb, op) - } - } -} - -func TestAssertSessionRelation(t *testing.T) { - var tok sessionSDK.Object - cnr := cidtest.ID() - cnrOther := cidtest.ID() - obj := oidtest.ID() - objOther := oidtest.ID() - - // make sure ids differ, otherwise test won't work correctly - require.False(t, cnrOther.Equals(cnr)) - require.False(t, objOther.Equals(obj)) - - // bind session to the container (required) - tok.BindContainer(cnr) - - // test container-global session - require.NoError(t, assertSessionRelation(tok, cnr, nil)) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnrOther, nil)) - require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) - - // limit the session to the particular object - tok.LimitByObjects(obj) - - // test fixed object session (here obj arg must be non-nil everywhere) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnr, &objOther)) -} diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go index f2bd907db..b2ae79dbc 100644 --- a/pkg/services/object/common/target/target.go +++ b/pkg/services/object/common/target/target.go @@ -89,8 +89,10 @@ func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transforme if !ownerObj.Equals(ownerSession) { return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession) } - } else if !ownerObj.Equals(sessionInfo.Owner) { - return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) + } else { + if !ownerObj.Equals(sessionInfo.Owner) { + return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) + } } if prm.SignRequestPrivateKey == nil { diff --git 
a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go index 6593d3ca0..1998e9638 100644 --- a/pkg/services/object/common/writer/common.go +++ b/pkg/services/object/common/writer/common.go @@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator { } func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error { - traverser, err := placement.NewTraverser(ctx, n.Opts...) + traverser, err := placement.NewTraverser(ctx, n.Traversal.Opts...) if err != nil { return fmt.Errorf("could not create object placement traverser: %w", err) } @@ -56,7 +56,7 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, } // perform additional container broadcast if needed - if n.submitPrimaryPlacementFinish() { + if n.Traversal.submitPrimaryPlacementFinish() { err := n.ForEachNode(ctx, f) if err != nil { n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) @@ -101,7 +101,7 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. // in subsequent container broadcast. Note that we don't // process this node during broadcast if primary placement // on it failed. - n.submitProcessed(addr, item) + n.Traversal.submitProcessed(addr, item) } wg.Wait() diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go index fff58aca7..f7486eae7 100644 --- a/pkg/services/object/common/writer/distributed.go +++ b/pkg/services/object/common/writer/distributed.go @@ -95,10 +95,6 @@ func (x errIncompletePut) Error() string { return commonMsg } -func (x errIncompletePut) Unwrap() error { - return x.singleErr -} - // WriteObject implements the transformer.ObjectWriter interface. 
func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { t.obj = obj diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go index d5eeddf21..2458e352f 100644 --- a/pkg/services/object/common/writer/ec_test.go +++ b/pkg/services/object/common/writer/ec_test.go @@ -130,7 +130,7 @@ func TestECWriter(t *testing.T) { nodeKey, err := keys.NewPrivateKey() require.NoError(t, err) - log, err := logger.NewLogger(logger.Prm{}) + log, err := logger.NewLogger(nil) require.NoError(t, err) var n nmKeys diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go index 1c4d7d585..867d3f4ef 100644 --- a/pkg/services/object/delete/service.go +++ b/pkg/services/object/delete/service.go @@ -92,6 +92,6 @@ func New(gs *getsvc.Service, // WithLogger returns option to specify Delete service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "objectSDK.Delete service")) } } diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go index e80132489..e164627d2 100644 --- a/pkg/services/object/get/assemble.go +++ b/pkg/services/object/get/assemble.go @@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque detachedExecutor.execute(ctx) - return detachedExecutor.err + return detachedExecutor.statusError.err } diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index dfb31133c..0ee8aed53 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -28,7 +28,16 @@ func (r *request) executeOnContainer(ctx context.Context) { localStatus := r.status - for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 { + for { + if r.processCurrentEpoch(ctx, localStatus) { + break + } + + // check the maximum depth has been reached + if lookupDepth == 0 { + break + } + 
lookupDepth-- // go to the previous epoch diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go index 3a50308c2..557e9a028 100644 --- a/pkg/services/object/get/get.go +++ b/pkg/services/object/get/get.go @@ -87,51 +87,51 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error { exec.execute(ctx) - return exec.err + return exec.statusError.err } -func (r *request) execute(ctx context.Context) { - r.log.Debug(ctx, logs.ServingRequest) +func (exec *request) execute(ctx context.Context) { + exec.log.Debug(ctx, logs.ServingRequest) // perform local operation - r.executeLocal(ctx) + exec.executeLocal(ctx) - r.analyzeStatus(ctx, true) + exec.analyzeStatus(ctx, true) } -func (r *request) analyzeStatus(ctx context.Context, execCnr bool) { +func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) { // analyze local result - switch r.status { + switch exec.status { case statusOK: - r.log.Debug(ctx, logs.OperationFinishedSuccessfully) + exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) case statusINHUMED: - r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) + exec.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) case statusVIRTUAL: - r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) - r.assemble(ctx) + exec.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) + exec.assemble(ctx) case statusOutOfRange: - r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) + exec.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) case statusEC: - r.log.Debug(ctx, logs.GetRequestedObjectIsEC) - if r.isRaw() && execCnr { - r.executeOnContainer(ctx) - r.analyzeStatus(ctx, false) + exec.log.Debug(ctx, logs.GetRequestedObjectIsEC) + if exec.isRaw() && execCnr { + exec.executeOnContainer(ctx) + exec.analyzeStatus(ctx, false) } - r.assembleEC(ctx) + exec.assembleEC(ctx) default: - r.log.Debug(ctx, logs.OperationFinishedWithError, - zap.Error(r.err), + exec.log.Debug(ctx, logs.OperationFinishedWithError, + 
zap.Error(exec.err), ) var errAccessDenied *apistatus.ObjectAccessDenied - if execCnr && errors.As(r.err, &errAccessDenied) { + if execCnr && errors.As(exec.err, &errAccessDenied) { // Local get can't return access denied error, so this error was returned by // write to the output stream. So there is no need to try to find object on other nodes. return } if execCnr { - r.executeOnContainer(ctx) - r.analyzeStatus(ctx, false) + exec.executeOnContainer(ctx) + exec.analyzeStatus(ctx, false) } } } diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go index a103f5a7f..9ec10b5f2 100644 --- a/pkg/services/object/get/service.go +++ b/pkg/services/object/get/service.go @@ -53,6 +53,6 @@ func New( // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { return func(s *Service) { - s.log = l + s.log = l.With(zap.String("component", "Object.Get service")) } } diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go index 0ec8912fd..fc483b74b 100644 --- a/pkg/services/object/get/v2/service.go +++ b/pkg/services/object/get/v2/service.go @@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "Object.Get V2 service")) } } diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go index 0d73bcd4d..98207336c 100644 --- a/pkg/services/object/get/v2/streamer.go +++ b/pkg/services/object/get/v2/streamer.go @@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec p.SetHeader(objV2.GetHeader()) p.SetSignature(objV2.GetSignature()) - return s.Send(newResponse(p)) + return s.GetObjectStream.Send(newResponse(p)) } func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error { p := new(objectV2.GetObjectPartChunk) 
p.SetChunk(chunk) - return s.Send(newResponse(p)) + return s.GetObjectStream.Send(newResponse(p)) } func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { @@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { } func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error { - return s.Send(newRangeResponse(chunk)) + return s.GetObjectRangeStream.Send(newRangeResponse(chunk)) } func newRangeResponse(p []byte) *objectV2.GetRangeResponse { diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go index 4b7dcc530..bfa7fd619 100644 --- a/pkg/services/object/get/v2/util.go +++ b/pkg/services/object/get/v2/util.go @@ -182,7 +182,9 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran default: return nil, errUnknownChechsumType(t) case refs.SHA256: - p.SetHashGenerator(sha256.New) + p.SetHashGenerator(func() hash.Hash { + return sha256.New() + }) case refs.TillichZemor: p.SetHashGenerator(func() hash.Hash { return tz.New() diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go index 5d298bfed..953f82b48 100644 --- a/pkg/services/object/patch/service.go +++ b/pkg/services/object/patch/service.go @@ -28,7 +28,7 @@ func NewService(cfg *objectwriter.Config, // Patch calls internal service and returns v2 object streamer. 
func (s *Service) Patch() (object.PatchObjectStream, error) { - nodeKey, err := s.KeyStorage.GetKey(nil) + nodeKey, err := s.Config.KeyStorage.GetKey(nil) if err != nil { return nil, err } diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index ff13b1d3e..642b9f9fa 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -195,12 +195,7 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error { patch.FromV2(req.GetBody()) if !s.nonFirstSend { - err := s.patcher.ApplyHeaderPatch(ctx, - patcher.ApplyHeaderPatchPrm{ - NewSplitHeader: patch.NewSplitHeader, - NewAttributes: patch.NewAttributes, - ReplaceAttributes: patch.ReplaceAttributes, - }) + err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes) if err != nil { return fmt.Errorf("patch attributes: %w", err) } diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index 7aeb5857d..099486b3f 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -56,8 +56,8 @@ func NewService(ks *objutil.KeyStorage, } } -func (s *Service) Put() (*Streamer, error) { +func (p *Service) Put() (*Streamer, error) { return &Streamer{ - Config: s.Config, + Config: p.Config, }, nil } diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 90f473254..f3c2dca1a 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -102,7 +102,7 @@ func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Obje return target.ErrWrongPayloadSize } - maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx) + maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize(ctx) if obj.PayloadSize() > maxAllowedSize { return target.ErrExceedingMaxSize } @@ -166,13 +166,13 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o } func (s *Service) saveToREPReplicas(ctx 
context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { - iter := s.NewNodeIterator(placement.placementOptions) + iter := s.Config.NewNodeIterator(placement.placementOptions) iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly) iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast signer := &putSingleRequestSigner{ req: req, - keyStorage: s.KeyStorage, + keyStorage: s.Config.KeyStorage, signer: &sync.Once{}, } @@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace if err != nil { return err } - key, err := s.KeyStorage.GetKey(nil) + key, err := s.Config.KeyStorage.GetKey(nil) if err != nil { return err } signer := &putSingleRequestSigner{ req: req, - keyStorage: s.KeyStorage, + keyStorage: s.Config.KeyStorage, signer: &sync.Once{}, } @@ -225,7 +225,7 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS if !ok { return result, errors.New("missing container ID") } - cnrInfo, err := s.ContainerSource.Get(ctx, cnrID) + cnrInfo, err := s.Config.ContainerSource.Get(ctx, cnrID) if err != nil { return result, fmt.Errorf("could not get container by ID: %w", err) } @@ -249,14 +249,14 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS } result.placementOptions = append(result.placementOptions, placement.ForObject(objID)) - latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource) + latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.Config.NetmapSource) if err != nil { return result, fmt.Errorf("could not get latest network map: %w", err) } builder := placement.NewNetworkMapBuilder(latestNetmap) if localOnly { result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1)) - builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys) + builder = svcutil.NewLocalPlacement(builder, 
s.Config.NetmapKeys) } result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder)) return result, nil @@ -273,7 +273,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite client.NodeInfoFromNetmapElement(&info, nodeDesc.Info) - c, err := s.ClientConstructor.Get(info) + c, err := s.Config.ClientConstructor.Get(info) if err != nil { return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } @@ -283,7 +283,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error { localTarget := &objectwriter.LocalTarget{ - Storage: s.LocalStore, + Storage: s.Config.LocalStore, Container: container, } return localTarget.WriteObject(ctx, obj, meta) @@ -317,7 +317,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, if err != nil { objID, _ := obj.ID() cnrID, _ := obj.ContainerID() - s.Logger.Warn(ctx, logs.PutSingleRedirectFailure, + s.Config.Logger.Warn(ctx, logs.PutSingleRedirectFailure, zap.Error(err), zap.Stringer("address", addr), zap.Stringer("object_id", objID), diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go new file mode 100644 index 000000000..eb4041f80 --- /dev/null +++ b/pkg/services/object/request_context.go @@ -0,0 +1,24 @@ +package object + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" +) + +type RequestContextKeyT struct{} + +var RequestContextKey = RequestContextKeyT{} + +// RequestContext is a context passed between middleware handlers. 
+type RequestContext struct { + Namespace string + + SenderKey []byte + + ContainerOwner user.ID + + Role acl.Role + + BearerToken *bearer.Token +} diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go index 56fe56468..e1aeca957 100644 --- a/pkg/services/object/search/service.go +++ b/pkg/services/object/search/service.go @@ -94,6 +94,6 @@ func New(e *engine.StorageEngine, // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l + c.log = l.With(zap.String("component", "Object.Search service")) } } diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go index fd8e926dd..2b44227a5 100644 --- a/pkg/services/object/sign.go +++ b/pkg/services/object/sign.go @@ -96,8 +96,7 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - err = fmt.Errorf("could not close stream and receive response: %w", err) - resp = new(object.PutResponse) + return nil, fmt.Errorf("could not close stream and receive response: %w", err) } } @@ -133,8 +132,7 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - err = fmt.Errorf("could not close stream and receive response: %w", err) - resp = new(object.PatchResponse) + return nil, fmt.Errorf("could not close stream and receive response: %w", err) } } diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go index 0f24a9d96..45e6df339 100644 --- a/pkg/services/object_manager/placement/metrics.go +++ b/pkg/services/object_manager/placement/metrics.go @@ -2,90 +2,24 @@ package placement import ( "errors" - "fmt" - "maps" - "math" "strings" - "sync" - "sync/atomic" - locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" - locodebolt 
"git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) const ( attrPrefix = "$attribute:" - - geoDistance = "$geoDistance" ) type Metric interface { CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int } -type metricsParser struct { - locodeDBPath string - locodes map[string]locodedb.Point -} - -type MetricParser interface { - ParseMetrics([]string) ([]Metric, error) -} - -func NewMetricsParser(locodeDBPath string) (MetricParser, error) { - return &metricsParser{ - locodeDBPath: locodeDBPath, - }, nil -} - -func (p *metricsParser) initLocodes() error { - if len(p.locodes) != 0 { - return nil +func ParseMetric(raw string) (Metric, error) { + if attr, found := strings.CutPrefix(raw, attrPrefix); found { + return NewAttributeMetric(attr), nil } - if len(p.locodeDBPath) > 0 { - p.locodes = make(map[string]locodedb.Point) - locodeDB := locodebolt.New(locodebolt.Prm{ - Path: p.locodeDBPath, - }, - locodebolt.ReadOnly(), - ) - err := locodeDB.Open() - if err != nil { - return err - } - defer locodeDB.Close() - err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) { - p.locodes[k] = v - }) - if err != nil { - return err - } - return nil - } - return errors.New("set path to locode database") -} - -func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) { - var metrics []Metric - for _, raw := range priority { - if attr, found := strings.CutPrefix(raw, attrPrefix); found { - metrics = append(metrics, NewAttributeMetric(attr)) - } else if raw == geoDistance { - err := p.initLocodes() - if err != nil { - return nil, err - } - if len(p.locodes) == 0 { - return nil, fmt.Errorf("provide locodes database for metric %s", raw) - } - m := NewGeoDistanceMetric(p.locodes) - metrics = append(metrics, m) - } else { - return nil, fmt.Errorf("unsupported priority metric %s", raw) - } - } - return metrics, nil + return nil, errors.New("unsupported priority metric") } // attributeMetric 
describes priority metric based on attribute. @@ -107,79 +41,3 @@ func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.Node func NewAttributeMetric(attr string) Metric { return &attributeMetric{attribute: attr} } - -// geoDistanceMetric describes priority metric based on attribute. -type geoDistanceMetric struct { - locodes map[string]locodedb.Point - distance *atomic.Pointer[map[string]int] - mtx sync.Mutex -} - -func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric { - d := atomic.Pointer[map[string]int]{} - m := make(map[string]int) - d.Store(&m) - gm := &geoDistanceMetric{ - locodes: locodes, - distance: &d, - } - return gm -} - -// CalculateValue return distance in kilometers between current node and provided, -// if coordinates for provided node found. In other case return math.MaxInt. -func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int { - fl := from.LOCODE() - tl := to.LOCODE() - if fl == tl { - return 0 - } - m := gm.distance.Load() - if v, ok := (*m)[fl+tl]; ok { - return v - } - return gm.calculateDistance(fl, tl) -} - -func (gm *geoDistanceMetric) calculateDistance(from, to string) int { - gm.mtx.Lock() - defer gm.mtx.Unlock() - od := gm.distance.Load() - if v, ok := (*od)[from+to]; ok { - return v - } - nd := maps.Clone(*od) - var dist int - pointFrom, okFrom := gm.locodes[from] - pointTo, okTo := gm.locodes[to] - if okFrom && okTo { - dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude())) - } else { - dist = math.MaxInt - } - nd[from+to] = dist - gm.distance.Store(&nd) - - return dist -} - -// distance return amount of KM between two points. -// Parameters are latitude and longitude of point 1 and 2 in decimal degrees. -// Original implementation can be found here https://www.geodatasource.com/developers/go. 
-func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 { - radLat1 := math.Pi * lt1 / 180 - radLat2 := math.Pi * lt2 / 180 - radTheta := math.Pi * (ln1 - ln2) / 180 - - dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta) - - if dist > 1 { - dist = 1 - } - - dist = math.Acos(dist) - dist = dist * 180 / math.Pi - dist = dist * 60 * 1.1515 * 1.609344 - - return dist -} diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index d1370f21e..9c825bf19 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -601,53 +601,4 @@ func TestTraverserPriorityMetrics(t *testing.T) { next = tr.Next() require.Nil(t, next) }) - - t.Run("one rep one geo metric", func(t *testing.T) { - t.Skip() - selectors := []int{2} - replicas := []int{2} - - nodes, cnr := testPlacement(selectors, replicas) - - // Node_0, PK - ip4/0.0.0.0/tcp/0 - nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW") - // Node_1, PK - ip4/0.0.0.0/tcp/1 - nodes[0][1].SetAttribute("UN-LOCODE", "RU LED") - - sdkNode := testNode(2) - sdkNode.SetAttribute("UN-LOCODE", "FI HEL") - - nodesCopy := copyVectors(nodes) - - parser, err := NewMetricsParser("/path/to/locode_db") - require.NoError(t, err) - m, err := parser.ParseMetrics([]string{geoDistance}) - require.NoError(t, err) - - tr, err := NewTraverser(context.Background(), - ForContainer(cnr), - UseBuilder(&testBuilder{ - vectors: nodesCopy, - }), - WithoutSuccessTracking(), - WithPriorityMetrics(m), - WithNodeState(&nodeState{ - node: &sdkNode, - }), - ) - require.NoError(t, err) - - // Without priority metric `$geoDistance` the order will be: - // [ {Node_0 RU MOW}, {Node_1 RU LED}] - // With priority metric `$geoDistance` the order should be: - // [ {Node_1 RU LED}, {Node_0 RU MOW}] - next := tr.Next() - require.NotNil(t, next) - require.Equal(t, 2, 
len(next)) - require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey())) - require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) - - next = tr.Next() - require.Nil(t, next) - }) } diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index e5f001d5a..a4e36c2dc 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -61,8 +61,10 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr logs.TombstoneCouldNotGetTheTombstoneTheSource, zap.Error(err), ) - } else if ts != nil { - return g.handleTS(ctx, addrStr, ts, epoch) + } else { + if ts != nil { + return g.handleTS(ctx, addrStr, ts, epoch) + } } // requested tombstone not diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go index 2147a32fe..67ddf316f 100644 --- a/pkg/services/object_manager/tombstone/constructor.go +++ b/pkg/services/object_manager/tombstone/constructor.go @@ -3,7 +3,6 @@ package tombstone import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" "go.uber.org/zap" @@ -50,7 +49,9 @@ func NewChecker(oo ...Option) *ExpirationChecker { panicOnNil(cfg.tsSource, "Tombstone source") cache, err := lru.New[string, uint64](cfg.cacheSize) - assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize)) + if err != nil { + panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err)) + } return &ExpirationChecker{ cache: cache, diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go index 975941847..1ff07b05a 100644 --- a/pkg/services/object_manager/tombstone/source/source.go +++ b/pkg/services/object_manager/tombstone/source/source.go @@ 
-4,7 +4,6 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -39,7 +38,9 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) { // Panics if any of the provided options does not allow // constructing a valid tombstone local Source. func NewSource(p TombstoneSourcePrm) Source { - assert.False(p.s == nil, "Tombstone source: nil object service") + if p.s == nil { + panic("Tombstone source: nil object service") + } return Source(p) } diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index fbdeb3148..1ee31d480 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -101,7 +101,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult { var removeLocalChunk bool requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))] - if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { // current node is required node, we are happy return ecChunkProcessResult{ validPlacement: true, @@ -185,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec if uint32(i) == objInfo.ECInfo.Total { break } - if p.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{} } } @@ -210,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool { var eiErr 
*objectSDK.ECInfoError for _, n := range nodes { - if p.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { continue } _, err := p.remoteHeader(ctx, n, parentAddress, true) @@ -260,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info return } var err error - if p.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { _, err = p.localHeader(ctx, parentAddress) } else { _, err = p.remoteHeader(ctx, n, parentAddress, true) @@ -283,7 +283,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info } } else if client.IsErrObjectAlreadyRemoved(err) { restore = false - } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { + } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err)) p.replicator.HandleReplicationTask(ctx, replicator.Task{ NumCopies: 1, @@ -343,7 +343,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, pID, _ := part.ID() addr.SetObject(pID) targetNode := nodes[idx%len(nodes)] - if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) { p.replicator.HandleLocalPutTask(ctx, replicator.Task{ Addr: addr, Obj: part, @@ -371,7 +371,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I var obj *objectSDK.Object var err error for _, node := range nodes { - if p.netmapKeys.IsLocalKey(node.PublicKey()) { + if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) { obj, err = p.localObject(egCtx, objID) } else { obj, err = p.remoteObject(egCtx, node, objID) diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go index c91e7cc7c..4e8bacfec 100644 --- 
a/pkg/services/policer/policer.go +++ b/pkg/services/policer/policer.go @@ -1,13 +1,12 @@ package policer import ( - "fmt" "sync" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" lru "github.com/hashicorp/golang-lru/v2" + "go.uber.org/zap" ) type objectsInWork struct { @@ -55,8 +54,12 @@ func New(opts ...Option) *Policer { opts[i](c) } + c.log = c.log.With(zap.String("component", "Object Policer")) + cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize)) - assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize)) + if err != nil { + panic(err) + } return &Policer{ cfg: c, diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go index 216fe4919..bb38c72ad 100644 --- a/pkg/services/replicator/pull.go +++ b/pkg/services/replicator/pull.go @@ -3,7 +3,6 @@ package replicator import ( "context" "errors" - "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" @@ -43,7 +42,11 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { if err == nil { break } - endpoints := slices.Collect(node.NetworkEndpoints()) + var endpoints []string + node.IterateNetworkEndpoints(func(s string) bool { + endpoints = append(endpoints, s) + return false + }) p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), zap.Error(err), diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go index a940cef37..6910fa5af 100644 --- a/pkg/services/replicator/replicator.go +++ b/pkg/services/replicator/replicator.go @@ -7,6 +7,7 @@ import ( objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + 
"go.uber.org/zap" ) // Replicator represents the utility that replicates @@ -44,6 +45,8 @@ func New(opts ...Option) *Replicator { opts[i](c) } + c.log = c.log.With(zap.String("component", "Object Replicator")) + return &Replicator{ cfg: c, } diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go index f0591de71..12b221613 100644 --- a/pkg/services/session/executor.go +++ b/pkg/services/session/executor.go @@ -33,7 +33,10 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log } func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) { - s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create")) + s.log.Debug(ctx, logs.ServingRequest, + zap.String("component", "SessionService"), + zap.String("request", "Create"), + ) respBody, err := s.exec.Create(ctx, req.GetBody()) if err != nil { diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go index 132d62445..d312ea0ea 100644 --- a/pkg/services/session/storage/persistent/storage.go +++ b/pkg/services/session/storage/persistent/storage.go @@ -64,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) { // enable encryption if it // was configured so if cfg.privateKey != nil { - rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8) + rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8) cfg.privateKey.D.FillBytes(rawKey) c, err := aes.NewCipher(rawKey) diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go index 58757ff6d..c4b03cbe6 100644 --- a/pkg/services/tree/ape.go +++ b/pkg/services/tree/ape.go @@ -22,7 +22,7 @@ import ( ) func (s *Service) newAPERequest(ctx context.Context, namespace string, - cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) (aperequest.Request, 
error) { schemaMethod, err := converter.SchemaMethodFromACLOperation(operation) if err != nil { @@ -53,19 +53,15 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string, resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString()) } - resProps := map[string]string{ - nativeschema.ProperyKeyTreeID: treeID, - } - return aperequest.NewRequest( schemaMethod, - aperequest.NewResource(resourceName, resProps), + aperequest.NewResource(resourceName, make(map[string]string)), reqProps, ), nil } func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, - container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) error { namespace := "" cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns") @@ -73,7 +69,7 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, namespace = cntNamespace } - request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey) + request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey) if err != nil { return fmt.Errorf("failed to create ape request: %w", err) } diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go index 7b209fd47..0afc7660a 100644 --- a/pkg/services/tree/ape_test.go +++ b/pkg/services/tree/ape_test.go @@ -107,45 +107,6 @@ func TestCheckAPE(t *testing.T) { cid := cid.ID{} _ = cid.DecodeString(containerID) - t.Run("treeID rule", func(t *testing.T) { - los := inmemory.NewInmemoryLocalStorage() - mcs := inmemory.NewInmemoryMorphRuleChainStorage() - fid := newFrostfsIDProviderMock(t) - s := Service{ - cfg: cfg{ - frostfsidSubjectProvider: fid, - }, - apeChecker: checkercore.New(los, mcs, fid, &stMock{}), - } - - mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), 
&chain.Chain{ - Rules: []chain.Rule{ - { - Status: chain.QuotaLimitReached, - Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}}, - Resources: chain.Resources{ - Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, - }, - Condition: []chain.Condition{ - { - Op: chain.CondStringEquals, - Kind: chain.KindResource, - Key: nativeschema.ProperyKeyTreeID, - Value: versionTreeID, - }, - }, - }, - }, - MatchType: chain.MatchTypeFirstMatch, - }) - - err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey()) - - var chErr *checkercore.ChainRouterError - require.ErrorAs(t, err, &chErr) - require.Equal(t, chain.QuotaLimitReached, chErr.Status()) - }) - t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) { los := inmemory.NewInmemoryLocalStorage() mcs := inmemory.NewInmemoryMorphRuleChainStorage() @@ -191,7 +152,7 @@ func TestCheckAPE(t *testing.T) { MatchType: chain.MatchTypeFirstMatch, }) - err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) + err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) require.NoError(t, err) }) @@ -240,7 +201,7 @@ func TestCheckAPE(t *testing.T) { MatchType: chain.MatchTypeFirstMatch, }) - err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) + err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) require.NoError(t, err) }) } diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index a11700771..70f4a843b 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -9,10 +9,15 @@ import ( "time" internalNet 
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" + tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "github.com/hashicorp/golang-lru/v2/simplelru" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" ) type clientCache struct { @@ -48,7 +53,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) { func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) { c.Lock() - ccInt, ok := c.Get(netmapAddr) + ccInt, ok := c.LRU.Get(netmapAddr) c.Unlock() if ok { @@ -66,19 +71,14 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl } } - var netAddr network.Address - if err := netAddr.FromString(netmapAddr); err != nil { - return nil, err - } - - cc, err := dialTreeService(ctx, netAddr, c.key, c.ds) + cc, err := c.dialTreeService(ctx, netmapAddr) lastTry := time.Now() c.Lock() if err != nil { - c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) + c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) } else { - c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) + c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) } c.Unlock() @@ -88,3 +88,53 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl return NewTreeServiceClient(cc), nil } + +func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) { + var netAddr network.Address + if err := netAddr.FromString(netmapAddr); err != nil { + return nil, err + } + + opts := []grpc.DialOption{ + grpc.WithChainUnaryInterceptor( + qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), + 
metrics.NewUnaryClientInterceptor(), + tracing.NewUnaryClientInteceptor(), + tagging.NewUnaryClientInteceptor(), + ), + grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), + metrics.NewStreamClientInterceptor(), + tracing.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), + ), + grpc.WithContextDialer(c.ds.GrpcContextDialer()), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithDisableServiceConfig(), + } + + if !netAddr.IsTLSEnabled() { + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + req := &HealthcheckRequest{ + Body: &HealthcheckRequest_Body{}, + } + if err := SignMessage(req, c.key); err != nil { + return nil, err + } + + cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) + defer cancel() + // perform some request to check connection + if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { + _ = cc.Close() + return nil, err + } + return cc, nil +} diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index 56cbcc081..4ad760846 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -3,7 +3,6 @@ package tree import ( "context" "crypto/ecdsa" - "sync/atomic" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" @@ -42,7 +41,7 @@ type cfg struct { replicatorWorkerCount int replicatorTimeout time.Duration containerCacheSize int - authorizedKeys atomic.Pointer[[][]byte] + authorizedKeys [][]byte syncBatchSize int localOverrideStorage policyengine.LocalOverrideStorage @@ -148,7 +147,10 @@ func WithMetrics(v MetricsRegister) Option { // keys that have rights to use Tree service. 
func WithAuthorizedKeys(keys keys.PublicKeys) Option { return func(c *cfg) { - c.authorizedKeys.Store(fromPublicKeys(keys)) + c.authorizedKeys = nil + for _, key := range keys { + c.authorizedKeys = append(c.authorizedKeys, key.Bytes()) + } } } diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go index 647f8cb30..d92c749a8 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -19,8 +19,8 @@ var errNoSuitableNode = errors.New("no node was found to execute the request") func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) { var resp *Resp var outErr error - err := s.forEachNode(ctx, ns, func(fCtx context.Context, c TreeServiceClient) bool { - resp, outErr = callback(c, fCtx, req) + err := s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { + resp, outErr = callback(c, ctx, req) return true }) if err != nil { @@ -31,7 +31,7 @@ func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapS // forEachNode executes callback for each node in the container until true is returned. // Returns errNoSuitableNode if there was no successful attempt to dial any node. 
-func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error { +func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error { for _, n := range cntNodes { if bytes.Equal(n.PublicKey(), s.rawPub) { return nil @@ -41,15 +41,24 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo var called bool for _, n := range cntNodes { var stop bool - for endpoint := range n.NetworkEndpoints() { - stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool { - called = true - return f(fCtx, c) - }) - if called { - break + n.IterateNetworkEndpoints(func(endpoint string) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", + trace.WithAttributes( + attribute.String("endpoint", endpoint), + )) + defer span.End() + + c, err := s.cache.get(ctx, endpoint) + if err != nil { + return false } - } + + s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) + + called = true + stop = f(c) + return true + }) if stop { return nil } @@ -59,19 +68,3 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo } return nil } - -func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", - trace.WithAttributes( - attribute.String("endpoint", endpoint), - )) - defer span.End() - - c, err := s.cache.get(ctx, endpoint) - if err != nil { - return false - } - - s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) - return f(ctx, c) -} diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index ee40884eb..164815c76 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ 
-89,13 +89,29 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req var lastErr error var lastAddr string - for addr := range n.NetworkEndpoints() { + n.IterateNetworkEndpoints(func(addr string) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", + trace.WithAttributes( + attribute.String("public_key", hex.EncodeToString(n.PublicKey())), + attribute.String("address", addr), + ), + ) + defer span.End() + lastAddr = addr - lastErr = s.apply(ctx, n, addr, req) - if lastErr == nil { - break + + c, err := s.cache.get(ctx, addr) + if err != nil { + lastErr = fmt.Errorf("can't create client: %w", err) + return false } - } + + ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) + _, lastErr = c.Apply(ctx, req) + cancel() + + return lastErr == nil + }) if lastErr != nil { if errors.Is(lastErr, errRecentlyFailed) { @@ -114,26 +130,6 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req return nil } -func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", - trace.WithAttributes( - attribute.String("public_key", hex.EncodeToString(n.PublicKey())), - attribute.String("address", addr), - ), - ) - defer span.End() - - c, err := s.cache.get(ctx, addr) - if err != nil { - return fmt.Errorf("can't create client: %w", err) - } - - ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) - _, err = c.Apply(ctx, req) - cancel() - return err -} - func (s *Service) replicateLoop(ctx context.Context) { for range s.replicatorWorkerCount { go s.replicationWorker(ctx) @@ -206,7 +202,7 @@ func newApplyRequest(op *movePair) *ApplyRequest { TreeId: op.treeID, Operation: &LogMove{ ParentId: op.op.Parent, - Meta: op.op.Bytes(), + Meta: op.op.Meta.Bytes(), ChildId: op.op.Child, }, }, diff --git a/pkg/services/tree/service.go 
b/pkg/services/tree/service.go index 3994d6973..eeffec08b 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -17,7 +17,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -60,7 +59,6 @@ func New(opts ...Option) *Service { s.replicatorTimeout = defaultReplicatorSendTimeout s.syncBatchSize = defaultSyncBatchSize s.metrics = defaultMetricsRegister{} - s.authorizedKeys.Store(&[][]byte{}) for i := range opts { opts[i](&s.cfg) @@ -87,7 +85,7 @@ func New(opts ...Option) *Service { // Start starts the service. func (s *Service) Start(ctx context.Context) { - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String()) + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) go s.replicateLoop(ctx) go s.syncLoop(ctx) @@ -119,7 +117,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } @@ -163,7 +161,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } @@ -219,7 +217,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), 
acl.OpObjectDelete) if err != nil { return nil, err } @@ -264,7 +262,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } @@ -308,7 +306,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) return nil, err } - err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet) if err != nil { return nil, err } @@ -379,7 +377,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS return err } - err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet) if err != nil { return err } @@ -391,8 +389,8 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS if pos < 0 { var cli TreeService_GetSubTreeClient var outErr error - err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { - cli, outErr = c.GetSubTree(fCtx, req) + err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { + cli, outErr = c.GetSubTree(srv.Context(), req) return true }) if err != nil { @@ -438,8 +436,10 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid } if ms == nil { ms = m.Items - } else if len(m.Items) != 1 { - return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") + } else { + if len(m.Items) != 1 { + return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") + } } ts = append(ts, m.Time) ps = append(ps, p) @@ -655,8 +655,8 @@ func (s *Service) GetOpLog(req 
*GetOpLogRequest, srv TreeService_GetOpLogServer) if pos < 0 { var cli TreeService_GetOpLogClient var outErr error - err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { - cli, outErr = c.GetOpLog(fCtx, req) + err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { + cli, outErr = c.GetOpLog(srv.Context(), req) return true }) if err != nil { @@ -687,7 +687,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) Body: &GetOpLogResponse_Body{ Operation: &LogMove{ ParentId: lm.Parent, - Meta: lm.Bytes(), + Meta: lm.Meta.Bytes(), ChildId: lm.Child, }, }, @@ -784,15 +784,3 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec return new(HealthcheckResponse), nil } - -func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) { - s.authorizedKeys.Store(fromPublicKeys(newKeys)) -} - -func fromPublicKeys(keys keys.PublicKeys) *[][]byte { - buff := make([][]byte, len(keys)) - for i, k := range keys { - buff[i] = k.Bytes() - } - return &buff -} diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 8221a4546..d15438e81 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -38,7 +38,7 @@ var ( // Operation must be one of: // - 1. ObjectPut; // - 2. ObjectGet. 
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error { +func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error { err := verifyMessage(req) if err != nil { return err @@ -64,7 +64,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return fmt.Errorf("can't get request role: %w", err) } - if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil { + if err = s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey); err != nil { return apeErr(err) } return nil @@ -95,8 +95,8 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) { } key := sign.GetKey() - for _, currentKey := range *s.authorizedKeys.Load() { - if bytes.Equal(currentKey, key) { + for i := range s.authorizedKeys { + if bytes.Equal(s.authorizedKeys[i], key) { return true, nil } } diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 13a5c1395..97f8a727a 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -31,8 +31,6 @@ import ( "github.com/stretchr/testify/require" ) -const versionTreeID = "version" - type dummyNetmapSource struct { netmap.Source } @@ -152,7 +150,6 @@ func TestMessageSign(t *testing.T) { apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}), } - s.cfg.authorizedKeys.Store(&[][]byte{}) rawCID1 := make([]byte, sha256.Size) cid1.Encode(rawCID1) @@ -171,26 +168,26 @@ func TestMessageSign(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRW) t.Run("missing signature, no panic", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) }) require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, 
s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op)) t.Run("invalid CID", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) }) cnr.Value.SetBasicACL(acl.Private) t.Run("extension disabled", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) }) t.Run("invalid key", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op)) }) t.Run("bearer", func(t *testing.T) { @@ -203,7 +200,7 @@ func TestMessageSign(t *testing.T) { t.Run("invalid bearer", func(t *testing.T) { req.Body.BearerToken = []byte{0xFF} require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer CID", func(t *testing.T) { @@ -212,7 +209,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer owner", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), 
privs[2].PublicKey()) @@ -220,7 +217,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer signature", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -232,88 +229,20 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bv2.StableMarshal(nil) require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - }) - - t.Run("omit override within bt", func(t *testing.T) { - t.Run("personated", func(t *testing.T) { - bt := testBearerTokenNoOverride() - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override") - }) - - t.Run("impersonated", func(t *testing.T) { - bt := testBearerTokenNoOverride() - bt.SetImpersonate(true) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - }) - }) - - t.Run("invalid override within bearer token", func(t *testing.T) { - t.Run("personated", func(t *testing.T) { - bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = 
bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") - }) - - t.Run("impersonated", func(t *testing.T) { - bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) - bt.SetImpersonate(true) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") - }) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("impersonate", func(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRWExtended) var bt bearer.Token - bt.SetExp(10) bt.SetImpersonate(true) - bt.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - Name: cid1.EncodeToString(), - }, - Chains: []ape.Chain{}, - }) - require.NoError(t, bt.Sign(privs[0].PrivateKey)) - req.Body.BearerToken = bt.Marshal() - require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) - }) - - t.Run("impersonate but invalid signer", func(t *testing.T) { - var bt bearer.Token - bt.SetExp(10) - bt.SetImpersonate(true) - bt.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - Name: cid1.EncodeToString(), - }, - Chains: []ape.Chain{}, - }) require.NoError(t, bt.Sign(privs[1].PrivateKey)) req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, 
&privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -323,18 +252,18 @@ func TestMessageSign(t *testing.T) { t.Run("put and get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("only get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[2].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("none", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[3].PrivateKey)) - require.Error(t, 
s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) }) } @@ -353,25 +282,6 @@ func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token return b } -func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) bearer.Token { - var b bearer.Token - b.SetExp(currentEpoch + 1) - b.SetAPEOverride(bearer.APEOverride{ - Target: ape.ChainTarget{ - TargetType: ape.TargetTypeContainer, - }, - Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, - }) - - return b -} - -func testBearerTokenNoOverride() bearer.Token { - var b bearer.Token - b.SetExp(currentEpoch + 1) - return b -} - func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain { ruleGet := chain.Rule{ Status: chain.Allow, diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index af355639f..89450b739 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -2,9 +2,7 @@ package tree import ( "context" - "crypto/ecdsa" "crypto/sha256" - "crypto/tls" "errors" "fmt" "io" @@ -15,7 +13,6 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -25,14 +22,12 @@ import ( tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" 
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -78,8 +73,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { var treesToSync []string var outErr error - err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool { - resp, outErr = c.TreeList(fCtx, req) + err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool { + resp, outErr = c.TreeList(ctx, req) if outErr != nil { return false } @@ -247,7 +242,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, Parent: lm.GetParentId(), Child: lm.GetChildId(), } - if err := m.FromBytes(lm.GetMeta()); err != nil { + if err := m.Meta.FromBytes(lm.GetMeta()); err != nil { return err } select { @@ -299,27 +294,27 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, for i, n := range nodes { errGroup.Go(func() error { var nodeSynced bool - for addr := range n.NetworkEndpoints() { + n.IterateNetworkEndpoints(func(addr string) bool { var a network.Address if err := a.FromString(addr); err != nil { s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - continue + return false } - cc, err := dialTreeService(ctx, a, s.key, s.ds) + cc, err := s.createConnection(a) if err != nil { s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - continue + return false } + defer cc.Close() err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i]) if err != nil { s.log.Warn(ctx, 
logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) } nodeSynced = err == nil - _ = cc.Close() - break - } + return true + }) close(nodeOperationStreams[i]) if !nodeSynced { allNodesSynced.Store(false) @@ -344,47 +339,13 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return from } -func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) { - cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer())) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) - defer cancel() - - req := &HealthcheckRequest{ - Body: &HealthcheckRequest_Body{}, - } - if err := SignMessage(req, key); err != nil { - return nil, err - } - - // perform some request to check connection - if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { - _ = cc.Close() - return nil, err - } - return cc, nil -} - -func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - host, isTLS, err := client.ParseURI(a.URIAddr()) - if err != nil { - return nil, err - } - - creds := insecure.NewCredentials() - if isTLS { - creds = credentials.NewTLS(&tls.Config{}) - } - - defaultOpts := []grpc.DialOption{ +func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { + return grpc.NewClient(a.URIAddr(), grpc.WithChainUnaryInterceptor( qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing_grpc.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInterceptor(), + tracing_grpc.NewUnaryClientInteceptor(), + tagging.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), @@ -392,12 +353,10 @@ func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientC 
tracing_grpc.NewStreamClientInterceptor(), tagging.NewStreamClientInterceptor(), ), - grpc.WithTransportCredentials(creds), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), grpc.WithDisableServiceConfig(), - } - - return grpc.NewClient(host, append(defaultOpts, opts...)...) + ) } // ErrAlreadySyncing is returned when a service synchronization has already @@ -441,7 +400,7 @@ func (s *Service) syncLoop(ctx context.Context) { start := time.Now() - cnrs, err := s.cnrSource.List(ctx) + cnrs, err := s.cfg.cnrSource.List(ctx) if err != nil { s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err)) s.metrics.AddSyncDuration(time.Since(start), false) diff --git a/pkg/util/ape/parser.go b/pkg/util/ape/parser.go index 6f114d45b..a34a17f6f 100644 --- a/pkg/util/ape/parser.go +++ b/pkg/util/ape/parser.go @@ -174,11 +174,11 @@ func parseStatus(lexeme string) (apechain.Status, error) { case "deny": if !found { return apechain.AccessDenied, nil - } - if strings.EqualFold(expression, "QuotaLimitReached") { + } else if strings.EqualFold(expression, "QuotaLimitReached") { return apechain.QuotaLimitReached, nil + } else { + return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) } - return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) case "allow": if found { return 0, errUnknownStatusDetail diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go index 66581878a..547c8d50b 100644 --- a/pkg/util/attributes/parser_test.go +++ b/pkg/util/attributes/parser_test.go @@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) { mExp = mSrc } - for key, value := range node.Attributes() { + node.IterateAttributes(func(key, value string) { v, ok := mExp[key] require.True(t, ok) require.Equal(t, value, v) delete(mExp, key) - } + }) require.Empty(t, mExp) } diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go index 
2589ab786..923412a7f 100644 --- a/pkg/util/http/server.go +++ b/pkg/util/http/server.go @@ -76,7 +76,8 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server { o(c) } - if c.shutdownTimeout <= 0 { + switch { + case c.shutdownTimeout <= 0: panicOnOptValue("shutdown timeout", c.shutdownTimeout) } diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go index 6337039a9..b2942b52a 100644 --- a/pkg/util/keyer/dashboard.go +++ b/pkg/util/keyer/dashboard.go @@ -6,7 +6,6 @@ import ( "os" "text/tabwriter" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/mr-tron/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -105,7 +104,9 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) { func base58ToHex(data string) string { val, err := base58.Decode(data) - assert.NoError(err, "produced incorrect base58 value") + if err != nil { + panic("produced incorrect base58 value") + } return hex.EncodeToString(val) } diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index a1998cb1a..2eb5e5538 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -13,10 +13,8 @@ import ( // Logger represents a component // for writing messages to log. type Logger struct { - z *zap.Logger - c zapcore.Core - t Tag - w bool + z *zap.Logger + lvl zap.AtomicLevel } // Prm groups Logger's parameters. @@ -25,8 +23,16 @@ type Logger struct { // Parameters that have been connected to the Logger support its // configuration changing. // -// See also Logger.Reload, SetLevelString. +// Passing Prm after a successful connection via the NewLogger, connects +// the Prm to a new instance of the Logger. +// +// See also Reload, SetLevelString. 
type Prm struct { + // link to the created Logger + // instance; used for a runtime + // reconfiguration + _log *Logger + // support runtime rereading level zapcore.Level @@ -38,12 +44,6 @@ type Prm struct { // PrependTimestamp specifies whether to prepend a timestamp in the log PrependTimestamp bool - - // Options for zap.Logger - Options []zap.Option - - // map of tag's bit masks to log level, overrides lvl - tl map[Tag]zapcore.Level } const ( @@ -73,10 +73,20 @@ func (p *Prm) SetDestination(d string) error { return nil } -// SetTags parses list of tags with log level. -func (p *Prm) SetTags(tags [][]string) (err error) { - p.tl, err = parseTags(tags) - return err +// Reload reloads configuration of a connected instance of the Logger. +// Returns ErrLoggerNotConnected if no connection has been performed. +// Returns any reconfiguration error from the Logger directly. +func (p Prm) Reload() error { + if p._log == nil { + // incorrect logger usage + panic("parameters are not connected to any Logger") + } + + return p._log.reload(p) +} + +func defaultPrm() *Prm { + return new(Prm) } // NewLogger constructs a new zap logger instance. Constructing with nil @@ -90,7 +100,10 @@ func (p *Prm) SetTags(tags [][]string) (err error) { // - ISO8601 time encoding. // // Logger records a stack trace for all messages at or above fatal level. 
-func NewLogger(prm Prm) (*Logger, error) { +func NewLogger(prm *Prm) (*Logger, error) { + if prm == nil { + prm = defaultPrm() + } switch prm.dest { case DestinationUndefined, DestinationStdout: return newConsoleLogger(prm) @@ -101,9 +114,11 @@ func NewLogger(prm Prm) (*Logger, error) { } } -func newConsoleLogger(prm Prm) (*Logger, error) { +func newConsoleLogger(prm *Prm) (*Logger, error) { + lvl := zap.NewAtomicLevelAt(prm.level) + c := zap.NewProductionConfig() - c.Level = zap.NewAtomicLevelAt(zap.DebugLevel) + c.Level = lvl c.Encoding = "console" if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook @@ -115,22 +130,23 @@ func newConsoleLogger(prm Prm) (*Logger, error) { c.EncoderConfig.TimeKey = "" } - opts := []zap.Option{ + lZap, err := c.Build( zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1), - } - opts = append(opts, prm.Options...) - lZap, err := c.Build(opts...) + ) if err != nil { return nil, err } - l := &Logger{z: lZap, c: lZap.Core()} - l = l.WithTag(TagMain) + + l := &Logger{z: lZap, lvl: lvl} + prm._log = l return l, nil } -func newJournaldLogger(prm Prm) (*Logger, error) { +func newJournaldLogger(prm *Prm) (*Logger, error) { + lvl := zap.NewAtomicLevelAt(prm.level) + c := zap.NewProductionConfig() if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook @@ -144,7 +160,7 @@ func newJournaldLogger(prm Prm) (*Logger, error) { encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields) - core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields) + core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields) coreWithContext := core.With([]zapcore.Field{ zapjournald.SyslogFacility(zapjournald.LogDaemon), zapjournald.SyslogIdentifier(), @@ -162,82 +178,29 @@ func newJournaldLogger(prm Prm) (*Logger, error) { c.Sampling.Thereafter, samplerOpts..., ) - opts := 
[]zap.Option{ - zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), - zap.AddCallerSkip(1), - } - opts = append(opts, prm.Options...) - lZap := zap.New(samplingCore, opts...) - l := &Logger{z: lZap, c: lZap.Core()} - l = l.WithTag(TagMain) + lZap := zap.New(samplingCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1)) + + l := &Logger{z: lZap, lvl: lvl} + prm._log = l return l, nil } -// With create a child logger with new fields, don't affect the parent. -// Throws panic if tag is unset. +func (l *Logger) reload(prm Prm) error { + l.lvl.SetLevel(prm.level) + return nil +} + +func (l *Logger) WithOptions(options ...zap.Option) { + l.z = l.z.WithOptions(options...) +} + func (l *Logger) With(fields ...zap.Field) *Logger { - if l.t == 0 { - panic("tag is unset") - } - c := *l - c.z = l.z.With(fields...) - // With called under the logger - c.w = true - return &c -} - -type core struct { - c zapcore.Core - l zap.AtomicLevel -} - -func (c *core) Enabled(lvl zapcore.Level) bool { - return c.l.Enabled(lvl) -} - -func (c *core) With(fields []zapcore.Field) zapcore.Core { - clone := *c - clone.c = clone.c.With(fields) - return &clone -} - -func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - return c.c.Check(e, ce) -} - -func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error { - return c.c.Write(e, fields) -} - -func (c *core) Sync() error { - return c.c.Sync() -} - -// WithTag is an equivalent of calling [NewLogger] with the same parameters for the current logger. -// Throws panic if provided unsupported tag. 
-func (l *Logger) WithTag(tag Tag) *Logger { - if tag == 0 || tag > Tag(len(_Tag_index)-1) { - panic("unsupported tag " + tag.String()) - } - if l.w { - panic("unsupported operation for the logger's state") - } - c := *l - c.t = tag - c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core { - return &core{ - c: l.c.With([]zap.Field{zap.String("tag", tag.String())}), - l: tagToLogLevel[tag], - } - })) - return &c + return &Logger{z: l.z.With(fields...)} } func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{ z: z.WithOptions(zap.AddCallerSkip(1)), - t: TagMain, - c: z.Core(), } } diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go deleted file mode 100644 index b867ee6cc..000000000 --- a/pkg/util/logger/logger_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package logger - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zaptest/observer" -) - -func BenchmarkLogger(b *testing.B) { - ctx := context.Background() - m := map[string]Prm{} - - prm := Prm{} - require.NoError(b, prm.SetLevelString("debug")) - m["logging enabled"] = prm - - prm = Prm{} - require.NoError(b, prm.SetLevelString("error")) - m["logging disabled"] = prm - - prm = Prm{} - require.NoError(b, prm.SetLevelString("error")) - require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}})) - m["logging enabled via tags"] = prm - - prm = Prm{} - require.NoError(b, prm.SetLevelString("debug")) - require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}})) - m["logging disabled via tags"] = prm - - for k, v := range m { - b.Run(k, func(b *testing.B) { - logger, err := createLogger(v) - require.NoError(b, err) - UpdateLevelForTags(v) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - logger.Info(ctx, "test info") - } - }) - } -} - -type testCore struct { - core zapcore.Core -} - -func (c *testCore) Enabled(lvl zapcore.Level) bool { 
- return c.core.Enabled(lvl) -} - -func (c *testCore) With(fields []zapcore.Field) zapcore.Core { - c.core = c.core.With(fields) - return c -} - -func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - return ce.AddCore(e, c) -} - -func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error { - return nil -} - -func (c *testCore) Sync() error { - return c.core.Sync() -} - -func createLogger(prm Prm) (*Logger, error) { - prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { - tc := testCore{core: core} - return &tc - })} - return NewLogger(prm) -} - -func TestLoggerOutput(t *testing.T) { - obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel)) - - prm := Prm{} - require.NoError(t, prm.SetLevelString("debug")) - prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core { - return obs - })} - loggerMain, err := NewLogger(prm) - require.NoError(t, err) - UpdateLevelForTags(prm) - - loggerMainWith := loggerMain.With(zap.String("key", "value")) - - require.Panics(t, func() { - loggerMainWith.WithTag(TagShard) - }) - loggerShard := loggerMain.WithTag(TagShard) - loggerShard = loggerShard.With(zap.String("key1", "value1")) - - loggerMorph := loggerMain.WithTag(TagMorph) - loggerMorph = loggerMorph.With(zap.String("key2", "value2")) - - ctx := context.Background() - loggerMain.Debug(ctx, "main") - loggerMainWith.Debug(ctx, "main with") - loggerShard.Debug(ctx, "shard") - loggerMorph.Debug(ctx, "morph") - - require.Len(t, logs.All(), 4) - require.Len(t, logs.FilterFieldKey("key").All(), 1) - require.Len(t, logs.FilterFieldKey("key1").All(), 1) - require.Len(t, logs.FilterFieldKey("key2").All(), 1) - require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2) - require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1) - require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1) -} diff --git a/pkg/util/logger/logger_test.result 
b/pkg/util/logger/logger_test.result deleted file mode 100644 index 612fa2967..000000000 --- a/pkg/util/logger/logger_test.result +++ /dev/null @@ -1,46 +0,0 @@ -goos: linux -goarch: amd64 -pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger -cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz -BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 
B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op -BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op -PASS -ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go deleted file mode 100644 index 1b98f2e62..000000000 --- a/pkg/util/logger/tag_string.go +++ /dev/null @@ -1,43 +0,0 @@ -// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT. - -package logger - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[TagMain-1] - _ = x[TagMorph-2] - _ = x[TagGrpcSvc-3] - _ = x[TagIr-4] - _ = x[TagProcessor-5] - _ = x[TagEngine-6] - _ = x[TagBlobovnicza-7] - _ = x[TagBlobovniczaTree-8] - _ = x[TagBlobstor-9] - _ = x[TagFSTree-10] - _ = x[TagGC-11] - _ = x[TagShard-12] - _ = x[TagWriteCache-13] - _ = x[TagDeleteSvc-14] - _ = x[TagGetSvc-15] - _ = x[TagSearchSvc-16] - _ = x[TagSessionSvc-17] - _ = x[TagTreeSvc-18] - _ = x[TagPolicer-19] - _ = x[TagReplicator-20] -} - -const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator" - -var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148} - -func (i Tag) String() string { - i -= 1 - if i >= Tag(len(_Tag_index)-1) { - return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")" - } - return _Tag_name[_Tag_index[i]:_Tag_index[i+1]] -} diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go deleted file mode 100644 index a5386707e..000000000 --- a/pkg/util/logger/tags.go +++ /dev/null @@ -1,94 +0,0 @@ -package logger - -import ( - "fmt" - "strings" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -//go:generate stringer -type Tag -linecomment - -type Tag uint8 - -const ( - _ Tag = iota // - TagMain // main - TagMorph // morph - TagGrpcSvc // grpcsvc - TagIr // ir - TagProcessor // processor - TagEngine // engine - TagBlobovnicza // blobovnicza - TagBlobovniczaTree // blobovniczatree - TagBlobstor // blobstor - TagFSTree // fstree - TagGC // gc - TagShard // shard - TagWriteCache // writecache - TagDeleteSvc // deletesvc - TagGetSvc // getsvc - TagSearchSvc // searchsvc - TagSessionSvc // sessionsvc - TagTreeSvc // treesvc - TagPolicer // policer - TagReplicator // replicator - - defaultLevel = zapcore.InfoLevel -) - -var ( - tagToLogLevel = map[Tag]zap.AtomicLevel{} - stringToTag = map[string]Tag{} -) - -func init() { - for i := TagMain; 
i <= Tag(len(_Tag_index)-1); i++ { - tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel) - stringToTag[i.String()] = i - } -} - -// parseTags returns: -// - map(always instantiated) of tag to custom log level for that tag; -// - error if it occurred(map is empty). -func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) { - m := make(map[Tag]zapcore.Level) - if len(raw) == 0 { - return m, nil - } - for _, item := range raw { - str, level := item[0], item[1] - if len(level) == 0 { - // It is not necessary to parse tags without level, - // because default log level will be used. - continue - } - var l zapcore.Level - err := l.UnmarshalText([]byte(level)) - if err != nil { - return nil, err - } - tmp := strings.Split(str, ",") - for _, tagStr := range tmp { - tag, ok := stringToTag[strings.TrimSpace(tagStr)] - if !ok { - return nil, fmt.Errorf("unsupported tag %s", str) - } - m[tag] = l - } - } - return m, nil -} - -func UpdateLevelForTags(prm Prm) { - for k, v := range tagToLogLevel { - nk, ok := prm.tl[k] - if ok { - v.SetLevel(nk) - } else { - v.SetLevel(prm.level) - } - } -} diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go deleted file mode 100644 index 7373e538f..000000000 --- a/pkg/util/testing/netmap_source.go +++ /dev/null @@ -1,36 +0,0 @@ -package testing - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -var ( - errInvalidDiff = errors.New("invalid diff") - errNetmapNotFound = errors.New("netmap not found") -) - -type TestNetmapSource struct { - Netmaps map[uint64]*netmap.NetMap - CurrentEpoch uint64 -} - -func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { - if diff >= s.CurrentEpoch { - return nil, errInvalidDiff - } - return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff) -} - -func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) { - if nm, found := s.Netmaps[epoch]; found { - 
return nm, nil - } - return nil, errNetmapNotFound -} - -func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) { - return s.CurrentEpoch, nil -} diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go index 39a420358..f2f8881cf 100644 --- a/scripts/populate-metabase/internal/generate.go +++ b/scripts/populate-metabase/internal/generate.go @@ -1,10 +1,8 @@ package internal import ( - cryptorand "crypto/rand" "crypto/sha256" "fmt" - "math/rand" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -16,13 +14,14 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" + "golang.org/x/exp/rand" ) func GeneratePayloadPool(count uint, size uint) [][]byte { var pool [][]byte for range count { payload := make([]byte, size) - _, _ = cryptorand.Read(payload) + _, _ = rand.Read(payload) pool = append(pool, payload) }