Compare commits

...

44 commits

Author SHA1 Message Date
7456c8556a [#536] blobovnicza: Add blobovniczatree DB cache
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-09-01 13:53:11 +03:00
c672f59ab8 [#536] blobovnicza: Drop cache
Each blobovnicza instance is opened while it is in use.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-09-01 13:51:26 +03:00
b9b86d2ec8 [#666] shard/test: Fix data race in metrics tests
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-31 08:39:42 +00:00
4dff9555f1 [#568] writecache: Improve flushing scheme for badger
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2023-08-30 17:22:28 +00:00
806cc13d9f [#658] client: Refactor PrmObjectGet/Head/Range usage
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-30 17:13:23 +00:00
1daef2ceeb [#660] writecache: Fix remaining addr2key uses
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-30 17:12:33 +00:00
fe5aa06a75 [#665] node: Bind length of copies number to number of replicas
Allow a single value in the copies number array for backward compatibility.

Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2023-08-30 17:11:55 +00:00
91f3745b58 [#659] debian: Remove nspcc email
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-30 08:29:26 +00:00
7654847f79 [#659] adm: Remove nspcc.ru from the default email
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-30 08:29:26 +00:00
a724debb19 [#632] .forgejo: Print --version
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-29 12:41:45 +03:00
55b82e744b [#529] objectcore: Use common sender classifier
Use common sender classifier for ACL service and format validator.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-29 10:33:06 +03:00
ae81d6660a [#529] objectcore: Fix object content validation
There are old objects where the owner of the object
may not match the one who issued the token.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-29 10:33:06 +03:00
ab2614ec2d [#528] objectcore: Validate token issuer
Add validation of the token issuer against the object owner.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-29 10:10:10 +03:00
4ea0df77d0 [#574] policer: Check if the container was really removed
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-28 14:21:38 +00:00
554ff2c06b [#574] core: Extend Source interface with DeletionInfo method
* Introduce common method EverExisted
* Define DeletionInfo for structs that must implement Source
* Refactor tree srv

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-28 14:21:38 +00:00
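(The DeletionInfo extension reappears in several file diffs further down — the container cache, the tree service source and the morph client. As a minimal sketch of the resulting interface shape, with placeholder types standing in for the real cid/container types rather than the actual frostfs-node definitions:)

```
// Sketch only: ID, Container and DelInfo are placeholders for cid.ID,
// container.Container and container.DelInfo from frostfs-sdk-go / frostfs-node.
package container

type (
	ID        [32]byte
	Container struct{}
	DelInfo   struct{}
)

// Source mirrors the shape described by the commit: Get is kept as-is and
// DeletionInfo is added, so callers such as the policer and the tree service
// can tell a removed container apart from a failed lookup.
type Source interface {
	Get(ID) (*Container, error)
	DeletionInfo(ID) (*DelInfo, error)
}
```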
9072772a09 [#649] shard/test: Increase GC remover interval
This was set in #348 to speed up tests.
It seems 100ms doesn't increase overall test time,
but it reduces the amount of logs by a factor of 100.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-25 10:10:25 +00:00
c4db8e7690 [#637] shard/test: Fix data race
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-25 10:10:25 +00:00
f8ba60aa0c [#648] objsvc/delete: Handle errors in Go style
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-25 09:45:35 +00:00
d2084ece41 [#648] objsvc/delete: Remove redundant logs
We never propagate delete requests to the container node, because
tombstone broadcast is done via PUT. No need to pollute logs.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-25 09:45:35 +00:00
40b556fc19 [#647] objsvc/search: Improve testing coverage
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-25 10:40:01 +03:00
4db2cbc927 [#647] objsvc/search: Wrap in uniqueIDWriter during parameter setting
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-25 10:40:01 +03:00
966ad22abf [#647] objsvc/search: Simplify error handling
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-25 10:40:01 +03:00
56f841b022 [#647] objsvc/search: Remove TraverserGenerator wrapper
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-25 10:40:01 +03:00
ba58144de1 [#647] objsvc/search: Remove netmap.Source wrapper
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-25 10:40:01 +03:00
c9e3c9956e [#643] objsvc/put: Unify extraBroadcastEnabled usage
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-24 11:03:17 +03:00
facd3b2c4b [#643] objsvc/put: Unify placement iterators
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-24 11:03:17 +03:00
3fcf56f2fb [#643] objsvc/put: Copy config to distributedTarget
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-24 11:03:17 +03:00
96e690883f [#638] Unify test loggers
In some places we have debug=false, in others debug=true.
Let's be consistent.

Semantic patch:
```
@@
@@
-test.NewLogger(..., false)
+test.NewLogger(..., true)
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-23 11:21:05 +00:00
322c1dc273 [#638] Use test.NewLogger() in tests
Semantic patch (restricted to **/*_test.go):
```
@@
@@
+import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-import "go.uber.org/zap"
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"

-&logger.Logger{Logger: zap.L()}
+test.NewLogger(t, false)
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-23 11:21:05 +00:00
02b03d9c4f [#638] logger: Remove sampling from test loggers
Losing logs is always a bad idea, especially when we debug tests.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-23 11:21:05 +00:00
82cc453be9 [#xx] shard: Fix data race in metrics tests
Protect test metric store fields with a mutex. Probably not every field
needs to be protected, but better safe than sorry.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-23 10:26:12 +00:00
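(For context, the pattern this commit describes is simply a mutex guarding the fields of the test double; a minimal illustration with invented names, not the actual shard test helpers:)

```
// Illustrative only: a test metrics store whose fields are guarded by a
// mutex so concurrent shard operations in tests do not race.
package shardtest

import "sync"

type mockMetrics struct {
	mtx         sync.Mutex
	objectCount uint64
}

func (m *mockMetrics) AddToObjectCounter(delta uint64) {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	m.objectCount += delta
}

func (m *mockMetrics) ObjectCount() uint64 {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	return m.objectCount
}
```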
238b8f10a0 [#630] cli: Fix SDK SetEACLPrm usage for PrmContainerSetEACL
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-22 14:25:39 +00:00
345a1a69a2 [#635] Use internal key type when deleting from badger wc
Signed-off-by: Alejandro Lopez <a.lopez@yadro.com>
2023-08-22 10:53:19 +03:00
dc3bc08c07 [#631] lens: Fix db type flag name
Typo from 1a0cb0f34a.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-21 17:18:05 +00:00
23be3eb627 [#574] tree: Check if container is really removed
* Use the DeletionInfo method of the morph client to check if
  the container has really been removed from neo-go

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-21 12:50:20 +03:00
42fb6fb372 [#574] morph: Add DeletionInfo method for morph client
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-21 12:49:06 +03:00
62c2ad4b22 [#626] logs: Remove autogenerated comments
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2023-08-21 11:15:06 +03:00
84ea075587 [#625] cli: Fix SDK EACLPrm usage for PrmContainerEACL
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2023-08-21 10:36:47 +03:00
354a92ea2c [#602] blobovnicza: Add leaf width implementation
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-21 10:27:32 +03:00
d3904ec599 [#602] config: Add blobovnicza leaf width parameter
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-21 10:27:32 +03:00
4d9a6c07fb [#618] core: Replace fmt.Sprintf with strconv.FormatUint
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2023-08-21 07:14:50 +00:00
a1f1d233cc [#618] linters: bump truecloudlab-linters to 0.0.2
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2023-08-21 07:14:50 +00:00
f2811f8585 [#602] metrics: Add blobovnicza items counter
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-18 13:01:27 +03:00
c4e1d8eb07 [#602] node: Fix blobovnicza typos
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-08-18 11:14:10 +03:00
113 changed files with 2806 additions and 2023 deletions

View file

@@ -24,6 +24,7 @@ jobs:
       - name: Build CLI
         run: make bin/frostfs-cli
+      - run: bin/frostfs-cli --version
       - name: Build NODE
         run: make bin/frostfs-node
@@ -33,6 +34,8 @@ jobs:
       - name: Build ADM
         run: make bin/frostfs-adm
+      - run: bin/frostfs-adm --version
       - name: Build LENS
         run: make bin/frostfs-lens
+      - run: bin/frostfs-lens --version

View file

@@ -38,10 +38,11 @@ linters-settings:
       pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
       alias: objectSDK
   custom:
-    noliteral:
+    truecloudlab-linters:
       path: bin/external_linters.so
       original-url: git.frostfs.info/TrueCloudLab/linters.git
       settings:
+        noliteral:
          target-methods : ["reportFlushError", "reportError"]
          disable-packages: ["codes", "err", "res","exec"]
          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -77,6 +78,6 @@ linters:
     - gocognit
     - contextcheck
     - importas
-    - noliteral
+    - truecloudlab-linters
   disable-all: true
   fast: false

View file

@@ -9,6 +9,7 @@ HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 GO_VERSION ?= 1.21
 LINT_VERSION ?= 1.54.0
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
 ARCH = amd64
 BIN = bin
@@ -26,7 +27,7 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
     sed "s/-/~/")-${OS_RELEASE}
 OUTPUT_LINT_DIR ?= $(shell pwd)/bin
-LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)
+LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
 TMP_DIR := .cache
 .PHONY: help all images dep clean fmts fmt imports test lint docker/lint
@@ -139,7 +140,7 @@ pre-commit-run:
 lint-install:
 	@mkdir -p $(TMP_DIR)
 	@rm -rf $(TMP_DIR)/linters
-	@git clone --depth 1 https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
+	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
 	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
 	@rm -rf $(TMP_DIR)/linters
 	@rmdir $(TMP_DIR) 2>/dev/null || true

View file

@@ -145,12 +145,12 @@ func registerNNS(nnsCs *state.Contract, c *initializeContext, zone string, domai
 		emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
 			zone, c.CommitteeAcc.Contract.ScriptHash(),
-			"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
+			frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
 		emit.Opcodes(bw.BinWriter, opcode.ASSERT)
 		emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
 			domain, c.CommitteeAcc.Contract.ScriptHash(),
-			"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
+			frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
 		emit.Opcodes(bw.BinWriter, opcode.ASSERT)
 	} else {
 		s, ok, err := c.nnsRegisterDomainScript(nnsCs.Hash, cs.Hash, domain)

View file

@@ -27,6 +27,8 @@ import (
 const defaultExpirationTime = 10 * 365 * 24 * time.Hour / time.Second
+const frostfsOpsEmail = "ops@frostfs.info"
 func (c *initializeContext) setNNS() error {
 	nnsCs, err := c.Client.GetContractStateByID(1)
 	if err != nil {
@@ -40,7 +42,7 @@ func (c *initializeContext) setNNS() error {
 	bw := io.NewBufBinWriter()
 	emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
 		"frostfs", c.CommitteeAcc.Contract.ScriptHash(),
-		"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
+		frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
 	emit.Opcodes(bw.BinWriter, opcode.ASSERT)
 	if err := c.sendCommitteeTx(bw.Bytes(), true); err != nil {
 		return fmt.Errorf("can't add domain root to NNS: %w", err)
@@ -122,7 +124,7 @@ func (c *initializeContext) emitUpdateNNSGroupScript(bw *io.BufBinWriter, nnsHas
 	if isAvail {
 		emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
 			morphClient.NNSGroupKeyName, c.CommitteeAcc.Contract.ScriptHash(),
-			"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
+			frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
 		emit.Opcodes(bw.BinWriter, opcode.ASSERT)
 	}
@@ -170,7 +172,7 @@ func (c *initializeContext) nnsRegisterDomainScript(nnsHash, expectedHash util.U
 	bw := io.NewBufBinWriter()
 	emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
 		domain, c.CommitteeAcc.Contract.ScriptHash(),
-		"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
+		frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
 	emit.Opcodes(bw.BinWriter, opcode.ASSERT)
 	if bw.Err != nil {

View file

@@ -176,8 +176,8 @@ func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteCon
 // EACLPrm groups parameters of EACL operation.
 type EACLPrm struct {
-	commonPrm
-	client.PrmContainerEACL
+	Client       *client.Client
+	ClientParams client.PrmContainerEACL
 }
 // EACLRes groups the resulting values of EACL operation.
@@ -194,15 +194,15 @@ func (x EACLRes) EACL() eacl.Table {
 //
 // Returns any error which prevented the operation from completing correctly in error return.
 func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) {
-	res.cliRes, err = prm.cli.ContainerEACL(ctx, prm.PrmContainerEACL)
+	res.cliRes, err = prm.Client.ContainerEACL(ctx, prm.ClientParams)
 	return
 }
 // SetEACLPrm groups parameters of SetEACL operation.
 type SetEACLPrm struct {
-	commonPrm
-	client.PrmContainerSetEACL
+	Client       *client.Client
+	ClientParams client.PrmContainerSetEACL
 }
 // SetEACLRes groups the resulting values of SetEACL operation.
@@ -217,7 +217,7 @@ type SetEACLRes struct{}
 //
 // Returns any error which prevented the operation from completing correctly in error return.
 func SetEACL(ctx context.Context, prm SetEACLPrm) (res SetEACLRes, err error) {
-	_, err = prm.cli.ContainerSetEACL(ctx, prm.PrmContainerSetEACL)
+	_, err = prm.Client.ContainerSetEACL(ctx, prm.ClientParams)
 	return
 }
@@ -563,28 +563,19 @@ func (x GetObjectRes) Header() *objectSDK.Object {
 // Returns any error which prevented the operation from completing correctly in error return.
 // For raw reading, returns *object.SplitInfoError error if object is virtual.
 func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
-	var getPrm client.PrmObjectGet
-	getPrm.FromContainer(prm.objAddr.Container())
-	getPrm.ByID(prm.objAddr.Object())
-	if prm.sessionToken != nil {
-		getPrm.WithinSession(*prm.sessionToken)
-	}
-	if prm.bearerToken != nil {
-		getPrm.WithBearerToken(*prm.bearerToken)
-	}
-	if prm.raw {
-		getPrm.MarkRaw()
-	}
-	if prm.local {
-		getPrm.MarkLocal()
-	}
-	getPrm.WithXHeaders(prm.xHeaders...)
+	cnr := prm.objAddr.Container()
+	obj := prm.objAddr.Object()
+	getPrm := client.PrmObjectGet{
+		XHeaders:    prm.xHeaders,
+		BearerToken: prm.bearerToken,
+		Session:     prm.sessionToken,
+		Raw:         prm.raw,
+		Local:       prm.local,
+		ContainerID: &cnr,
+		ObjectID:    &obj,
+	}
 	rdr, err := prm.cli.ObjectGetInit(ctx, getPrm)
 	if err != nil {
 		return nil, fmt.Errorf("init object reading on client: %w", err)
@@ -639,29 +630,20 @@ func (x HeadObjectRes) Header() *objectSDK.Object {
 // Returns any error which prevented the operation from completing correctly in error return.
 // For raw reading, returns *object.SplitInfoError error if object is virtual.
 func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) {
-	var cliPrm client.PrmObjectHead
-	cliPrm.FromContainer(prm.objAddr.Container())
-	cliPrm.ByID(prm.objAddr.Object())
-	if prm.sessionToken != nil {
-		cliPrm.WithinSession(*prm.sessionToken)
-	}
-	if prm.bearerToken != nil {
-		cliPrm.WithBearerToken(*prm.bearerToken)
-	}
-	if prm.raw {
-		cliPrm.MarkRaw()
-	}
-	if prm.local {
-		cliPrm.MarkLocal()
-	}
-	cliPrm.WithXHeaders(prm.xHeaders...)
-	res, err := prm.cli.ObjectHead(ctx, cliPrm)
+	cnr := prm.objAddr.Container()
+	obj := prm.objAddr.Object()
+	headPrm := client.PrmObjectHead{
+		XHeaders:    prm.xHeaders,
+		BearerToken: prm.bearerToken,
+		Session:     prm.sessionToken,
+		Raw:         prm.raw,
+		Local:       prm.local,
+		ContainerID: &cnr,
+		ObjectID:    &obj,
+	}
+	res, err := prm.cli.ObjectHead(ctx, headPrm)
 	if err != nil {
 		return nil, fmt.Errorf("read object header via client: %w", err)
 	}
@@ -862,32 +844,22 @@ type PayloadRangeRes struct{}
 // Returns any error which prevented the operation from completing correctly in error return.
 // For raw reading, returns *object.SplitInfoError error if object is virtual.
 func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, error) {
-	var cliPrm client.PrmObjectRange
-	cliPrm.FromContainer(prm.objAddr.Container())
-	cliPrm.ByID(prm.objAddr.Object())
-	if prm.sessionToken != nil {
-		cliPrm.WithinSession(*prm.sessionToken)
-	}
-	if prm.bearerToken != nil {
-		cliPrm.WithBearerToken(*prm.bearerToken)
-	}
-	if prm.raw {
-		cliPrm.MarkRaw()
-	}
-	if prm.local {
-		cliPrm.MarkLocal()
-	}
-	cliPrm.SetOffset(prm.rng.GetOffset())
-	cliPrm.SetLength(prm.rng.GetLength())
-	cliPrm.WithXHeaders(prm.xHeaders...)
-	rdr, err := prm.cli.ObjectRangeInit(ctx, cliPrm)
+	cnr := prm.objAddr.Container()
+	obj := prm.objAddr.Object()
+	rangePrm := client.PrmObjectRange{
+		XHeaders:    prm.xHeaders,
+		BearerToken: prm.bearerToken,
+		Session:     prm.sessionToken,
+		Raw:         prm.raw,
+		Local:       prm.local,
+		ContainerID: &cnr,
+		ObjectID:    &obj,
+		Offset:      prm.rng.GetOffset(),
+		Length:      prm.rng.GetLength(),
+	}
+	rdr, err := prm.cli.ObjectRangeInit(ctx, rangePrm)
 	if err != nil {
 		return nil, fmt.Errorf("init payload reading: %w", err)
 	}

View file

@@ -8,6 +8,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"github.com/spf13/cobra"
 )
@@ -20,9 +21,12 @@ var getExtendedACLCmd = &cobra.Command{
 		pk := key.GetOrGenerate(cmd)
 		cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
-		var eaclPrm internalclient.EACLPrm
-		eaclPrm.SetClient(cli)
-		eaclPrm.SetContainer(id)
+		eaclPrm := internalclient.EACLPrm{
+			Client: cli,
+			ClientParams: client.PrmContainerEACL{
+				ContainerID: &id,
+			},
+		}
 		res, err := internalclient.EACL(cmd.Context(), eaclPrm)
 		commonCmd.ExitOnErr(cmd, "rpc error: %w", err)

View file

@@ -10,6 +10,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"github.com/spf13/cobra"
 )
@@ -48,12 +49,12 @@ Container ID in EACL table will be substituted with ID from the CLI.`,
 			cmd.Println("ACL extension is enabled in the container, continue processing.")
 		}
-		var setEACLPrm internalclient.SetEACLPrm
-		setEACLPrm.SetClient(cli)
-		setEACLPrm.SetTable(*eaclTable)
-		if tok != nil {
-			setEACLPrm.WithinSession(*tok)
-		}
+		setEACLPrm := internalclient.SetEACLPrm{
+			Client: cli,
+			ClientParams: client.PrmContainerSetEACL{
+				Table:   eaclTable,
+				Session: tok,
+			},
+		}
 		_, err := internalclient.SetEACL(cmd.Context(), setEACLPrm)
@@ -65,9 +66,12 @@ Container ID in EACL table will be substituted with ID from the CLI.`,
 			cmd.Println("awaiting...")
-			var getEACLPrm internalclient.EACLPrm
-			getEACLPrm.SetClient(cli)
-			getEACLPrm.SetContainer(id)
+			getEACLPrm := internalclient.EACLPrm{
+				Client: cli,
+				ClientParams: client.PrmContainerEACL{
+					ContainerID: &id,
+				},
+			}
 			for i := 0; i < awaitTimeout; i++ {
 				time.Sleep(1 * time.Second)

View file

@@ -37,6 +37,6 @@ func AddOutputFileFlag(cmd *cobra.Command, v *string) {
 // AddDBTypeFlag adds the DB type flag to the passed cobra command.
 func AddDBTypeFlag(cmd *cobra.Command, v *string) {
-	cmd.Flags().StringVar(v, flagOutFile, "bbolt",
+	cmd.Flags().StringVar(v, flagDBType, "bbolt",
 		"Type of DB used by write cache (default: bbolt)")
 }

View file

@@ -142,7 +142,8 @@ func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
 // wrapper over TTL cache of values read from the network
 // that implements container storage.
 type ttlContainerStorage struct {
-	*ttlNetCache[cid.ID, *container.Container]
+	containerCache *ttlNetCache[cid.ID, *container.Container]
+	delInfoCache   *ttlNetCache[cid.ID, *container.DelInfo]
 }
 func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
@@ -151,18 +152,31 @@ func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContain
 	lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
 		return v.Get(id)
 	})
+	lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) {
+		return v.DeletionInfo(id)
+	})
-	return ttlContainerStorage{lruCnrCache}
+	return ttlContainerStorage{
+		containerCache: lruCnrCache,
+		delInfoCache:   lruDelInfoCache,
+	}
 }
 func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
-	s.set(cnr, nil, new(apistatus.ContainerNotFound))
+	s.containerCache.set(cnr, nil, new(apistatus.ContainerNotFound))
+	// The removal invalidates possibly stored error response.
+	s.delInfoCache.remove(cnr)
 }
 // Get returns container value from the cache. If value is missing in the cache
 // or expired, then it returns value from side chain and updates the cache.
 func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
-	return s.get(cnr)
+	return s.containerCache.get(cnr)
+}
+
+func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
+	return s.delInfoCache.get(cnr)
 }
 type ttlEACLStorage struct {

View file

@@ -174,6 +174,7 @@ type subStorageCfg struct {
 	// blobovnicza-specific
 	size uint64
 	width uint64
+	leafWidth uint64
 	openedCacheSize int
 }
@@ -288,6 +289,7 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol
 			sCfg.size = sub.Size()
 			sCfg.depth = sub.ShallowDepth()
 			sCfg.width = sub.ShallowWidth()
+			sCfg.leafWidth = sub.LeafWidth()
 			sCfg.openedCacheSize = sub.OpenedCacheSize()
 		case fstree.Type:
 			sub := fstreeconfig.From((*config.Config)(storagesCfg[i]))
@@ -509,6 +511,8 @@ type cfgObject struct {
 	cfgLocalStorage cfgLocalStorage
 	tombstoneLifetime uint64
+	skipSessionTokenIssuerVerification bool
 }
 type cfgNotifications struct {
@@ -677,6 +681,7 @@ func initCfgObject(appCfg *config.Config) cfgObject {
 	return cfgObject{
 		pool: initObjectPool(appCfg),
 		tombstoneLifetime: objectconfig.TombstoneLifetime(appCfg),
+		skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(),
 	}
 }
@@ -774,6 +779,7 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
 			blobovniczatree.WithBlobovniczaSize(sRead.size),
 			blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
 			blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
+			blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth),
 			blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
 			blobovniczatree.WithLogger(c.log),
 		}

View file

@@ -94,6 +94,7 @@ func TestEngineSection(t *testing.T) {
 			require.EqualValues(t, 1, blz.ShallowDepth())
 			require.EqualValues(t, 4, blz.ShallowWidth())
 			require.EqualValues(t, 50, blz.OpenedCacheSize())
+			require.EqualValues(t, 10, blz.LeafWidth())
 			require.Equal(t, "tmp/0/blob", ss[1].Path())
 			require.EqualValues(t, 0644, ss[1].Perm())
@@ -142,6 +143,7 @@ func TestEngineSection(t *testing.T) {
 			require.EqualValues(t, 1, blz.ShallowDepth())
 			require.EqualValues(t, 4, blz.ShallowWidth())
 			require.EqualValues(t, 50, blz.OpenedCacheSize())
+			require.EqualValues(t, 10, blz.LeafWidth())
 			require.Equal(t, "tmp/1/blob", ss[1].Path())
 			require.EqualValues(t, 0644, ss[1].Perm())

View file

@@ -102,3 +102,13 @@ func (x *Config) OpenedCacheSize() int {
 func (x *Config) BoltDB() *boltdbconfig.Config {
 	return (*boltdbconfig.Config)(x)
 }
+
+// LeafWidth returns the value of "leaf_width" config parameter.
+//
+// Returns 0 if the value is not a positive number.
+func (x *Config) LeafWidth() uint64 {
+	return config.UintSafe(
+		(*config.Config)(x),
+		"leaf_width",
+	)
+}

View file

@@ -51,3 +51,8 @@ func (g PutConfig) PoolSizeLocal() int {
 	return PutPoolSizeDefault
 }
+
+// SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if is not defined.
+func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
+	return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
+}

View file

@@ -16,6 +16,7 @@ func TestObjectSection(t *testing.T) {
 		require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
 		require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
 		require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
+		require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
 	})
 	const path = "../../../../config/example/node"
@@ -24,6 +25,7 @@ func TestObjectSection(t *testing.T) {
 		require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
 		require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
 		require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
+		require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
 	}
 	configtest.ForEachFileType(path, fileConfigTest)

View file

@@ -113,7 +113,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 		c.cfgObject.eaclSource = eACLFetcher
 		cnrRdr.eacl = eACLFetcher
 		c.cfgObject.cnrSource = cnrSrc
-		cnrRdr.get = cnrSrc
+		cnrRdr.src = cnrSrc
 		cnrRdr.lister = client
 	} else {
 		// use RPC node as source of Container contract items (with caching)
@@ -131,7 +131,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 			cnr, err := cnrSrc.Get(ev.ID)
 			if err == nil {
 				cachedContainerLister.update(cnr.Value.Owner(), ev.ID, true)
-				cachedContainerStorage.set(ev.ID, cnr, nil)
+				cachedContainerStorage.containerCache.set(ev.ID, cnr, nil)
 			} else {
 				// unlike removal, we expect successful receive of the container
 				// after successful creation, so logging can be useful
@@ -159,7 +159,6 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 			}
 			cachedContainerStorage.handleRemoval(ev.ID)
 			c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
 				zap.Stringer("id", ev.ID),
 			)
@@ -170,7 +169,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 		cnrRdr.lister = cachedContainerLister
 		cnrRdr.eacl = c.cfgObject.eaclSource
-		cnrRdr.get = c.cfgObject.cnrSource
+		cnrRdr.src = c.cfgObject.cnrSource
 		cnrWrt.cacheEnabled = true
 		cnrWrt.eacls = cachedEACLStorage
@@ -641,7 +640,7 @@ func (c *usedSpaceService) processLoadValue(_ context.Context, a containerSDK.Si
 type morphContainerReader struct {
 	eacl containerCore.EACLSource
-	get containerCore.Source
+	src containerCore.Source
 	lister interface {
 		List(*user.ID) ([]cid.ID, error)
@@ -649,7 +648,11 @@ type morphContainerReader struct {
 }
 func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
-	return x.get.Get(id)
+	return x.src.Get(id)
+}
+
+func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
+	return x.src.DeletionInfo(id)
 }
 func (x *morphContainerReader) GetEACL(id cid.ID) (*containerCore.EACL, error) {

View file

@@ -160,8 +160,9 @@ func initObjectService(c *cfg) {
 	addPolicer(c, keyStorage, c.bgClientCache)
 	traverseGen := util.NewTraverserGenerator(c.netMapSource, c.cfgObject.cnrSource, c)
+	irFetcher := newCachedIRFetcher(createInnerRingFetcher(c))
-	sPut := createPutSvc(c, keyStorage)
+	sPut := createPutSvc(c, keyStorage, &irFetcher)
 	sPutV2 := createPutSvcV2(sPut, keyStorage)
@@ -184,7 +185,7 @@ func initObjectService(c *cfg) {
 	splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2)
-	aclSvc := createACLServiceV2(c, splitSvc)
+	aclSvc := createACLServiceV2(c, splitSvc, &irFetcher)
 	var commonSvc objectService.Common
 	commonSvc.Init(&c.internals, aclSvc)
@@ -295,7 +296,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
 	)
 }
-func createPutSvc(c *cfg, keyStorage *util.KeyStorage) *putsvc.Service {
+func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetcher) *putsvc.Service {
 	ls := c.cfgObject.cfgLocalStorage.localStorage
 	var os putsvc.ObjectStorage = engineWithoutNotifications{
@@ -320,8 +321,10 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage) *putsvc.Service {
 		c.netMapSource,
 		c,
 		c.cfgNetmap.state,
+		irFetcher,
 		putsvc.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
 		putsvc.WithLogger(c.log),
+		putsvc.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
 	)
 }
@@ -405,14 +408,13 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
 	)
 }
-func createACLServiceV2(c *cfg, splitSvc *objectService.TransportSplitter) v2.Service {
+func createACLServiceV2(c *cfg, splitSvc *objectService.TransportSplitter, irFetcher *cachedIRFetcher) v2.Service {
 	ls := c.cfgObject.cfgLocalStorage.localStorage
-	irFetcher := createInnerRingFetcher(c)
 	return v2.New(
 		splitSvc,
 		c.netMapSource,
-		newCachedIRFetcher(irFetcher),
+		irFetcher,
 		acl.NewChecker(
 			c.cfgNetmap.state,
 			c.cfgObject.eaclSource,

View file

@@ -31,6 +31,10 @@ func (c cnrSource) Get(id cid.ID) (*container.Container, error) {
 	return c.src.Get(id)
 }
+func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) {
+	return c.src.DeletionInfo(cid)
+}
+
 func (c cnrSource) List() ([]cid.ID, error) {
 	return c.cli.ContainersOf(nil)
 }

View file

@@ -86,6 +86,7 @@ FROSTFS_REPLICATOR_POOL_SIZE=10
 # Object service section
 FROSTFS_OBJECT_PUT_POOL_SIZE_REMOTE=100
 FROSTFS_OBJECT_PUT_POOL_SIZE_LOCAL=200
+FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
 FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
 # Storage engine section
@@ -121,6 +122,7 @@ FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_SIZE=4194304
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH=1
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH=4
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
+FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_LEAF_WIDTH=10
 ### FSTree config
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE=fstree
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH=tmp/0/blob
@@ -167,6 +169,7 @@ FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_SIZE=4194304
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH=1
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH=4
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
+FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_LEAF_WIDTH=10
 ### FSTree config
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE=fstree
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH=tmp/1/blob

View file

@@ -130,7 +130,8 @@
     },
     "put": {
       "pool_size_remote": 100,
-      "pool_size_local": 200
+      "pool_size_local": 200,
+      "skip_session_token_issuer_verification": true
     }
   },
   "storage": {
@@ -168,7 +169,8 @@
           "size": 4194304,
           "depth": 1,
           "width": 4,
-          "opened_cache_capacity": 50
+          "opened_cache_capacity": 50,
+          "leaf_width": 10
         },
         {
           "type": "fstree",
@@ -218,7 +220,8 @@
           "size": 4194304,
           "depth": 1,
           "width": 4,
-          "opened_cache_capacity": 50
+          "opened_cache_capacity": 50,
+          "leaf_width": 10
         },
         {
           "type": "fstree",

View file

@@ -110,6 +110,7 @@ object:
   put:
     pool_size_remote: 100 # number of async workers for remote PUT operations
     pool_size_local: 200 # number of async workers for local PUT operations
+    skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
 storage:
   # note: shard configuration can be omitted for relay node (see `node.relay`)
@@ -145,6 +146,7 @@ storage:
       depth: 1 # max depth of object tree storage in key-value DB
       width: 4 # max width of object tree storage in key-value DB
       opened_cache_capacity: 50 # maximum number of opened database files
+      leaf_width: 10 # max count of key-value DB on leafs of object tree storage
     - perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
       depth: 5 # max depth of object tree storage in FS

debian/control vendored
View file

@@ -1,7 +1,7 @@
 Source: frostfs-node
 Section: misc
 Priority: optional
-Maintainer: NeoSPCC <tech@nspcc.ru>
+Maintainer: TrueCloudLab <tech@frostfs.info>
 Build-Depends: debhelper-compat (= 13), dh-sequence-bash-completion, devscripts
 Standards-Version: 4.5.1
 Homepage: https://fs.neo.org/

go.mod
View file

@@ -6,11 +6,12 @@ require (
 	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44
 	git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230627134746-36f3d39c406a
 	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230809065235-d48788c7a946
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230828082657-84e7e69f98ac
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1
 	git.frostfs.info/TrueCloudLab/tzhash v1.8.0
 	github.com/cheggaaa/pb v1.0.29
 	github.com/chzyer/readline v1.5.1
+	github.com/dgraph-io/ristretto v0.1.1
 	github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
 	github.com/google/uuid v1.3.0
 	github.com/hashicorp/golang-lru/v2 v2.0.4
@@ -42,7 +43,6 @@ require (
 )
 require (
-	github.com/dgraph-io/ristretto v0.1.1 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/glog v1.1.0 // indirect

go.sum

Binary file not shown.

View file

@ -17,467 +17,462 @@ const (
) )
const ( const (
InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations" // Debug in ../node/pkg/innerring/blocktimer.go InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations"
InnerringCantStopEpochEstimation = "can't stop epoch estimation" // Warn in ../node/pkg/innerring/blocktimer.go InnerringCantStopEpochEstimation = "can't stop epoch estimation"
InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain" // Error in ../node/pkg/innerring/notary.go InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain"
InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain" // Error in ../node/pkg/innerring/notary.go InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain"
InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/innerring/notary.go InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
InnerringCantGetInnerRingIndex = "can't get inner ring index" // Error in ../node/pkg/innerring/state.go InnerringCantGetInnerRingIndex = "can't get inner ring index"
InnerringCantGetInnerRingSize = "can't get inner ring size" // Error in ../node/pkg/innerring/state.go InnerringCantGetInnerRingSize = "can't get inner ring size"
InnerringCantGetAlphabetIndex = "can't get alphabet index" // Error in ../node/pkg/innerring/state.go InnerringCantGetAlphabetIndex = "can't get alphabet index"
InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range" // Info in ../node/pkg/innerring/state.go InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range"
InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list" // Info in ../node/pkg/innerring/state.go InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list"
InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract" // Warn in ../node/pkg/innerring/state.go InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract"
InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number" // Warn in ../node/pkg/innerring/initialization.go InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number"
InnerringNotarySupport = "notary support" // Info in ../node/pkg/innerring/initialization.go InnerringNotarySupport = "notary support"
InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled" // Debug in ../node/pkg/innerring/initialization.go InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled"
InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled" // Info in ../node/pkg/innerring/initialization.go InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled"
InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/pkg/innerring/initialization.go InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global" // Info in ../node/pkg/innerring/initialization.go InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global"
InnerringCantVoteForPreparedValidators = "can't vote for prepared validators" // Warn in ../node/pkg/innerring/innerring.go InnerringCantVoteForPreparedValidators = "can't vote for prepared validators"
InnerringNewBlock = "new block" // Debug in ../node/pkg/innerring/innerring.go InnerringNewBlock = "new block"
InnerringCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/pkg/innerring/innerring.go InnerringCantUpdatePersistentState = "can't update persistent state"
InnerringCloserError = "closer error" // Warn in ../node/pkg/innerring/innerring.go InnerringCloserError = "closer error"
InnerringReadConfigFromBlockchain = "read config from blockchain" // Debug in ../node/pkg/innerring/innerring.go InnerringReadConfigFromBlockchain = "read config from blockchain"
NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications" // Debug in ../node/pkg/services/notificator/service.go NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications"
NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification" // Debug in ../node/pkg/services/notificator/service.go NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification"
PolicerCouldNotGetContainer = "could not get container" // Error in ../node/pkg/services/policer/check.go PolicerCouldNotGetContainer = "could not get container"
PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container" // Error in ../node/pkg/services/policer/check.go PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object" // Error in ../node/pkg/services/policer/check.go PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected" // Info in ../node/pkg/services/policer/check.go PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object"
PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance" // Error in ../node/pkg/services/policer/check.go PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected"
PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK" // Debug in ../node/pkg/services/policer/check.go PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance"
PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected" // Debug in ../node/pkg/services/policer/check.go PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK"
PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy" // Debug in ../node/pkg/services/policer/check.go PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected"
PolicerRoutineStopped = "routine stopped" // Info in ../node/pkg/services/policer/process.go PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy"
PolicerFailureAtObjectSelectForReplication = "failure at object select for replication" // Warn in ../node/pkg/services/policer/process.go PolicerRoutineStopped = "routine stopped"
PolicerPoolSubmission = "pool submission" // Warn in ../node/pkg/services/policer/process.go PolicerFailureAtObjectSelectForReplication = "failure at object select for replication"
ReplicatorFinishWork = "finish work" // Debug in ../node/pkg/services/replicator/process.go PolicerPoolSubmission = "pool submission"
ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go ReplicatorFinishWork = "finish work"
ReplicatorCouldNotReplicateObject = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage"
ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" // Debug in ../node/pkg/services/replicator/process.go ReplicatorCouldNotReplicateObject = "could not replicate object"
TreeRedirectingTreeServiceQuery = "redirecting tree service query" // Debug in ../node/pkg/services/tree/redirect.go ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL" // Debug in ../node/pkg/services/tree/signature.go TreeRedirectingTreeServiceQuery = "redirecting tree service query"
TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL"
TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
TreeSynchronizeTree = "synchronize tree" // Debug in ../node/pkg/services/tree/sync.go TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes" // Warn in ../node/pkg/services/tree/sync.go TreeSynchronizeTree = "synchronize tree"
TreeSyncingTrees = "syncing trees..." // Debug in ../node/pkg/services/tree/sync.go TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes"
TreeCouldNotFetchContainers = "could not fetch containers" // Error in ../node/pkg/services/tree/sync.go TreeSyncingTrees = "syncing trees..."
TreeTreesHaveBeenSynchronized = "trees have been synchronized" // Debug in ../node/pkg/services/tree/sync.go TreeCouldNotFetchContainers = "could not fetch containers"
TreeSyncingContainerTrees = "syncing container trees..." // Debug in ../node/pkg/services/tree/sync.go TreeTreesHaveBeenSynchronized = "trees have been synchronized"
TreeCouldNotSyncTrees = "could not sync trees" // Error in ../node/pkg/services/tree/sync.go TreeSyncingContainerTrees = "syncing container trees..."
TreeContainerTreesHaveBeenSynced = "container trees have been synced" // Debug in ../node/pkg/services/tree/sync.go TreeCouldNotSyncTrees = "could not sync trees"
TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization" // Error in ../node/pkg/services/tree/sync.go TreeContainerTreesHaveBeenSynced = "container trees have been synced"
TreeRemovingRedundantTrees = "removing redundant trees..." // Debug in ../node/pkg/services/tree/sync.go TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization"
TreeCouldNotRemoveRedundantTree = "could not remove redundant tree" // Error in ../node/pkg/services/tree/sync.go TreeRemovingRedundantTrees = "removing redundant trees..."
TreeCouldNotCalculateContainerNodes = "could not calculate container nodes" // Error in ../node/pkg/services/tree/sync.go TreeCouldNotCheckIfContainerExisted = "could not check if the container ever existed"
TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation" // Error in ../node/pkg/services/tree/replicator.go TreeCouldNotRemoveRedundantTree = "could not remove redundant tree"
TreeDoNotSendUpdateToTheNode = "do not send update to the node" // Debug in ../node/pkg/services/tree/replicator.go TreeCouldNotCalculateContainerNodes = "could not calculate container nodes"
TreeFailedToSentUpdateToTheNode = "failed to sent update to the node" // Warn in ../node/pkg/services/tree/replicator.go TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation"
TreeErrorDuringReplication = "error during replication" // Error in ../node/pkg/services/tree/replicator.go TreeDoNotSendUpdateToTheNode = "do not send update to the node"
PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage" // Error in ../node/pkg/services/session/storage/persistent/storage.go
PersistentCouldNotDeleteSToken = "could not delete token" // Error in ../node/pkg/services/session/storage/persistent/storage.go
PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens" // Error in ../node/pkg/services/session/storage/persistent/storage.go
ControllerReportIsAlreadyStarted = "report is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
DeleteRequestIsNotRolledOverToTheContainer = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go
DeleteCouldNotComposeSplitInfo = "could not compose split info" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteAssemblingChain = "assembling chain..." // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotGetPreviousSplitElement = "could not get previous split element" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCollectingChildren = "collecting children..." // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotCollectObjectChildren = "could not collect object children" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteSupplementBySplitID = "supplement by split ID" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotSearchForSplitChainMembers = "could not search for split chain members" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotMarshalTombstoneStructure = "could not marshal tombstone structure" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotSaveTheTombstone = "could not save the tombstone" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteFormingTombstoneStructure = "forming tombstone structure..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteCouldNotReadTombstoneLifetimeConfig = "could not read tombstone lifetime config" // Debug in ../node/pkg/services/object/delete/local.go
DeleteFormingSplitInfo = "forming split info..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteMembersSuccessfullyCollected = "members successfully collected" // Debug in ../node/pkg/services/object/delete/local.go
GetRemoteCallFailed = "remote call failed" // Debug in ../node/pkg/services/object/get/remote.go
GetCanNotAssembleTheObject = "can not assemble the object" // Debug in ../node/pkg/services/object/get/assemble.go
GetTryingToAssembleTheObject = "trying to assemble the object..." // Debug in ../node/pkg/services/object/get/assemble.go
GetAssemblingSplittedObject = "assembling splitted object..." // Debug in ../node/pkg/services/object/get/assemble.go
GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" // Debug in ../node/pkg/services/object/get/assemble.go
GetFailedToAssembleSplittedObject = "failed to assemble splitted object" // Warn in ../node/pkg/services/object/get/assemble.go
GetCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotWriteHeader = "could not write header" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotWritePayloadChunk = "could not write payload chunk" // Debug in ../node/pkg/services/object/get/exec.go
GetLocalGetFailed = "local get failed" // Debug in ../node/pkg/services/object/get/local.go
GetReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/get/container.go
GetCompletingTheOperation = "completing the operation" // Debug in ../node/pkg/services/object/get/container.go
GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedObjectIsVirtual = "requested object is virtual" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds" // Debug in ../node/pkg/services/object/get/get.go
PutAdditionalContainerBroadcastFailure = "additional container broadcast failure" // Error in ../node/pkg/services/object/put/distributed.go
SearchReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/search/container.go
SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/search/container.go
SearchRemoteOperationFailed = "remote operation failed" // Debug in ../node/pkg/services/object/search/container.go
SearchCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/search/exec.go
SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" // Debug in ../node/pkg/services/object/search/exec.go
SearchLocalOperationFailed = "local operation failed" // Debug in ../node/pkg/services/object/search/local.go
UtilObjectServiceError = "object service error" // Error in ../node/pkg/services/object/util/log.go
UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" // Error in ../node/pkg/services/object/util/log.go
V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
NatsNatsConnectionWasLost = "nats: connection was lost" // Error in ../node/pkg/services/notificator/nats/service.go
NatsNatsReconnectedToTheServer = "nats: reconnected to the server" // Warn in ../node/pkg/services/notificator/nats/service.go
NatsNatsClosingConnectionAsTheContextIsDone = "nats: closing connection as the context is done" // Info in ../node/pkg/services/notificator/nats/service.go
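The Nats* messages above are tied to connection-state transitions. As a rough sketch with the public github.com/nats-io/nats.go client (not the node's notificator service; the URL and handlers here are illustrative assumptions), these are the callbacks where messages of this kind are usually emitted:

package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL,
		// Fired when the connection drops; analogous to "nats: connection was lost".
		nats.DisconnectErrHandler(func(_ *nats.Conn, err error) {
			log.Println("nats: connection was lost:", err)
		}),
		// Fired after a successful reconnect; analogous to "nats: reconnected to the server".
		nats.ReconnectHandler(func(_ *nats.Conn) {
			log.Println("nats: reconnected to the server")
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
}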
ControllerStartingToAnnounceTheValuesOfTheMetrics = "starting to announce the values of the metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics = "could not initialize iterator over locally collected metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeAnnouncementAccumulator = "could not initialize announcement accumulator" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerIteratorOverLocallyCollectedMetricsAborted = "iterator over locally collected metrics aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotFinishWritingLocalAnnouncements = "could not finish writing local announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerTrustAnnouncementSuccessfullyFinished = "trust announcement successfully finished" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementIsAlreadyStarted = "announcement is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementSuccessfullyInterrupted = "announcement successfully interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementIsNotStartedOrAlreadyInterrupted = "announcement is not started or already interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements = "could not initialize iterator over locally accumulated announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeResultTarget = "could not initialize result target" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerIteratorOverLocalAnnouncementsAborted = "iterator over local announcements aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotFinishWritingLoadEstimations = "could not finish writing load estimations" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
RouteCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
RouteCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
RouteCouldNotPutTheValue = "could not put the value" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
RouteCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node" // Warn in ../node/pkg/morph/client/multi.go
ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established" // Info in ../node/pkg/morph/client/multi.go
ClientSwitchingToTheNextRPCNode = "switching to the next RPC node" // Warn in ../node/pkg/morph/client/multi.go
ClientCouldNotEstablishConnectionToAnyRPCNode = "could not establish connection to any RPC node" // Error in ../node/pkg/morph/client/multi.go
ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node" // Warn in ../node/pkg/morph/client/multi.go
ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC" // Info in ../node/pkg/morph/client/multi.go
ClientCouldNotRestoreSideChainSubscriptionsUsingNode = "could not restore side chain subscriptions using node" // Warn in ../node/pkg/morph/client/multi.go
ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/morph/client/notary.go
ClientNotaryDepositInvoke = "notary deposit invoke" // Info in ../node/pkg/morph/client/notary.go
ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked" // Debug in ../node/pkg/morph/client/notary.go
ClientNotaryRequestInvoked = "notary request invoked" // Debug in ../node/pkg/morph/client/notary.go
ClientNeoClientInvoke = "neo client invoke" // Debug in ../node/pkg/morph/client/client.go
ClientNativeGasTransferInvoke = "native gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
ClientBatchGasTransferInvoke = "batch gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
ClientCantGetBlockchainHeight = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
ClientCantGetBlockchainHeight243 = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" // Warn in ../node/pkg/morph/event/utils.go
EventCouldNotStartListenToEvents = "could not start listen to events" // Error in ../node/pkg/morph/event/listener.go
EventStopEventListenerByError = "stop event listener by error" // Error in ../node/pkg/morph/event/listener.go
EventStopEventListenerByContext = "stop event listener by context" // Info in ../node/pkg/morph/event/listener.go
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" // Warn in ../node/pkg/morph/event/listener.go
EventNilNotificationEventWasCaught = "nil notification event was caught" // Warn in ../node/pkg/morph/event/listener.go
EventStopEventListenerByNotaryChannel = "stop event listener by notary channel" // Warn in ../node/pkg/morph/event/listener.go
EventNilNotaryEventWasCaught = "nil notary event was caught" // Warn in ../node/pkg/morph/event/listener.go
EventStopEventListenerByBlockChannel = "stop event listener by block channel" // Warn in ../node/pkg/morph/event/listener.go
EventNilBlockWasCaught = "nil block was caught" // Warn in ../node/pkg/morph/event/listener.go
EventListenerWorkerPoolDrained = "listener worker pool drained" // Warn in ../node/pkg/morph/event/listener.go
EventEventParserNotSet = "event parser not set" // Debug in ../node/pkg/morph/event/listener.go
EventCouldNotParseNotificationEvent = "could not parse notification event" // Warn in ../node/pkg/morph/event/listener.go
EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event" // Warn in ../node/pkg/morph/event/listener.go
EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event" // Warn in ../node/pkg/morph/event/listener.go
EventNotaryParserNotSet = "notary parser not set" // Debug in ../node/pkg/morph/event/listener.go
EventCouldNotParseNotaryEvent = "could not parse notary event" // Warn in ../node/pkg/morph/event/listener.go
EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
EventIgnoreNilEventParser = "ignore nil event parser" // Info in ../node/pkg/morph/event/listener.go
EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser" // Warn in ../node/pkg/morph/event/listener.go
EventRegisteredNewEventParser = "registered new event parser" // Debug in ../node/pkg/morph/event/listener.go
EventIgnoreNilEventHandler = "ignore nil event handler" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
EventRegisteredNewEventHandler = "registered new event handler" // Debug in ../node/pkg/morph/event/listener.go
EventIgnoreNilNotaryEventParser = "ignore nil notary event parser" // Info in ../node/pkg/morph/event/listener.go
EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreNilBlockHandler = "ignore nil block handler" // Warn in ../node/pkg/morph/event/listener.go
SubscriberRemoteNotificationChannelHasBeenClosed = "remote notification channel has been closed" // Warn in ../node/pkg/morph/subscriber/subscriber.go
SubscriberCantCastNotifyEventValueToTheNotifyStruct = "can't cast notify event value to the notify struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberNewNotificationEventFromSidechain = "new notification event from sidechain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
SubscriberCantCastBlockEventValueToBlock = "can't cast block event value to block" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct = "can't cast notify event value to the notary request struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberUnsupportedNotificationFromTheChain = "unsupported notification from the chain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaOpeningBoltDB = "opening BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaAlreadyInitialized = "already initialized" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaClosingBoltDB = "closing BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket" // Debug in ../node/pkg/local_object_storage/blobovnicza/delete.go
BlobstorOpening = "opening..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorClosing = "closing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorCouldntCloseStorage = "couldn't close storage" // Info in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking" // Warn in ../node/pkg/local_object_storage/blobstor/exists.go
BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration" // Warn in ../node/pkg/local_object_storage/blobstor/iterate.go
EngineShardHasBeenRemoved = "shard has been removed" // Info in ../node/pkg/local_object_storage/engine/shards.go
EngineCouldNotCloseRemovedShard = "could not close removed shard" // Error in ../node/pkg/local_object_storage/engine/shards.go
EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotCloseShard = "could not close shard" // Debug in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotReloadAShard = "could not reload a shard" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineAddedNewShard = "added new shard" // Info in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotMarkObjectForShardRelocation = "could not mark object for shard relocation" // Warn in ../node/pkg/local_object_storage/engine/put.go
EngineCouldNotPutObjectToShard = "could not put object to shard" // Warn in ../node/pkg/local_object_storage/engine/put.go
EngineErrorDuringSearchingForObjectChildren = "error during searching for object children" // Warn in ../node/pkg/local_object_storage/engine/delete.go
EngineCouldNotInhumeObjectInShard = "could not inhume object in shard" // Debug in ../node/pkg/local_object_storage/engine/delete.go
EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies" // Info in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine" // Debug in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies" // Error in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check" // Warn in ../node/pkg/local_object_storage/engine/inhume.go
EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" // Error in ../node/pkg/local_object_storage/engine/engine.go
EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" // Error in ../node/pkg/local_object_storage/engine/engine.go
EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request" // Debug in ../node/pkg/local_object_storage/engine/engine.go
EngineStartedShardsEvacuation = "started shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error" // Error in ../node/pkg/local_object_storage/engine/evacuate.go
EngineObjectIsMovedToAnotherShard = "object is moved to another shard" // Debug in ../node/pkg/local_object_storage/engine/evacuate.go
MetabaseMissingMatcher = "missing matcher" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseErrorInFKBTSelection = "error in FKBT selection" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseUnknownOperation = "unknown operation" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCantIterateOverTheBucket = "can't iterate over the bucket" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCreatedDirectoryForMetabase = "created directory for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
MetabaseCheckingMetabaseVersion = "checking metabase version" // Debug in ../node/pkg/local_object_storage/metabase/control.go
ShardCantSelectAllObjects = "can't select all objects" // Debug in ../node/pkg/local_object_storage/shard/list.go
ShardSettingShardMode = "setting shard mode" // Info in ../node/pkg/local_object_storage/shard/mode.go
ShardShardModeSetSuccessfully = "shard mode set successfully" // Info in ../node/pkg/local_object_storage/shard/mode.go
ShardCouldNotMarkObjectForShardRelocationInMetabase = "could not mark object for shard relocation in metabase" // Debug in ../node/pkg/local_object_storage/shard/move.go
ShardCantDeleteObjectFromWriteCache = "can't delete object from write cache" // Warn in ../node/pkg/local_object_storage/shard/delete.go
ShardCantGetStorageIDFromMetabase = "can't get storage ID from metabase" // Debug in ../node/pkg/local_object_storage/shard/delete.go
ShardCantRemoveObjectFromBlobStor = "can't remove object from blobStor" // Debug in ../node/pkg/local_object_storage/shard/delete.go
ShardFetchingObjectWithoutMeta = "fetching object without meta" // Warn in ../node/pkg/local_object_storage/shard/get.go
ShardObjectIsMissingInWritecache = "object is missing in write-cache" // Debug in ../node/pkg/local_object_storage/shard/get.go
ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache" // Error in ../node/pkg/local_object_storage/shard/get.go
ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor" // Debug in ../node/pkg/local_object_storage/shard/put.go
ShardMetaObjectCounterRead = "meta: object counter read" // Warn in ../node/pkg/local_object_storage/shard/shard.go
ShardMetaCantReadContainerList = "meta: can't read container list" // Warn in ../node/pkg/local_object_storage/shard/shard.go
ShardMetaCantReadContainerSize = "meta: can't read container size" // Warn in ../node/pkg/local_object_storage/shard/shard.go
ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCouldNotUnmarshalObject = "could not unmarshal object" // Warn in ../node/pkg/local_object_storage/shard/control.go
ShardCouldNotCloseShardComponent = "could not close shard component" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardTryingToRestoreReadwriteMode = "trying to restore read-write mode" // Info in ../node/pkg/local_object_storage/shard/control.go
ShardStopEventListenerByClosedChannel = "stop event listener by closed channel" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardGCIsStopped = "GC is stopped" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..." // Info in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotDeleteTheObjects = "could not delete the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotInhumeTheObjects = "could not inhume the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardStartedExpiredTombstonesHandling = "started expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratingTombstones = "iterating tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverGraveyardFailed = "iterator over graveyard failed" // Error in ../node/pkg/local_object_storage/shard/gc.go
ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToUnlockObjects = "failure to unlock objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" // Debug in ../node/pkg/local_object_storage/shard/inhume.go
WritecacheBadgerInitExperimental = "initializing badger-backed experimental writecache"
WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache" // Debug in ../node/pkg/local_object_storage/writecache/flush.go
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" // Info in ../node/pkg/local_object_storage/writecache/mode.go
WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantParseAddress = "can't parse address" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheDBValueLogGCRunCompleted = "value log GC run completed"
WritecacheBadgerObjAlreadyScheduled = "object already scheduled for flush"
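WritecacheDBValueLogGCRunCompleted is reported around badger's value-log garbage collection. As a hedged illustration only (the directory, discard ratio, and logging below are assumptions, not the node's actual write-cache code), a GC pass with the public github.com/dgraph-io/badger/v4 API looks roughly like this:

package main

import (
	"errors"
	"log"

	badger "github.com/dgraph-io/badger/v4"
)

func main() {
	// Open a throwaway badger database; the write-cache would use its own path and options.
	db, err := badger.Open(badger.DefaultOptions("/tmp/writecache-example"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// RunValueLogGC rewrites a value-log file when at least the given fraction is stale.
	// badger.ErrNoRewrite simply means there was nothing to collect on this pass.
	switch err := db.RunValueLogGC(0.5); {
	case err == nil:
		log.Println("value log GC run completed") // cf. WritecacheDBValueLogGCRunCompleted
	case errors.Is(err, badger.ErrNoRewrite):
		log.Println("value log GC: nothing to rewrite")
	default:
		log.Println("value log GC failed:", err)
	}
}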
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza" // Error in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
AlphabetTick = "tick" // Info in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained" // Warn in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetStorageNodeEmissionIsOff = "storage node emission is off" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetGasEmission = "gas emission" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantParseNodePublicKey = "can't parse node public key" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGas = "can't transfer gas" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGasToWallet = "can't transfer gas to wallet" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetAlphabetWorkerPool = "alphabet worker pool" // Debug in ../node/pkg/innerring/processors/alphabet/processor.go
BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApprovePutContainer = "could not approve put container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerDeleteContainerCheckFailed = "delete container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApproveDeleteContainer = "could not approve delete container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL" // Info in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerSetEACLCheckFailed = "set EACL check failed" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerCouldNotApproveSetEACL = "could not approve set EACL" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind" // Info in ../node/pkg/innerring/processors/frostfs/process_bind.go ContainerCouldNotApproveSetEACL = "could not approve set EACL"
FrostFSInvalidManageKeyEvent = "invalid manage key event" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind"
FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go FrostFSInvalidManageKeyEvent = "invalid manage key event"
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes"
FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go FrostFSCantRelaySetConfigEvent = "can't relay set config event"
FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go FrostFSFrostfsWorkerPool = "frostfs worker pool"
FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained"
FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit"
FrostFSDoubleMintEmissionDeclined = "double mint emission declined" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract"
FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSDoubleMintEmissionDeclined = "double mint emission declined"
FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node"
FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached"
FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver"
FrostFSCantCreateLockAccount = "can't create lock account" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw"
FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSCantCreateLockAccount = "can't create lock account"
FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw"
FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque"
GovernanceNewEvent = "new event" // Info in ../node/pkg/innerring/processors/governance/handlers.go FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract"
GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained" // Warn in ../node/pkg/innerring/processors/governance/handlers.go GovernanceNewEvent = "new event"
GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync" // Info in ../node/pkg/innerring/processors/governance/process_update.go GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained"
GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net" // Error in ../node/pkg/innerring/processors/governance/process_update.go GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync"
GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net"
GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain"
GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed" // Info in ../node/pkg/innerring/processors/governance/process_update.go GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain"
GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update" // Info in ../node/pkg/innerring/processors/governance/process_update.go GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed"
GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee" // Error in ../node/pkg/innerring/processors/governance/process_update.go GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update"
GovernanceFinishedAlphabetListUpdate = "finished alphabet list update" // Info in ../node/pkg/innerring/processors/governance/process_update.go GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee"
GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go GovernanceFinishedAlphabetListUpdate = "finished alphabet list update"
GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain"
GovernanceUpdateOfTheInnerRingList = "update of the inner ring list" // Info in ../node/pkg/innerring/processors/governance/process_update.go GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys"
GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go GovernanceUpdateOfTheInnerRingList = "update of the inner ring list"
GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys"
GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract" // Error in ../node/pkg/innerring/processors/governance/process_update.go GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain"
NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract"
NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go NetmapNetmapWorkerPool = "netmap worker pool"
NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go NetmapTick = "tick"
NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go NetmapNetmapWorkerPoolDrained = "netmap worker pool drained"
NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled"
NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick"
NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node"
NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_cleanup.go NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap"
NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState"
NetmapCantGetEpochDuration = "can't get epoch duration" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache"
NetmapCantGetTransactionHeight = "can't get transaction height" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go NetmapCantGetEpochDuration = "can't get epoch duration"
NetmapCantResetEpochTimer = "can't reset epoch timer" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go NetmapCantGetTransactionHeight = "can't get transaction height"
NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go NetmapCantResetEpochTimer = "can't reset epoch timer"
NetmapCantStartContainerSizeEstimation = "can't start container size estimation" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup"
NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" // Info in ../node/pkg/innerring/processors/netmap/process_epoch.go NetmapCantStartContainerSizeEstimation = "can't start container size estimation"
NetmapNextEpoch = "next epoch" // Debug in ../node/pkg/innerring/processors/netmap/process_epoch.go NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick"
NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" // Error in ../node/pkg/innerring/processors/netmap/process_epoch.go NetmapNextEpoch = "next epoch"
NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch"
NetmapNonhaltNotaryTransaction = "non-halt notary transaction" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification"
NetmapCantParseNetworkMapCandidate = "can't parse network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go NetmapNonhaltNotaryTransaction = "non-halt notary transaction"
NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go NetmapCantParseNetworkMapCandidate = "can't parse network map candidate"
NetmapApprovingNetworkMapCandidate = "approving network map candidate" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate"
NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go NetmapApprovingNetworkMapCandidate = "approving network map candidate"
NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer"
NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification"
NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state"
FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer"
FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go FrostFSIRInternalError = "internal error"
FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server"
FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint" // Debug in ../node/pkg/morph/client/constructor.go FrostFSIRApplicationStopped = "application stopped"
FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint" // Info in ../node/pkg/morph/client/constructor.go FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint"
FrostFSIRReloadExtraWallets = "reload extra wallets" // Info in ../node/cmd/frostfs-ir/config.go FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint"
FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go FrostFSIRReloadExtraWallets = "reload extra wallets"
FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file"
FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint"
FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint"
FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint"
FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go FrostFSNodeStoppingGRPCServer = "stopping gRPC server..."
FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop"
FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully"
FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop"
FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance"
FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance"
FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine"
FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeShardAttachedToEngine = "shard attached to engine"
FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..."
FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure"
FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully"
FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state"
FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state"
FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..."
FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete"
FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeInternalApplicationError = "internal application error"
FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete"
FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..."
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go FrostFSNodeConfigurationReading = "configuration reading"
FrostFSNodeTracingConfigationUpdated = "tracing configation updated" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go FrostFSNodeTracingConfigationUpdated = "tracing configation updated"
FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying"
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully"
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract"
FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine"
FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully"
FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers"
FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container"
FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object"
FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications"
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification"
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go FrostFSNodeFailedInitTracing = "failed init tracing"
FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go FrostFSNodeFailedShutdownTracing = "failed shutdown tracing"
FrostFSNodeClosingMorphComponents = "closing morph components..." // Info in ../node/cmd/frostfs-node/morph.go FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client"
FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go FrostFSNodeClosingMorphComponents = "closing morph components..."
FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global"
FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go FrostFSNodeNotarySupport = "notary support"
FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network"
FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain"
FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go FrostFSNodeNewBlock = "new block"
FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go FrostFSNodeCantUpdatePersistentState = "can't update persistent state"
FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx"
FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch"
FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit"
FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go FrostFSNodeInitialNetworkState = "initial network state"
FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization"
FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service"
FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container"
FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed"
FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)"
FrostFSNodePolicerIsDisabled = "policer is disabled" FrostFSNodePolicerIsDisabled = "policer is disabled"
CommonApplicationStarted = "application started" CommonApplicationStarted = "application started"
ShardGCCollectingExpiredObjectsStarted = "collecting expired objects started" ShardGCCollectingExpiredObjectsStarted = "collecting expired objects started"
@ -509,9 +504,10 @@ const (
TombstoneExpirationParseFailure = "tombstone getter: could not parse tombstone expiration epoch" TombstoneExpirationParseFailure = "tombstone getter: could not parse tombstone expiration epoch"
FrostFSNodeCantUpdateObjectStorageID = "can't update object storage ID" FrostFSNodeCantUpdateObjectStorageID = "can't update object storage ID"
FrostFSNodeCantFlushObjectToBlobstor = "can't flush an object to blobstor" FrostFSNodeCantFlushObjectToBlobstor = "can't flush an object to blobstor"
FrostFSNodeCantDecodeObjectAddressFromDB = "can't decode object address from the DB" // Error in ../node/cmd/frostfs-node/morph.go FrostFSNodeCantDecodeObjectAddressFromDB = "can't decode object address from the DB"
FrostFSNodeCantUnmarshalObjectFromDB = "can't unmarshal an object from the DB" // Error in ../node/cmd/frostfs-node/morph.go FrostFSNodeCantUnmarshalObjectFromDB = "can't unmarshal an object from the DB"
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
FailedToCountWritecacheItems = "failed to count writecache items" FailedToCountWritecacheItems = "failed to count writecache items"
AttemtToCloseAlreadyClosedBlobovnicza = "attempt to close an already closed blobovnicza"
) )

View file

@ -20,6 +20,15 @@ type Container struct {
Session *session.Container
}
// DelInfo contains info about removed container.
type DelInfo struct {
// Container owner.
Owner []byte
// Epoch indicates when the container was removed.
Epoch int
}
// Source is an interface that wraps
// basic container receiving method.
type Source interface {
@ -32,6 +41,8 @@ type Source interface {
// Implementations must not retain the container pointer and modify
// the container through it.
Get(cid.ID) (*Container, error)
DeletionInfo(cid.ID) (*DelInfo, error)
}
// EACL groups information about the FrostFS container's extended ACL stored in
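// Illustrative sketch (not part of this change): a minimal in-memory implementation of
// Source showing how Get and the new DeletionInfo method fit together. The map fields
// and the use of fmt.Errorf are assumptions made for the example; real sources read
// this data from the container contract.
type mapSource struct {
	containers map[cid.ID]*Container
	deleted    map[cid.ID]*DelInfo
}

func (s *mapSource) Get(id cid.ID) (*Container, error) {
	if cnr, ok := s.containers[id]; ok {
		return cnr, nil
	}
	return nil, fmt.Errorf("container not found")
}

func (s *mapSource) DeletionInfo(id cid.ID) (*DelInfo, error) {
	if di, ok := s.deleted[id]; ok {
		return di, nil // container existed once and was removed at di.Epoch
	}
	return nil, fmt.Errorf("container not found")
}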

View file

@ -0,0 +1,22 @@
package container
import (
"errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// WasRemoved checks whether the container ever existed or
// it just has not been created yet at the current epoch.
func WasRemoved(s Source, cid cid.ID) (bool, error) {
_, err := s.DeletionInfo(cid)
if err == nil {
return true, nil
}
var errContainerNotFound *apistatus.ContainerNotFound
if errors.As(err, &errContainerNotFound) {
return false, nil
}
return false, err
}
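// Example (hypothetical caller, not part of the diff): a policer-style check that
// distinguishes "the container was really removed" from "the container is simply not
// visible yet", using the WasRemoved helper above. The function name and the decision
// it feeds are assumptions for illustration.
func shouldDropObjects(src Source, id cid.ID) (bool, error) {
	removed, err := WasRemoved(src, id)
	if err != nil {
		return false, err // e.g. side chain unavailable: keep the data for now
	}
	return removed, nil // drop local replicas only when deletion info exists
}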

View file

@ -3,13 +3,17 @@ package object
import (
"context"
"crypto/ecdsa"
"crypto/sha256"
"errors"
"fmt"
"strconv"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -20,6 +24,8 @@ import (
// FormatValidator represents an object format validator.
type FormatValidator struct {
*cfg
senderClassifier SenderClassifier
}
// FormatValidatorOption represents a FormatValidator constructor option.
@ -28,6 +34,11 @@ type FormatValidatorOption func(*cfg)
type cfg struct {
netState netmap.State
e LockSource
ir InnerRing
netmap netmap.Source
containers container.Source
log *logger.Logger
verifyTokenIssuer bool
}
// DeleteHandler is an interface of delete queue processor.
@ -80,6 +91,7 @@ func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator {
return &FormatValidator{
cfg: cfg,
senderClassifier: NewSenderClassifier(cfg.ir, cfg.netmap, cfg.log),
}
}
@ -153,14 +165,52 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
}
token := obj.SessionToken()
ownerID := *obj.OwnerID()
if token == nil || !token.AssertAuthKey(&key) {
return v.checkOwnerKey(ownerID, key)
}
if v.verifyTokenIssuer {
signerIsIROrContainerNode, err := v.isIROrContainerNode(obj, binKey)
if err != nil {
return err
}
if signerIsIROrContainerNode {
return nil
}
if !token.Issuer().Equals(ownerID) {
return fmt.Errorf("(%T) different token issuer and object owner identifiers %s/%s", v, token.Issuer(), ownerID)
}
return nil
} }
return nil
}
func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (bool, error) {
cnrID, containerIDSet := obj.ContainerID()
if !containerIDSet {
return false, errNilCID
}
cnrIDBin := make([]byte, sha256.Size)
cnrID.Encode(cnrIDBin)
cnr, err := v.containers.Get(cnrID)
if err != nil {
return false, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
}
res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value)
if err != nil {
return false, err
}
return res.Role == acl.RoleContainer || res.Role == acl.RoleInnerRing, nil
}
func (v *FormatValidator) checkOwnerKey(id user.ID, key frostfsecdsa.PublicKey) error {
var id2 user.ID
user.IDFromKey(&id2, (ecdsa.PublicKey)(key))
@ -382,3 +432,38 @@ func WithLockSource(e LockSource) FormatValidatorOption {
c.e = e
}
}
// WithInnerRing return option to set Inner Ring source.
func WithInnerRing(ir InnerRing) FormatValidatorOption {
return func(c *cfg) {
c.ir = ir
}
}
// WithNetmapSource return option to set Netmap source.
func WithNetmapSource(ns netmap.Source) FormatValidatorOption {
return func(c *cfg) {
c.netmap = ns
}
}
// WithContainersSource return option to set Containers source.
func WithContainersSource(cs container.Source) FormatValidatorOption {
return func(c *cfg) {
c.containers = cs
}
}
// WithVerifySessionTokenIssuer return option to set verify session token issuer value.
func WithVerifySessionTokenIssuer(verifySessionTokenIssuer bool) FormatValidatorOption {
return func(c *cfg) {
c.verifyTokenIssuer = verifySessionTokenIssuer
}
}
// WithLogger return option to set logger.
func WithLogger(l *logger.Logger) FormatValidatorOption {
return func(c *cfg) {
c.log = l
}
}
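// Illustrative wiring (an assumption that mirrors the tests below, not the actual node
// bootstrap code): the validator only enforces the issuer check when
// WithVerifySessionTokenIssuer(true) is set, and then needs the Inner Ring, netmap and
// container sources to classify the token signer. Variable names are placeholders.
//
//	v := NewFormatValidator(
//		WithNetState(netState),
//		WithLockSource(lockSource),
//		WithVerifySessionTokenIssuer(true),
//		WithInnerRing(irSource),
//		WithNetmapSource(netmapSource),
//		WithContainersSource(containerSource),
//		WithLogger(log),
//	)
//	err := v.Validate(ctx, obj, false)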

View file

@ -3,18 +3,27 @@ package object
import (
"context"
"crypto/ecdsa"
"fmt"
"strconv"
"testing"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
func blankValidObject(key *ecdsa.PrivateKey) *objectSDK.Object {
@ -56,6 +65,7 @@ func TestFormatValidator_Validate(t *testing.T) {
epoch: curEpoch,
}),
WithLockSource(ls),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
ownerKey, err := keys.NewPrivateKey()
@ -254,3 +264,356 @@ func TestFormatValidator_Validate(t *testing.T) {
})
})
}
func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
const curEpoch = 13
ls := testLockSource{
m: make(map[oid.Address]bool),
}
signer, err := keys.NewPrivateKey()
require.NoError(t, err)
var owner user.ID
ownerPrivKey, err := keys.NewPrivateKey()
require.NoError(t, err)
user.IDFromKey(&owner, ownerPrivKey.PrivateKey.PublicKey)
t.Run("different issuer and owner, verify issuer disabled", func(t *testing.T) {
t.Parallel()
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(false),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("different issuer and owner, issuer is IR node, verify issuer enabled", func(t *testing.T) {
t.Parallel()
cnrID := cidtest.ID()
cont := containerSDK.Container{}
cont.Init()
pp := netmap.PlacementPolicy{}
require.NoError(t, pp.DecodeString("REP 1"))
cont.SetPlacementPolicy(pp)
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(true),
WithInnerRing(&testIRSource{
irNodes: [][]byte{signer.PublicKey().Bytes()},
}),
WithContainersSource(
&testContainerSource{
containers: map[cid.ID]*container.Container{
cnrID: {
Value: cont,
},
},
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("different issuer and owner, issuer is container node in current epoch, verify issuer enabled", func(t *testing.T) {
t.Parallel()
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
cnrID := cidtest.ID()
cont := containerSDK.Container{}
cont.Init()
pp := netmap.PlacementPolicy{}
require.NoError(t, pp.DecodeString("REP 1"))
cont.SetPlacementPolicy(pp)
var node netmap.NodeInfo
node.SetPublicKey(signer.PublicKey().Bytes())
currentEpochNM := &netmap.NetMap{}
currentEpochNM.SetEpoch(curEpoch)
currentEpochNM.SetNodes([]netmap.NodeInfo{node})
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(true),
WithInnerRing(&testIRSource{
irNodes: [][]byte{},
}),
WithContainersSource(
&testContainerSource{
containers: map[cid.ID]*container.Container{
cnrID: {
Value: cont,
},
},
},
),
WithNetmapSource(
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
},
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("different issuer and owner, issuer is container node in previous epoch, verify issuer enabled", func(t *testing.T) {
t.Parallel()
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
cnrID := cidtest.ID()
cont := containerSDK.Container{}
cont.Init()
pp := netmap.PlacementPolicy{}
require.NoError(t, pp.DecodeString("REP 1"))
cont.SetPlacementPolicy(pp)
var issuerNode netmap.NodeInfo
issuerNode.SetPublicKey(signer.PublicKey().Bytes())
var nonIssuerNode netmap.NodeInfo
nonIssuerKey, err := keys.NewPrivateKey()
require.NoError(t, err)
nonIssuerNode.SetPublicKey(nonIssuerKey.PublicKey().Bytes())
currentEpochNM := &netmap.NetMap{}
currentEpochNM.SetEpoch(curEpoch)
currentEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode})
previousEpochNM := &netmap.NetMap{}
previousEpochNM.SetEpoch(curEpoch - 1)
previousEpochNM.SetNodes([]netmap.NodeInfo{issuerNode})
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(true),
WithInnerRing(&testIRSource{
irNodes: [][]byte{},
}),
WithContainersSource(
&testContainerSource{
containers: map[cid.ID]*container.Container{
cnrID: {
Value: cont,
},
},
},
),
WithNetmapSource(
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("different issuer and owner, issuer is unknown, verify issuer enabled", func(t *testing.T) {
t.Parallel()
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
cnrID := cidtest.ID()
cont := containerSDK.Container{}
cont.Init()
pp := netmap.PlacementPolicy{}
require.NoError(t, pp.DecodeString("REP 1"))
cont.SetPlacementPolicy(pp)
var nonIssuerNode1 netmap.NodeInfo
nonIssuerKey1, err := keys.NewPrivateKey()
require.NoError(t, err)
nonIssuerNode1.SetPublicKey(nonIssuerKey1.PublicKey().Bytes())
var nonIssuerNode2 netmap.NodeInfo
nonIssuerKey2, err := keys.NewPrivateKey()
require.NoError(t, err)
nonIssuerNode2.SetPublicKey(nonIssuerKey2.PublicKey().Bytes())
currentEpochNM := &netmap.NetMap{}
currentEpochNM.SetEpoch(curEpoch)
currentEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode1})
previousEpochNM := &netmap.NetMap{}
previousEpochNM.SetEpoch(curEpoch - 1)
previousEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode2})
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(true),
WithInnerRing(&testIRSource{
irNodes: [][]byte{},
}),
WithContainersSource(
&testContainerSource{
containers: map[cid.ID]*container.Container{
cnrID: {
Value: cont,
},
},
},
),
WithNetmapSource(
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.Error(t, v.Validate(context.Background(), obj, false))
})
}
type testIRSource struct {
irNodes [][]byte
}
func (s *testIRSource) InnerRingKeys() ([][]byte, error) {
return s.irNodes, nil
}
type testContainerSource struct {
containers map[cid.ID]*container.Container
}
func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found {
return cnr, nil
}
return nil, fmt.Errorf("container not found")
}
func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
return nil, nil
}
type testNetmapSource struct {
netmaps map[uint64]*netmap.NetMap
currentEpoch uint64
}
func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) {
if diff >= s.currentEpoch {
return nil, fmt.Errorf("invalid diff")
}
return s.GetNetMapByEpoch(s.currentEpoch - diff)
}
func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, fmt.Errorf("netmap not found")
}
func (s *testNetmapSource) Epoch() (uint64, error) {
return s.currentEpoch, nil
}

View file

@ -1,4 +1,4 @@
package object
import (
"bytes"
@ -11,50 +11,64 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
type InnerRing interface {
InnerRingKeys() ([][]byte, error)
}
type SenderClassifier struct {
log *logger.Logger
innerRing InnerRing
netmap core.Source
}
func NewSenderClassifier(innerRing InnerRing, netmap core.Source, log *logger.Logger) SenderClassifier {
return SenderClassifier{
log: log,
innerRing: innerRing,
netmap: netmap,
}
}
type ClassifyResult struct {
Role acl.Role
Key []byte
}
ownerID, ownerKey, err := req.RequestOwner()
if err != nil {
return nil, err
}
func (c SenderClassifier) Classify(
ownerID *user.ID,
ownerKey *keys.PublicKey,
idCnr cid.ID,
cnr container.Container) (res *ClassifyResult, err error) {
ownerKeyInBytes := ownerKey.Bytes()
// TODO: #767 get owner from frostfs.id if present
// if request owner is the same as container owner, return RoleUser
if ownerID.Equals(cnr.Owner()) {
return &ClassifyResult{
Role: acl.RoleOwner,
Key: ownerKeyInBytes,
}, nil
}
return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr)
}
func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
if err != nil {
// do not throw error, try best case matching
c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
zap.String("error", err.Error()))
} else if isInnerRingNode {
return &ClassifyResult{
Role: acl.RoleInnerRing,
Key: ownerKeyInBytes,
}, nil
}
@ -69,20 +83,20 @@ func (c senderClassifier) classify(
c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
zap.String("error", err.Error()))
} else if isContainerNode {
return &ClassifyResult{
Role: acl.RoleContainer,
Key: ownerKeyInBytes,
}, nil
}
// if none of above, return RoleOthers
return &ClassifyResult{
Role: acl.RoleOthers,
Key: ownerKeyInBytes,
}, nil
}
func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
innerRingKeys, err := c.innerRing.InnerRingKeys()
if err != nil {
return false, err
@ -98,7 +112,7 @@ func (c senderClassifier) isInnerRingKey(owner []byte) (bool, error) {
return false, nil
}
func (c SenderClassifier) isContainerKey(
owner, idCnr []byte,
cnr container.Container) (bool, error) {
nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap
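// Usage sketch (assumption, not part of the diff): with the classifier exported, the
// ACL service and the format validator can share one instance. Variable names below
// are placeholders.
//
//	classifier := NewSenderClassifier(irFetcher, netmapSource, log)
//	res, err := classifier.Classify(ownerID, ownerKey, cnrID, cnr)
//	if err == nil && res.Role == acl.RoleOwner {
//		// request comes from the container owner
//	}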

View file

@ -17,6 +17,7 @@ type Blobovnicza struct {
cfg
dataSize atomic.Uint64
itemsCount atomic.Uint64
boltDB *bbolt.DB

View file

@ -3,7 +3,6 @@ package blobovnicza
import (
"context"
"crypto/rand"
"errors"
"os"
"testing"
@ -64,7 +63,7 @@ func TestBlobovnicza(t *testing.T) {
WithPath(p),
WithObjectSizeLimit(objSizeLim),
WithFullSizeLimit(sizeLim),
WithLogger(test.NewLogger(t, true)),
)
defer os.Remove(p)
@ -98,9 +97,9 @@ func TestBlobovnicza(t *testing.T) {
testPutGet(t, blz, oidtest.Address(), objSizeLim, nil, nil)
}
// blobovnizca accepts object event if full
testPutGet(t, blz, oidtest.Address(), 1024, func(err error) bool {
return err == nil
}, nil)
require.NoError(t, blz.Close())

View file

@ -14,7 +14,7 @@ import (
// Open opens an internal database at the configured path with the configured permissions.
//
// If the database file does not exist, it will be created automatically.
// If blobovnicza is already open, does nothing.
func (b *Blobovnicza) Open() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@ -45,7 +45,7 @@ func (b *Blobovnicza) Open() error {
b.boltDB, err = bbolt.Open(b.path, b.perm, b.boltOptions)
if err == nil {
b.opened = true
b.metrics.IncOpenBlobovniczaCount()
}
return err
@ -54,13 +54,13 @@ func (b *Blobovnicza) Open() error {
// Init initializes internal database structure.
//
// If Blobovnicza is already initialized, no action is taken.
// Blobovnicza must be open, otherwise an error will return.
func (b *Blobovnicza) Init() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
if !b.opened {
return errors.New("blobovnicza is not open")
}
b.log.Debug(logs.BlobovniczaInitializing,
@ -68,8 +68,10 @@ func (b *Blobovnicza) Init() error {
zap.Uint64("storage size limit", b.fullSizeLimit),
)
size := b.dataSize.Load()
items := b.itemsCount.Load()
if size != 0 || items != 0 {
b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
return nil
}
@ -96,14 +98,17 @@ func (b *Blobovnicza) Init() error {
}
}
return b.initializeCounters()
}
func (b *Blobovnicza) initializeCounters() error {
var size uint64
var items uint64
err := b.boltDB.View(func(tx *bbolt.Tx) error {
return b.iterateAllBuckets(tx, func(lower, upper uint64, b *bbolt.Bucket) (bool, error) {
keysN := uint64(b.Stats().KeyN)
size += keysN * upper
items += keysN
return false, nil
})
})
@ -111,13 +116,15 @@ func (b *Blobovnicza) initializeSize() error {
return fmt.Errorf("can't determine DB size: %w", err)
}
b.dataSize.Store(size)
b.itemsCount.Store(items)
b.metrics.AddOpenBlobovniczaSize(size)
b.metrics.AddOpenBlobovniczaItems(items)
return nil
}
// Close releases all internal database resources.
//
// If blobovnicza is already closed, does nothing.
func (b *Blobovnicza) Close() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@ -134,9 +141,11 @@ func (b *Blobovnicza) Close() error {
return err
}
b.metrics.DecOpenBlobovniczaCount()
b.metrics.SubOpenBlobovniczaSize(b.dataSize.Load())
b.metrics.SubOpenBlobovniczaItems(b.itemsCount.Load())
b.dataSize.Store(0)
b.itemsCount.Store(0)
b.opened = false


@ -74,7 +74,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
zap.String("binary size", stringifyByteSize(dataSize)), zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)), zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
) )
b.decSize(sizeUpperBound) b.itemDeleted(sizeUpperBound)
} }
return DeleteRes{}, err return DeleteRes{}, err


@ -1,16 +1,21 @@
package blobovnicza package blobovnicza
type Metrics interface { type Metrics interface {
IncOpenBlobovnizcaCount() IncOpenBlobovniczaCount()
DecOpenBlobovnizcaCount() DecOpenBlobovniczaCount()
AddOpenBlobovnizcaSize(size uint64) AddOpenBlobovniczaSize(size uint64)
SubOpenBlobovnizcaSize(size uint64) SubOpenBlobovniczaSize(size uint64)
AddOpenBlobovniczaItems(items uint64)
SubOpenBlobovniczaItems(items uint64)
} }
type NoopMetrics struct{} type NoopMetrics struct{}
func (m *NoopMetrics) IncOpenBlobovnizcaCount() {} func (m *NoopMetrics) IncOpenBlobovniczaCount() {}
func (m *NoopMetrics) DecOpenBlobovnizcaCount() {} func (m *NoopMetrics) DecOpenBlobovniczaCount() {}
func (m *NoopMetrics) AddOpenBlobovnizcaSize(uint64) {} func (m *NoopMetrics) AddOpenBlobovniczaSize(uint64) {}
func (m *NoopMetrics) SubOpenBlobovnizcaSize(uint64) {} func (m *NoopMetrics) SubOpenBlobovniczaSize(uint64) {}
func (m *NoopMetrics) AddOpenBlobovniczaItems(uint64) {}
func (m *NoopMetrics) SubOpenBlobovniczaItems(uint64) {}


@ -23,10 +23,6 @@ type PutPrm struct {
type PutRes struct { type PutRes struct {
} }
// ErrFull is returned when trying to save an
// object to a filled blobovnicza.
var ErrFull = logicerr.New("blobovnicza is full")
// SetAddress sets the address of the saving object. // SetAddress sets the address of the saving object.
func (p *PutPrm) SetAddress(addr oid.Address) { func (p *PutPrm) SetAddress(addr oid.Address) {
p.addr = addr p.addr = addr
@ -65,10 +61,6 @@ func (b *Blobovnicza) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
key := addressKey(prm.addr) key := addressKey(prm.addr)
err := b.boltDB.Batch(func(tx *bbolt.Tx) error { err := b.boltDB.Batch(func(tx *bbolt.Tx) error {
if b.full() {
return ErrFull
}
buck := tx.Bucket(bucketName) buck := tx.Bucket(bucketName)
if buck == nil { if buck == nil {
// expected to happen: // expected to happen:
@ -85,7 +77,7 @@ func (b *Blobovnicza) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
return nil return nil
}) })
if err == nil { if err == nil {
b.incSize(upperBound) b.itemAdded(upperBound)
} }
return PutRes{}, err return PutRes{}, err


@ -3,6 +3,7 @@ package blobovnicza
import ( import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"math"
"math/bits" "math/bits"
"strconv" "strconv"
) )
@ -40,16 +41,20 @@ func upperPowerOfTwo(v uint64) uint64 {
return 1 << bits.Len64(v-1) return 1 << bits.Len64(v-1)
} }
func (b *Blobovnicza) incSize(sz uint64) { func (b *Blobovnicza) itemAdded(itemSize uint64) {
b.dataSize.Add(sz) b.dataSize.Add(itemSize)
b.metrics.AddOpenBlobovnizcaSize(sz) b.itemsCount.Add(1)
b.metrics.AddOpenBlobovniczaSize(itemSize)
b.metrics.AddOpenBlobovniczaItems(1)
} }
func (b *Blobovnicza) decSize(sz uint64) { func (b *Blobovnicza) itemDeleted(itemSize uint64) {
b.dataSize.Add(^(sz - 1)) b.dataSize.Add(^(itemSize - 1))
b.metrics.SubOpenBlobovnizcaSize(sz) b.itemsCount.Add(math.MaxUint64)
b.metrics.SubOpenBlobovniczaSize(itemSize)
b.metrics.SubOpenBlobovniczaItems(1)
} }
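The counters are unsigned, so decrements are expressed as wrap-around additions: for an atomic uint64, Add(^(x-1)) subtracts x and Add(math.MaxUint64) subtracts one. A small standalone illustration of that arithmetic (not part of the change, standard library only):

func atomicSubtractionExample() {
	var size atomic.Uint64 // "sync/atomic"
	size.Store(100)
	x := uint64(30)
	size.Add(^(x - 1))       // adds the two's complement of x: 100 - 30 = 70
	size.Add(math.MaxUint64) // math.MaxUint64 behaves as -1: 70 - 1 = 69
	fmt.Println(size.Load()) // prints 69
}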
func (b *Blobovnicza) full() bool { func (b *Blobovnicza) IsFull() bool {
return b.dataSize.Load() >= b.fullSizeLimit return b.dataSize.Load() >= b.fullSizeLimit
} }


@ -0,0 +1,213 @@
package blobovniczatree
import (
"path/filepath"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
)
type activeDB struct {
blz *blobovnicza.Blobovnicza
shDB *sharedDB
}
func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza {
return db.blz
}
func (db *activeDB) Close() {
db.shDB.Close()
}
func (db *activeDB) Path() string {
return db.shDB.Path()
}
// activeDBManager manages active blobovnicza instances (that is, those that are being used for Put).
//
// Uses dbManager for opening/closing sharedDB instances.
// Stores a reference to an open active sharedDB, so dbManager does not close it.
// When changing the active sharedDB, releases the reference to the previous active sharedDB.
type activeDBManager struct {
levelToActiveDBGuard *sync.RWMutex
levelToActiveDB map[string]*sharedDB
levelLock *utilSync.KeyLocker[string]
closed bool
dbManager *dbManager
leafWidth uint64
}
func newActiveDBManager(dbManager *dbManager, leafWidth uint64) *activeDBManager {
return &activeDBManager{
levelToActiveDBGuard: &sync.RWMutex{},
levelToActiveDB: make(map[string]*sharedDB),
levelLock: utilSync.NewKeyLocker[string](),
dbManager: dbManager,
leafWidth: leafWidth,
}
}
// GetOpenedActiveDBForLevel returns active DB for level.
// DB must be closed after use.
func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, error) {
activeDB, err := m.getCurrentActiveIfOk(lvlPath)
if err != nil {
return nil, err
}
if activeDB != nil {
return activeDB, nil
}
return m.updateAndGetActive(lvlPath)
}
func (m *activeDBManager) Open() {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
m.closed = false
}
func (m *activeDBManager) Close() {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
for _, db := range m.levelToActiveDB {
db.Close()
}
m.levelToActiveDB = make(map[string]*sharedDB)
m.closed = true
}
func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error) {
m.levelToActiveDBGuard.RLock()
defer m.levelToActiveDBGuard.RUnlock()
if m.closed {
return nil, errClosed
}
db, ok := m.levelToActiveDB[lvlPath]
if !ok {
return nil, nil
}
blz, err := db.Open() // open the DB for use; it will be closed by activeDB.Close()
if err != nil {
return nil, err
}
if blz.IsFull() {
db.Close()
return nil, nil
}
return &activeDB{
blz: blz,
shDB: db,
}, nil
}
func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) {
m.levelLock.Lock(lvlPath)
defer m.levelLock.Unlock(lvlPath)
current, err := m.getCurrentActiveIfOk(lvlPath)
if err != nil {
return nil, err
}
if current != nil {
return current, nil
}
nextShDB, err := m.getNextSharedDB(lvlPath)
if err != nil {
return nil, err
}
if nextShDB == nil {
return nil, nil
}
blz, err := nextShDB.Open() // open the DB for the client; the client must call Close() after use
if err != nil {
return nil, err
}
return &activeDB{
blz: blz,
shDB: nextShDB,
}, nil
}
func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
var idx uint64
var iterCount uint64
hasActive, currentIdx := m.hasActiveDB(lvlPath)
if hasActive {
idx = (currentIdx + 1) % m.leafWidth
}
var next *sharedDB
for iterCount < m.leafWidth {
path := filepath.Join(lvlPath, u64ToHexString(idx))
shDB := m.dbManager.GetByPath(path)
db, err := shDB.Open() // open the DB to keep the active DB open; it is closed if the DB is full, after m.replace, or by activeDBManager.Close()
if err != nil {
return nil, err
}
if db.IsFull() {
shDB.Close()
} else {
next = shDB
break
}
idx = (idx + 1) % m.leafWidth
iterCount++
}
previous, updated := m.replace(lvlPath, next)
if !updated && next != nil {
next.Close() // manager is closed, so don't hold active DB open
}
if updated && previous != nil {
previous.Close()
}
return next, nil
}
func (m *activeDBManager) hasActiveDB(lvlPath string) (bool, uint64) {
m.levelToActiveDBGuard.RLock()
defer m.levelToActiveDBGuard.RUnlock()
if m.closed {
return false, 0
}
db, ok := m.levelToActiveDB[lvlPath]
if !ok {
return false, 0
}
return true, u64FromHexString(filepath.Base(db.Path()))
}
func (m *activeDBManager) replace(lvlPath string, shDB *sharedDB) (*sharedDB, bool) {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
if m.closed {
return nil, false
}
previous := m.levelToActiveDB[lvlPath]
if shDB == nil {
delete(m.levelToActiveDB, lvlPath)
} else {
m.levelToActiveDB[lvlPath] = shDB
}
return previous, true
}
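Taken together, the Put path boils down to: ask the manager for the opened active DB of a level, write into it, and release the reference. A sketch of such a caller inside the blobovniczatree package (lvlPath and putPrm are placeholders; the real put iterator shown later in this diff also falls through to other levels instead of failing immediately):

func putToActiveLevel(ctx context.Context, m *activeDBManager, lvlPath string, putPrm blobovnicza.PutPrm) error {
	active, err := m.GetOpenedActiveDBForLevel(lvlPath)
	if err != nil {
		return err
	}
	if active == nil { // every database on this level is already full
		return errPutFailed
	}
	defer active.Close() // releases the reference taken by GetOpenedActiveDBForLevel

	_, err = active.Blobovnicza().Put(ctx, putPrm)
	return err
}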


@ -3,19 +3,12 @@ package blobovniczatree
import ( import (
"errors" "errors"
"fmt" "fmt"
"path/filepath"
"strconv" "strconv"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw" "git.frostfs.info/TrueCloudLab/hrw"
"github.com/hashicorp/golang-lru/v2/simplelru"
"go.uber.org/zap"
) )
// Blobovniczas represents the storage of the "small" objects. // Blobovniczas represents the storage of the "small" objects.
@ -61,35 +54,16 @@ import (
type Blobovniczas struct { type Blobovniczas struct {
cfg cfg
// cache of opened filled Blobovniczas commondbManager *dbManager
opened *simplelru.LRU[string, *blobovnicza.Blobovnicza] activeDBManager *activeDBManager
// lruMtx protects opened cache. dbCache *dbCache
// It isn't RWMutex because `Get` calls must
// lock this mutex on write, as LRU info is updated.
// It must be taken after activeMtx in case when eviction is possible
// i.e. `Add`, `Purge` and `Remove` calls.
lruMtx sync.Mutex
// mutex to exclude parallel bbolt.Open() calls
// bbolt.Open() deadlocks if it tries to open already opened file
openMtx sync.Mutex
// list of active (opened, non-filled) Blobovniczas
activeMtx sync.RWMutex
active map[string]blobovniczaWithIndex
}
type blobovniczaWithIndex struct {
ind uint64
blz *blobovnicza.Blobovnicza
} }
var _ common.Storage = (*Blobovniczas)(nil) var _ common.Storage = (*Blobovniczas)(nil)
var errPutFailed = errors.New("could not save the object in any blobovnicza") var errPutFailed = errors.New("could not save the object in any blobovnicza")
// NewBlobovniczaTree returns new instance of blobovnizas tree. // NewBlobovniczaTree returns a new instance of the blobovniczas tree.
func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) { func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
blz = new(Blobovniczas) blz = new(Blobovniczas)
initConfig(&blz.cfg) initConfig(&blz.cfg)
@ -98,116 +72,17 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
opts[i](&blz.cfg) opts[i](&blz.cfg)
} }
cache, err := simplelru.NewLRU[string, *blobovnicza.Blobovnicza](blz.openedCacheSize, func(p string, value *blobovnicza.Blobovnicza) { if blz.blzLeafWidth == 0 {
lvlPath := filepath.Dir(p) blz.blzLeafWidth = blz.blzShallowWidth
if b, ok := blz.active[lvlPath]; ok && b.ind == u64FromHexString(filepath.Base(p)) {
// This branch is taken if we have recently updated active blobovnicza and remove
// it from opened cache.
return
} else if err := value.Close(); err != nil {
blz.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", p),
zap.String("error", err.Error()),
)
} else {
blz.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict,
zap.String("id", p),
)
}
})
if err != nil {
// occurs only if the size is not positive
panic(fmt.Errorf("could not create LRU cache of size %d: %w", blz.openedCacheSize, err))
} }
cp := uint64(1) blz.commondbManager = newDBManager(blz.rootPath, blz.blzOpts, blz.blzLeafWidth, blz.readOnly, blz.metrics.Blobovnicza(), blz.log)
for i := uint64(0); i < blz.blzShallowDepth; i++ { blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.blzLeafWidth)
cp *= blz.blzShallowWidth blz.dbCache = newDBCache(blz.openedCacheSize, blz.commondbManager)
}
blz.opened = cache
blz.active = make(map[string]blobovniczaWithIndex, cp)
return blz return blz
} }
// activates and returns activated blobovnicza of p-level (dir).
//
// returns error if blobvnicza could not be activated.
func (b *Blobovniczas) getActivated(lvlPath string) (blobovniczaWithIndex, error) {
return b.updateAndGet(lvlPath, nil)
}
// updates active blobovnicza of p-level (dir).
//
// if current active blobovnicza's index is not old, it remains unchanged.
func (b *Blobovniczas) updateActive(lvlPath string, old *uint64) error {
b.log.Debug(logs.BlobovniczatreeUpdatingActiveBlobovnicza, zap.String("path", lvlPath))
_, err := b.updateAndGet(lvlPath, old)
b.log.Debug(logs.BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated, zap.String("path", lvlPath))
return err
}
// updates and returns active blobovnicza of p-level (dir).
//
// if current active blobovnicza's index is not old, it is returned unchanged.
func (b *Blobovniczas) updateAndGet(lvlPath string, old *uint64) (blobovniczaWithIndex, error) {
b.activeMtx.RLock()
active, ok := b.active[lvlPath]
b.activeMtx.RUnlock()
if ok {
if old != nil {
if active.ind == b.blzShallowWidth-1 {
return active, logicerr.New("no more Blobovniczas")
} else if active.ind != *old {
// sort of CAS in order to control concurrent
// updateActive calls
return active, nil
}
} else {
return active, nil
}
active.ind++
}
var err error
if active.blz, err = b.openBlobovnicza(filepath.Join(lvlPath, u64ToHexString(active.ind))); err != nil {
return active, err
}
b.activeMtx.Lock()
defer b.activeMtx.Unlock()
// check 2nd time to find out if it blobovnicza was activated while thread was locked
tryActive, ok := b.active[lvlPath]
if ok && tryActive.blz == active.blz {
return tryActive, nil
}
// Remove from opened cache (active blobovnicza should always be opened).
// Because `onEvict` callback is called in `Remove`, we need to update
// active map beforehand.
b.active[lvlPath] = active
activePath := filepath.Join(lvlPath, u64ToHexString(active.ind))
b.lruMtx.Lock()
b.opened.Remove(activePath)
if ok {
b.opened.Add(filepath.Join(lvlPath, u64ToHexString(tryActive.ind)), tryActive.blz)
}
b.lruMtx.Unlock()
b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyActivated,
zap.String("path", activePath))
return active, nil
}
// returns hash of the object address. // returns hash of the object address.
func addressHash(addr *oid.Address, path string) uint64 { func addressHash(addr *oid.Address, path string) uint64 {
var a string var a string


@ -0,0 +1,103 @@
package blobovniczatree
import (
"fmt"
"sync"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
// dbCache caches sharedDB instances that are NOT open for Put.
//
// Uses dbManager for opening/closing sharedDB instances.
// Stores a reference to a cached sharedDB, so dbManager does not close it.
type dbCache struct {
cacheGuard *sync.RWMutex
cache simplelru.LRUCache[string, *sharedDB]
pathLock *utilSync.KeyLocker[string]
closed bool
dbManager *dbManager
}
func newDBCache(size int, dbManager *dbManager) *dbCache {
cache, err := simplelru.NewLRU[string, *sharedDB](size, func(_ string, evictedDB *sharedDB) {
evictedDB.Close()
})
if err != nil {
// occurs only if the size is not positive
panic(fmt.Errorf("could not create LRU cache of size %d: %w", size, err))
}
return &dbCache{
cacheGuard: &sync.RWMutex{},
cache: cache,
dbManager: dbManager,
pathLock: utilSync.NewKeyLocker[string](),
}
}
func (c *dbCache) Open() {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
c.closed = false
}
func (c *dbCache) Close() {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
c.cache.Purge()
c.closed = true
}
func (c *dbCache) GetOrCreate(path string) *sharedDB {
value := c.getExisted(path)
if value != nil {
return value
}
return c.create(path)
}
func (c *dbCache) getExisted(path string) *sharedDB {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
if value, ok := c.cache.Get(path); ok {
return value
}
return nil
}
func (c *dbCache) create(path string) *sharedDB {
c.pathLock.Lock(path)
defer c.pathLock.Unlock(path)
value := c.getExisted(path)
if value != nil {
return value
}
value = c.dbManager.GetByPath(path)
_, err := value.Open() // open the DB to hold a reference; it is released by evictedDB.Close() on eviction or when the cache is closed
if err != nil {
return value
}
if added := c.put(path, value); !added {
value.Close()
}
return value
}
func (c *dbCache) put(path string, db *sharedDB) bool {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
if !c.closed {
c.cache.Add(path, db)
return true
}
return false
}
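The cache keeps one reference of its own for every cached sharedDB (released on LRU eviction or on Close), while each reader takes and releases an extra one. A sketch of how a read path consumes it, assuming it lives in the same package (p and addr are placeholders):

func existsViaCache(ctx context.Context, c *dbCache, p string, addr oid.Address) (bool, error) {
	shDB := c.GetOrCreate(p) // the cache holds its own reference while the entry stays in the LRU
	blz, err := shDB.Open()  // the reader takes an additional reference
	if err != nil {
		return false, err
	}
	defer shDB.Close() // the bbolt file stays open as long as any reference remains
	return blz.Exists(ctx, addr)
}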


@ -0,0 +1,59 @@
package blobovniczatree
import (
"context"
"sync"
"sync/atomic"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/stretchr/testify/require"
)
func TestBlobovniczaTree_Concurrency(t *testing.T) {
t.Parallel()
const n = 1000
st := NewBlobovniczaTree(
WithLogger(test.NewLogger(t, true)),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(10),
WithBlobovniczaShallowDepth(1),
WithRootPath(t.TempDir()))
require.NoError(t, st.Open(false))
require.NoError(t, st.Init())
t.Cleanup(func() {
require.NoError(t, st.Close())
})
objGen := &testutil.SeqObjGenerator{ObjSize: 1}
var cnt atomic.Int64
var wg sync.WaitGroup
for i := 0; i < 1000; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for cnt.Add(1) <= n {
obj := objGen.Next()
addr := testutil.AddressFromObject(t, obj)
raw, err := obj.Marshal()
require.NoError(t, err)
_, err = st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
})
require.NoError(t, err)
_, err = st.Get(context.Background(), common.GetPrm{Address: addr})
require.NoError(t, err)
}
}()
}
wg.Wait()
}


@ -2,11 +2,8 @@ package blobovniczatree
import ( import (
"context" "context"
"fmt"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -14,6 +11,7 @@ import (
func (b *Blobovniczas) Open(readOnly bool) error { func (b *Blobovniczas) Open(readOnly bool) error {
b.readOnly = readOnly b.readOnly = readOnly
b.metrics.SetMode(readOnly) b.metrics.SetMode(readOnly)
b.openManagers()
return nil return nil
} }
@ -29,115 +27,40 @@ func (b *Blobovniczas) Init() error {
} }
return b.iterateLeaves(context.TODO(), func(p string) (bool, error) { return b.iterateLeaves(context.TODO(), func(p string) (bool, error) {
blz, err := b.openBlobovniczaNoCache(p) shBlz := b.getBlobovniczaWithoutCaching(p)
_, err := shBlz.Open()
if err != nil { if err != nil {
return true, err return true, err
} }
defer blz.Close() defer shBlz.Close()
if err := blz.Init(); err != nil {
return true, fmt.Errorf("could not initialize blobovnicza structure %s: %w", p, err)
}
b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
return false, nil return false, nil
}) })
} }
func (b *Blobovniczas) openManagers() {
b.commondbManager.Open() // the opening order matters: the DB manager first, then the active DB manager, then the cache
b.activeDBManager.Open()
b.dbCache.Open()
}
// Close implements common.Storage. // Close implements common.Storage.
func (b *Blobovniczas) Close() error { func (b *Blobovniczas) Close() error {
b.activeMtx.Lock() b.dbCache.Close() // the closing order matters: reverse of the opening order
b.activeDBManager.Close()
b.lruMtx.Lock() b.commondbManager.Close()
for p, v := range b.active {
if err := v.blz.Close(); err != nil {
b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza,
zap.String("path", p),
zap.String("error", err.Error()),
)
}
b.opened.Remove(p)
}
for _, k := range b.opened.Keys() {
blz, _ := b.opened.Get(k)
if err := blz.Close(); err != nil {
b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza,
zap.String("path", k),
zap.String("error", err.Error()),
)
}
b.opened.Remove(k)
}
b.active = make(map[string]blobovniczaWithIndex)
b.metrics.Close()
b.lruMtx.Unlock()
b.activeMtx.Unlock()
return nil return nil
} }
// opens and returns blobovnicza with path p. // returns the blobovnicza with path p.
// //
// If blobovnicza is already opened and cached, instance from cache is returned w/o changes. // If blobovnicza is already cached, instance from cache is returned w/o changes.
func (b *Blobovniczas) openBlobovnicza(p string) (*blobovnicza.Blobovnicza, error) { func (b *Blobovniczas) getBlobovnicza(p string) *sharedDB {
b.lruMtx.Lock() return b.dbCache.GetOrCreate(p)
v, ok := b.opened.Get(p)
b.lruMtx.Unlock()
if ok {
// blobovnicza should be opened in cache
return v, nil
}
lvlPath := filepath.Dir(p)
curIndex := u64FromHexString(filepath.Base(p))
b.activeMtx.RLock()
defer b.activeMtx.RUnlock()
active, ok := b.active[lvlPath]
if ok && active.ind == curIndex {
return active.blz, nil
}
b.lruMtx.Lock()
defer b.lruMtx.Unlock()
v, ok = b.opened.Get(p)
if ok {
return v, nil
}
blz, err := b.openBlobovniczaNoCache(p)
if err != nil {
return nil, err
}
b.opened.Add(p, blz)
return blz, nil
} }
func (b *Blobovniczas) openBlobovniczaNoCache(p string) (*blobovnicza.Blobovnicza, error) { func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB {
b.openMtx.Lock() return b.commondbManager.GetByPath(p)
defer b.openMtx.Unlock()
path := filepath.Join(b.rootPath, p)
blz := blobovnicza.New(append(b.blzOpts,
blobovnicza.WithReadOnly(b.readOnly),
blobovnicza.WithPath(path),
blobovnicza.WithMetrics(b.metrics.Blobovnizca()),
)...)
if err := blz.Open(); err != nil {
return nil, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
}
if err := blz.Init(); err != nil {
return nil, fmt.Errorf("could not init blobovnicza %s: %w", p, err)
}
return blz, nil
} }


@ -3,7 +3,6 @@ package blobovniczatree
import ( import (
"context" "context"
"encoding/hex" "encoding/hex"
"path/filepath"
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -48,10 +47,12 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if prm.StorageID != nil { if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID) id := blobovnicza.NewIDFromBytes(prm.StorageID)
blz, err := b.openBlobovnicza(id.String()) shBlz := b.getBlobovnicza(id.String())
blz, err := shBlz.Open()
if err != nil { if err != nil {
return res, err return res, err
} }
defer shBlz.Close()
if res, err = b.deleteObject(ctx, blz, bPrm); err == nil { if res, err = b.deleteObject(ctx, blz, bPrm); err == nil {
success = true success = true
@ -59,16 +60,10 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
return res, err return res, err
} }
activeCache := make(map[string]struct{})
objectFound := false objectFound := false
err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) { err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
dirPath := filepath.Dir(p) res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
// don't process active blobovnicza of the level twice
_, ok := activeCache[dirPath]
res, err = b.deleteObjectFromLevel(ctx, bPrm, p, !ok)
if err != nil { if err != nil {
if !client.IsErrObjectNotFound(err) { if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
@ -78,8 +73,6 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
} }
} }
activeCache[dirPath] = struct{}{}
if err == nil { if err == nil {
objectFound = true objectFound = true
} }
@ -100,57 +93,13 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
// tries to delete object from particular blobovnicza. // tries to delete object from particular blobovnicza.
// //
// returns no error if object was removed from some blobovnicza of the same level. // returns no error if object was removed from some blobovnicza of the same level.
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string, tryActive bool) (common.DeleteRes, error) { func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) {
lvlPath := filepath.Dir(blzPath) shBlz := b.getBlobovnicza(blzPath)
blz, err := shBlz.Open()
// try to remove from blobovnicza if it is opened
b.lruMtx.Lock()
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
if res, err := b.deleteObject(ctx, v, prm); err == nil {
return res, err
} else if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
// therefore the object is possibly placed in a lighter blobovnicza
// next we check in the active level blobobnicza:
// * the active blobovnicza is always opened.
b.activeMtx.RLock()
active, ok := b.active[lvlPath]
b.activeMtx.RUnlock()
if ok && tryActive {
if res, err := b.deleteObject(ctx, active.blz, prm); err == nil {
return res, err
} else if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
// then object is possibly placed in closed blobovnicza
// check if it makes sense to try to open the blob
// (Blobovniczas "after" the active one are empty anyway,
// and it's pointless to open them).
if u64FromHexString(filepath.Base(blzPath)) > active.ind {
return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
// open blobovnicza (cached inside)
blz, err := b.openBlobovnicza(blzPath)
if err != nil { if err != nil {
return common.DeleteRes{}, err return common.DeleteRes{}, err
} }
defer shBlz.Close()
return b.deleteObject(ctx, blz, prm) return b.deleteObject(ctx, blz, prm)
} }

View file

@ -7,6 +7,8 @@ import (
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
) )
var errClosed = logicerr.New("blobovnicza is closed")
func isErrOutOfRange(err error) bool { func isErrOutOfRange(err error) bool {
var target *apistatus.ObjectOutOfRange var target *apistatus.ObjectOutOfRange
return errors.As(err, &target) return errors.As(err, &target)


@ -3,7 +3,6 @@ package blobovniczatree
import ( import (
"context" "context"
"encoding/hex" "encoding/hex"
"path/filepath"
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -37,26 +36,22 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
if prm.StorageID != nil { if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID) id := blobovnicza.NewIDFromBytes(prm.StorageID)
blz, err := b.openBlobovnicza(id.String()) shBlz := b.getBlobovnicza(id.String())
blz, err := shBlz.Open()
if err != nil { if err != nil {
return common.ExistsRes{}, err return common.ExistsRes{}, err
} }
defer shBlz.Close()
exists, err := blz.Exists(ctx, prm.Address) exists, err := blz.Exists(ctx, prm.Address)
return common.ExistsRes{Exists: exists}, err return common.ExistsRes{Exists: exists}, err
} }
activeCache := make(map[string]struct{})
var gPrm blobovnicza.GetPrm var gPrm blobovnicza.GetPrm
gPrm.SetAddress(prm.Address) gPrm.SetAddress(prm.Address)
err := b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) { err := b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
dirPath := filepath.Dir(p) _, err := b.getObjectFromLevel(ctx, gPrm, p)
_, ok := activeCache[dirPath]
_, err := b.getObjectFromLevel(ctx, gPrm, p, !ok)
if err != nil { if err != nil {
if !client.IsErrObjectNotFound(err) { if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
@ -65,7 +60,6 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
} }
} }
activeCache[dirPath] = struct{}{}
found = err == nil found = err == nil
return found, nil return found, nil
}) })


@ -9,16 +9,15 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/nspcc-dev/neo-go/pkg/util/slice" "github.com/nspcc-dev/neo-go/pkg/util/slice"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/zap"
) )
func TestExistsInvalidStorageID(t *testing.T) { func TestExistsInvalidStorageID(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
b := NewBlobovniczaTree( b := NewBlobovniczaTree(
WithLogger(&logger.Logger{Logger: zap.L()}), WithLogger(test.NewLogger(t, true)),
WithObjectSizeLimit(1024), WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2), WithBlobovniczaShallowDepth(2),


@ -5,8 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"go.uber.org/zap"
) )
func TestGeneric(t *testing.T) { func TestGeneric(t *testing.T) {
@ -14,7 +13,7 @@ func TestGeneric(t *testing.T) {
helper := func(t *testing.T, dir string) common.Storage { helper := func(t *testing.T, dir string) common.Storage {
return NewBlobovniczaTree( return NewBlobovniczaTree(
WithLogger(&logger.Logger{Logger: zap.L()}), WithLogger(test.NewLogger(t, true)),
WithObjectSizeLimit(maxObjectSize), WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2), WithBlobovniczaShallowDepth(2),
@ -41,7 +40,7 @@ func TestControl(t *testing.T) {
newTree := func(t *testing.T) common.Storage { newTree := func(t *testing.T) common.Storage {
return NewBlobovniczaTree( return NewBlobovniczaTree(
WithLogger(&logger.Logger{Logger: zap.L()}), WithLogger(test.NewLogger(t, true)),
WithObjectSizeLimit(maxObjectSize), WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2), WithBlobovniczaShallowDepth(2),


@ -4,7 +4,6 @@ import (
"context" "context"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"path/filepath"
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -48,10 +47,12 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
if prm.StorageID != nil { if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID) id := blobovnicza.NewIDFromBytes(prm.StorageID)
blz, err := b.openBlobovnicza(id.String()) shBlz := b.getBlobovnicza(id.String())
blz, err := shBlz.Open()
if err != nil { if err != nil {
return res, err return res, err
} }
defer shBlz.Close()
res, err = b.getObject(ctx, blz, bPrm) res, err = b.getObject(ctx, blz, bPrm)
if err == nil { if err == nil {
@ -61,14 +62,8 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
return res, err return res, err
} }
activeCache := make(map[string]struct{})
err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) { err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
dirPath := filepath.Dir(p) res, err = b.getObjectFromLevel(ctx, bPrm, p)
_, ok := activeCache[dirPath]
res, err = b.getObjectFromLevel(ctx, bPrm, p, !ok)
if err != nil { if err != nil {
if !client.IsErrObjectNotFound(err) { if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
@ -78,8 +73,6 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
} }
} }
activeCache[dirPath] = struct{}{}
// abort iterator if found, otherwise process all Blobovniczas // abort iterator if found, otherwise process all Blobovniczas
return err == nil, nil return err == nil, nil
}) })
@ -98,58 +91,14 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
// tries to read object from particular blobovnicza. // tries to read object from particular blobovnicza.
// //
// returns error if object could not be read from any blobovnicza of the same level. // returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string, tryActive bool) (common.GetRes, error) { func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to read from blobovnicza if it is opened
b.lruMtx.Lock()
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
if res, err := b.getObject(ctx, v, prm); err == nil {
return res, err
} else if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
// therefore the object is possibly placed in a lighter blobovnicza
// next we check in the active level blobobnicza:
// * the freshest objects are probably the most demanded;
// * the active blobovnicza is always opened.
b.activeMtx.RLock()
active, ok := b.active[lvlPath]
b.activeMtx.RUnlock()
if ok && tryActive {
if res, err := b.getObject(ctx, active.blz, prm); err == nil {
return res, err
} else if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
// then object is possibly placed in closed blobovnicza
// check if it makes sense to try to open the blob
// (Blobovniczas "after" the active one are empty anyway,
// and it's pointless to open them).
if u64FromHexString(filepath.Base(blzPath)) > active.ind {
return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
// open blobovnicza (cached inside) // open blobovnicza (cached inside)
blz, err := b.openBlobovnicza(blzPath) shBlz := b.getBlobovnicza(blzPath)
blz, err := shBlz.Open()
if err != nil { if err != nil {
return common.GetRes{}, err return common.GetRes{}, err
} }
defer shBlz.Close()
return b.getObject(ctx, blz, prm) return b.getObject(ctx, blz, prm)
} }


@ -4,7 +4,6 @@ import (
"context" "context"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"path/filepath"
"strconv" "strconv"
"time" "time"
@ -47,10 +46,12 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if prm.StorageID != nil { if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID) id := blobovnicza.NewIDFromBytes(prm.StorageID)
blz, err := b.openBlobovnicza(id.String()) shBlz := b.getBlobovnicza(id.String())
blz, err := shBlz.Open()
if err != nil { if err != nil {
return common.GetRangeRes{}, err return common.GetRangeRes{}, err
} }
defer shBlz.Close()
res, err := b.getObjectRange(ctx, blz, prm) res, err := b.getObjectRange(ctx, blz, prm)
if err == nil { if err == nil {
@ -60,15 +61,10 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
return res, err return res, err
} }
activeCache := make(map[string]struct{})
objectFound := false objectFound := false
err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) { err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
dirPath := filepath.Dir(p) res, err = b.getRangeFromLevel(ctx, prm, p)
_, ok := activeCache[dirPath]
res, err = b.getRangeFromLevel(ctx, prm, p, !ok)
if err != nil { if err != nil {
outOfBounds := isErrOutOfRange(err) outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !client.IsErrObjectNotFound(err) { if !outOfBounds && !client.IsErrObjectNotFound(err) {
@ -82,8 +78,6 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
} }
} }
activeCache[dirPath] = struct{}{}
objectFound = err == nil objectFound = err == nil
// abort iterator if found, otherwise process all Blobovniczas // abort iterator if found, otherwise process all Blobovniczas
@ -106,68 +100,14 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
// tries to read range of object payload data from particular blobovnicza. // tries to read range of object payload data from particular blobovnicza.
// //
// returns error if object could not be read from any blobovnicza of the same level. // returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string, tryActive bool) (common.GetRangeRes, error) { func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to read from blobovnicza if it is opened
b.lruMtx.Lock()
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
res, err := b.getObjectRange(ctx, v, prm)
switch {
case err == nil,
isErrOutOfRange(err):
return res, err
default:
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
}
// therefore the object is possibly placed in a lighter blobovnicza
// next we check in the active level blobobnicza:
// * the freshest objects are probably the most demanded;
// * the active blobovnicza is always opened.
b.activeMtx.RLock()
active, ok := b.active[lvlPath]
b.activeMtx.RUnlock()
if ok && tryActive {
res, err := b.getObjectRange(ctx, active.blz, prm)
switch {
case err == nil,
isErrOutOfRange(err):
return res, err
default:
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
}
// then object is possibly placed in closed blobovnicza
// check if it makes sense to try to open the blob
// (Blobovniczas "after" the active one are empty anyway,
// and it's pointless to open them).
if u64FromHexString(filepath.Base(blzPath)) > active.ind {
return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
// open blobovnicza (cached inside) // open blobovnicza (cached inside)
blz, err := b.openBlobovnicza(blzPath) shBlz := b.getBlobovnicza(blzPath)
blz, err := shBlz.Open()
if err != nil { if err != nil {
return common.GetRangeRes{}, err return common.GetRangeRes{}, err
} }
defer shBlz.Close()
return b.getObjectRange(ctx, blz, prm) return b.getObjectRange(ctx, blz, prm)
} }


@ -68,13 +68,15 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
// iterator over all Blobovniczas in unsorted order. Break on f's error return. // iterator over all Blobovniczas in unsorted order. Break on f's error return.
func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error { func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
return b.iterateLeaves(ctx, func(p string) (bool, error) { return b.iterateLeaves(ctx, func(p string) (bool, error) {
blz, err := b.openBlobovnicza(p) shBlz := b.getBlobovnicza(p)
blz, err := shBlz.Open()
if err != nil { if err != nil {
if ignoreErrors { if ignoreErrors {
return false, nil return false, nil
} }
return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err) return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
} }
defer shBlz.Close()
err = f(p, blz) err = f(p, blz)
@ -115,7 +117,12 @@ func (b *Blobovniczas) iterateDeepest(ctx context.Context, addr oid.Address, f f
// iterator over particular level of directories. // iterator over particular level of directories.
func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, curPath []string, execDepth uint64, f func([]string) (bool, error)) (bool, error) { func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, curPath []string, execDepth uint64, f func([]string) (bool, error)) (bool, error) {
indices := indexSlice(b.blzShallowWidth) isLeafLevel := uint64(len(curPath)) == b.blzShallowDepth
levelWidth := b.blzShallowWidth
if isLeafLevel {
levelWidth = b.blzLeafWidth
}
indices := indexSlice(levelWidth)
hrw.SortSliceByValue(indices, addressHash(addr, filepath.Join(curPath...))) hrw.SortSliceByValue(indices, addressHash(addr, filepath.Join(curPath...)))


@ -0,0 +1,242 @@
package blobovniczatree
import (
"fmt"
"path/filepath"
"sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
// sharedDB is responsible for opening and closing the file of a single blobovnicza.
type sharedDB struct {
guard *sync.RWMutex
blcza *blobovnicza.Blobovnicza
refCount uint32
openDBCounter *openDBCounter
closedFlag *atomic.Bool
options []blobovnicza.Option
path string
readOnly bool
metrics blobovnicza.Metrics
log *logger.Logger
}
func newSharedDB(options []blobovnicza.Option, path string, readOnly bool,
metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger) *sharedDB {
return &sharedDB{
guard: &sync.RWMutex{},
options: options,
path: path,
readOnly: readOnly,
metrics: metrics,
closedFlag: closedFlag,
log: log,
openDBCounter: openDBCounter,
}
}
func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
if b.closedFlag.Load() {
return nil, errClosed
}
b.guard.Lock()
defer b.guard.Unlock()
if b.refCount > 0 {
b.refCount++
return b.blcza, nil
}
blz := blobovnicza.New(append(b.options,
blobovnicza.WithReadOnly(b.readOnly),
blobovnicza.WithPath(b.path),
blobovnicza.WithMetrics(b.metrics),
)...)
if err := blz.Open(); err != nil {
return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
}
if err := blz.Init(); err != nil {
return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
}
b.refCount++
b.blcza = blz
b.openDBCounter.Inc()
return blz, nil
}
func (b *sharedDB) Close() {
b.guard.Lock()
defer b.guard.Unlock()
if b.refCount == 0 {
b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
return
}
if b.refCount == 1 {
b.refCount = 0
if err := b.blcza.Close(); err != nil {
b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
)
}
b.blcza = nil
b.openDBCounter.Dec()
return
}
b.refCount--
}
func (b *sharedDB) Path() string {
return b.path
}
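sharedDB is a reference counter around one bbolt file: the first Open actually opens and initializes the blobovnicza, later Opens only bump the counter and return the same instance, and only the Close matching the last reference really closes the file. A short sketch of those semantics (two readers of one sharedDB, error handling trimmed):

func twoReaders(shDB *sharedDB) error {
	blzA, err := shDB.Open() // refCount 0 -> 1: the bbolt file is opened and initialized
	if err != nil {
		return err
	}
	blzB, err := shDB.Open() // refCount 1 -> 2: the same *blobovnicza.Blobovnicza is returned
	if err != nil {
		shDB.Close()
		return err
	}
	_, _ = blzA, blzB // ... both readers work with the same open database ...
	shDB.Close()      // refCount 2 -> 1: the file stays open
	shDB.Close()      // refCount 1 -> 0: the file is actually closed
	return nil
}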
// levelDbManager stores pointers to the sharedDB instances of a leaf directory of the blobovnicza tree.
type levelDbManager struct {
databases []*sharedDB
}
func newLevelDBManager(width uint64, options []blobovnicza.Option, rootPath string, lvlPath string,
readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger) *levelDbManager {
result := &levelDbManager{
databases: make([]*sharedDB, width),
}
for idx := uint64(0); idx < width; idx++ {
result.databases[idx] = newSharedDB(options, filepath.Join(rootPath, lvlPath, u64ToHexString(idx)), readOnly, metrics, openDBCounter, closedFlag, log)
}
return result
}
func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
return m.databases[idx]
}
// dbManager manages the opening and closing of blobovnicza instances.
//
// The blobovnicza opens at the first request, closes after the last request.
type dbManager struct {
levelToManager map[string]*levelDbManager
levelToManagerGuard *sync.RWMutex
closedFlag *atomic.Bool
dbCounter *openDBCounter
rootPath string
options []blobovnicza.Option
readOnly bool
metrics blobovnicza.Metrics
leafWidth uint64
log *logger.Logger
}
func newDBManager(rootPath string, options []blobovnicza.Option, leafWidth uint64, readOnly bool, metrics blobovnicza.Metrics, log *logger.Logger) *dbManager {
return &dbManager{
rootPath: rootPath,
options: options,
readOnly: readOnly,
metrics: metrics,
leafWidth: leafWidth,
levelToManager: make(map[string]*levelDbManager),
levelToManagerGuard: &sync.RWMutex{},
log: log,
closedFlag: &atomic.Bool{},
dbCounter: newOpenDBCounter(),
}
}
func (m *dbManager) GetByPath(path string) *sharedDB {
lvlPath := filepath.Dir(path)
curIndex := u64FromHexString(filepath.Base(path))
levelManager := m.getLevelManager(lvlPath)
return levelManager.GetByIndex(curIndex)
}
func (m *dbManager) Open() {
m.closedFlag.Store(false)
}
func (m *dbManager) Close() {
m.closedFlag.Store(true)
m.dbCounter.WaitUntilAllClosed()
}
func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
result := m.getLevelManagerIfExists(lvlPath)
if result != nil {
return result
}
return m.getOrCreateLevelManager(lvlPath)
}
func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager {
m.levelToManagerGuard.RLock()
defer m.levelToManagerGuard.RUnlock()
return m.levelToManager[lvlPath]
}
func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager {
m.levelToManagerGuard.Lock()
defer m.levelToManagerGuard.Unlock()
if result, ok := m.levelToManager[lvlPath]; ok {
return result
}
result := newLevelDBManager(m.leafWidth, m.options, m.rootPath, lvlPath, m.readOnly, m.metrics, m.dbCounter, m.closedFlag, m.log)
m.levelToManager[lvlPath] = result
return result
}
type openDBCounter struct {
cond *sync.Cond
count uint64
}
func newOpenDBCounter() *openDBCounter {
return &openDBCounter{
cond: &sync.Cond{
L: &sync.Mutex{},
},
}
}
func (c *openDBCounter) Inc() {
c.cond.L.Lock()
defer c.cond.L.Unlock()
c.count++
}
func (c *openDBCounter) Dec() {
c.cond.L.Lock()
defer c.cond.L.Unlock()
if c.count > 0 {
c.count--
}
if c.count == 0 {
c.cond.Broadcast()
}
}
func (c *openDBCounter) WaitUntilAllClosed() {
c.cond.L.Lock()
for c.count > 0 {
c.cond.Wait()
}
c.cond.L.Unlock()
}
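On shutdown dbManager first flips closedFlag, so any further sharedDB.Open fails with errClosed, and then blocks in WaitUntilAllClosed until every blobovnicza opened earlier has been released; openDBCounter is a thin sync.Cond wrapper. A reduced illustration of that wait pattern (it reuses the counter type above, the goroutine body is illustrative):

func waitForAllClosed() {
	c := newOpenDBCounter()
	c.Inc() // one blobovnicza is currently open
	go func() {
		// ... work with the open blobovnicza ...
		c.Dec() // dropping to zero broadcasts on the condition variable
	}()
	c.WaitUntilAllClosed() // returns only after the count is back to zero
}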


@ -7,7 +7,7 @@ import (
) )
type Metrics interface { type Metrics interface {
Blobovnizca() blobovnicza.Metrics Blobovnicza() blobovnicza.Metrics
SetParentID(parentID string) SetParentID(parentID string)
@ -33,6 +33,6 @@ func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {}
func (m *noopMetrics) Get(time.Duration, int, bool, bool) {} func (m *noopMetrics) Get(time.Duration, int, bool, bool) {}
func (m *noopMetrics) Iterate(time.Duration, bool) {} func (m *noopMetrics) Iterate(time.Duration, bool) {}
func (m *noopMetrics) Put(time.Duration, int, bool) {} func (m *noopMetrics) Put(time.Duration, int, bool) {}
func (m *noopMetrics) Blobovnizca() blobovnicza.Metrics { func (m *noopMetrics) Blobovnicza() blobovnicza.Metrics {
return &blobovnicza.NoopMetrics{} return &blobovnicza.NoopMetrics{}
} }


@ -17,6 +17,7 @@ type cfg struct {
openedCacheSize int openedCacheSize int
blzShallowDepth uint64 blzShallowDepth uint64
blzShallowWidth uint64 blzShallowWidth uint64
blzLeafWidth uint64
compression *compression.Config compression *compression.Config
blzOpts []blobovnicza.Option blzOpts []blobovnicza.Option
// reportError is the function called when encountering disk errors. // reportError is the function called when encountering disk errors.
@ -64,6 +65,12 @@ func WithBlobovniczaShallowWidth(width uint64) Option {
} }
} }
func WithBlobovniczaLeafWidth(w uint64) Option {
return func(c *cfg) {
c.blzLeafWidth = w
}
}
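With this option the leaf directories may contain a different number of databases than the tree is wide; when it is left unset, NewBlobovniczaTree falls back to the shallow width (see the constructor change above). A configuration sketch with illustrative sizes and path:

func newTreeExample() *Blobovniczas {
	return NewBlobovniczaTree(
		WithBlobovniczaShallowDepth(2), // two directory levels
		WithBlobovniczaShallowWidth(4), // four subdirectories per intermediate level
		WithBlobovniczaLeafWidth(8),    // eight databases per leaf directory
		WithRootPath("/srv/frostfs/blobovniczas"),
	)
}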
func WithBlobovniczaShallowDepth(depth uint64) Option { func WithBlobovniczaShallowDepth(depth uint64) Option {
return func(c *cfg) { return func(c *cfg) {
c.blzShallowDepth = depth c.blzShallowDepth = depth


@ -2,7 +2,6 @@ package blobovniczatree
import ( import (
"context" "context"
"errors"
"path/filepath" "path/filepath"
"time" "time"
@ -10,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"go.uber.org/zap" "go.uber.org/zap"
@ -76,8 +74,8 @@ type putIterator struct {
PutPrm blobovnicza.PutPrm PutPrm blobovnicza.PutPrm
} }
func (i *putIterator) iterate(ctx context.Context, path string) (bool, error) { func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) {
active, err := i.B.getActivated(path) active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
if err != nil { if err != nil {
if !isLogical(err) { if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
@ -89,46 +87,29 @@ func (i *putIterator) iterate(ctx context.Context, path string) (bool, error) {
return false, nil return false, nil
} }
if _, err := active.blz.Put(ctx, i.PutPrm); err != nil { if active == nil {
// Check if blobovnicza is full. We could either receive `blobovnicza.ErrFull` error i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
// or update active blobovnicza in other thread. In the latter case the database will be closed
// and `updateActive` takes care of not updating the active blobovnicza twice.
if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) {
if isFull {
i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed,
zap.String("path", filepath.Join(path, u64ToHexString(active.ind))))
}
if err := i.B.updateActive(path, &active.ind); err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza, err)
} else {
i.B.log.Debug(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza,
zap.String("level", path),
zap.String("error", err.Error()))
}
return false, nil return false, nil
} }
defer active.Close()
return i.iterate(ctx, path)
}
i.AllFull = false i.AllFull = false
_, err = active.Blobovnicza().Put(ctx, i.PutPrm)
if err != nil {
if !isLogical(err) { if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else { } else {
i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", filepath.Join(path, u64ToHexString(active.ind))), zap.String("path", active.Path()),
zap.String("error", err.Error())) zap.String("error", err.Error()))
} }
return false, nil return false, nil
} }
path = filepath.Join(path, u64ToHexString(active.ind)) idx := u64FromHexString(filepath.Base(active.Path()))
i.ID = blobovnicza.NewIDFromBytes([]byte(filepath.Join(lvlPath, u64ToHexString(idx))))
i.ID = blobovnicza.NewIDFromBytes([]byte(path))
return true, nil return true, nil
} }


@ -7,16 +7,15 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/zap"
) )
func TestSimpleLifecycle(t *testing.T) { func TestSimpleLifecycle(t *testing.T) {
s := New( s := New(
WithRootPath("memstore"), WithRootPath("memstore"),
WithLogger(&logger.Logger{Logger: zap.L()}), WithLogger(test.NewLogger(t, true)),
) )
t.Cleanup(func() { _ = s.Close() }) t.Cleanup(func() { _ = s.Close() })
require.NoError(t, s.Open(false)) require.NoError(t, s.Open(false))


@ -22,11 +22,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.etcd.io/bbolt" "go.etcd.io/bbolt"
"go.uber.org/zap"
) )
// TestInitializationFailure checks that shard is initialized and closed even if media // TestInitializationFailure checks that shard is initialized and closed even if media
@ -54,7 +53,7 @@ func TestInitializationFailure(t *testing.T) {
return []shard.Option{ return []shard.Option{
shard.WithID(sid), shard.WithID(sid),
shard.WithLogger(&logger.Logger{Logger: zap.L()}), shard.WithLogger(test.NewLogger(t, true)),
shard.WithBlobStorOptions( shard.WithBlobStorOptions(
blobstor.WithStorages(storages)), blobstor.WithStorages(storages)),
shard.WithMetaBaseOptions( shard.WithMetaBaseOptions(
@ -295,7 +294,7 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
te := testNewEngine(t). te := testNewEngine(t).
setShardsNumOpts(t, num, func(id int) []shard.Option { setShardsNumOpts(t, num, func(id int) []shard.Option {
return []shard.Option{ return []shard.Option{
shard.WithLogger(&logger.Logger{Logger: zap.L()}), shard.WithLogger(test.NewLogger(t, true)),
shard.WithBlobStorOptions( shard.WithBlobStorOptions(
blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))), blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
shard.WithMetaBaseOptions( shard.WithMetaBaseOptions(


@@ -6,14 +6,13 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
)

func TestDeleteBigObject(t *testing.T) {
@@ -54,7 +53,7 @@ func TestDeleteBigObject(t *testing.T) {
	s3 := testNewShard(t, 3)
	e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
-	e.log = &logger.Logger{Logger: zap.L()}
+	e.log = test.NewLogger(t, true)
	defer e.Close()
	for i := range children {


@@ -15,14 +15,13 @@ import (
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"git.frostfs.info/TrueCloudLab/hrw"
	"github.com/panjf2000/ants/v2"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
)

type epochState struct{}
@@ -80,7 +79,7 @@ type testEngineWrapper struct {
}

func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
-	engine := New(WithLogger(&logger.Logger{Logger: zap.L()}))
+	engine := New(WithLogger(test.NewLogger(t, true)))
	for _, opt := range opts {
		opt(engine.cfg)
	}
@@ -199,7 +198,7 @@ func testNewShard(t testing.TB, id int) *shard.Shard {
func testDefaultShardOptions(t testing.TB, id int) []shard.Option {
	return []shard.Option{
-		shard.WithLogger(&logger.Logger{Logger: zap.L()}),
+		shard.WithLogger(test.NewLogger(t, true)),
		shard.WithBlobStorOptions(
			blobstor.WithStorages(
				newStorages(t.TempDir(), 1<<20))),


@@ -17,11 +17,10 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
)

const errSmallSize = 256
@@ -56,7 +55,7 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
			largeFileStorage: largeFileStorage,
		}
		return []shard.Option{
-			shard.WithLogger(&logger.Logger{Logger: zap.L()}),
+			shard.WithLogger(test.NewLogger(t, true)),
			shard.WithBlobStorOptions(blobstor.WithStorages(storages)),
			shard.WithMetaBaseOptions(
				meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),


@@ -16,12 +16,11 @@ import (
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"
)

@@ -31,7 +30,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
	te := testNewEngine(t).
		setShardsNumOpts(t, shardNum, func(id int) []shard.Option {
			return []shard.Option{
-				shard.WithLogger(&logger.Logger{Logger: zap.L()}),
+				shard.WithLogger(test.NewLogger(t, true)),
				shard.WithBlobStorOptions(
					blobstor.WithStorages([]blobstor.SubStorage{{
						Storage: fstree.New(


@@ -12,11 +12,10 @@ import (
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
)

func sortAddresses(addrWithType []object.AddressWithType) []object.AddressWithType {
@@ -66,7 +65,7 @@ func TestListWithCursor(t *testing.T) {
			t.Parallel()
			e := testNewEngine(t).setShardsNumOpts(t, tt.shardNum, func(id int) []shard.Option {
				return []shard.Option{
-					shard.WithLogger(&logger.Logger{Logger: zap.L()}),
+					shard.WithLogger(test.NewLogger(t, true)),
					shard.WithBlobStorOptions(
						blobstor.WithStorages(
							newStorages(t.TempDir(), 1<<20))),


@@ -3,7 +3,7 @@ package engine
import (
	"context"
	"errors"
-	"fmt"
+	"strconv"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
@@ -145,7 +145,7 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s
		trace.WithAttributes(
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+			attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
		),
	)
	defer span.End()
@@ -177,7 +177,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
		trace.WithAttributes(
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+			attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
		),
	)
	defer span.End()
@@ -208,7 +208,7 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID
		trace.WithAttributes(
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("height", fmt.Sprintf("%d", height)),
+			attribute.String("height", strconv.FormatUint(height, 10)),
		),
	)
	defer span.End()
@@ -333,7 +333,7 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK
		trace.WithAttributes(
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("height", fmt.Sprintf("%d", height)),
+			attribute.String("height", strconv.FormatUint(height, 10)),
		),
	)
	defer span.End()
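The fmt.Sprintf to strconv.FormatUint substitution in these tracing attributes is mechanical; a minimal standalone sketch of the equivalence, with illustrative variable names:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	var nodeID uint64 = 42

	// Both produce the same attribute value; strconv.FormatUint skips the
	// format-string parsing and reflection that fmt.Sprintf performs.
	a := fmt.Sprintf("%d", nodeID)
	b := strconv.FormatUint(nodeID, 10)
	fmt.Println(a == b) // true
}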


@@ -80,7 +80,7 @@ func BenchmarkPut(b *testing.B) {
	})
}

-func TestDB_PutBlobovnicaUpdate(t *testing.T) {
+func TestDB_PutBlobovniczaUpdate(t *testing.T) {
	db := newDB(t)
	raw1 := testutil.GenerateObject()


@@ -22,7 +22,7 @@ type blobovniczaTreeMetrics struct {
	m metrics_impl.BlobobvnizcaMetrics
}

-func (m *blobovniczaTreeMetrics) Blobovnizca() blobovnicza.Metrics {
+func (m *blobovniczaTreeMetrics) Blobovnicza() blobovnicza.Metrics {
	return &blobovniczaMetrics{
		shardID: func() string { return m.shardID },
		path:    m.path,
@@ -81,18 +81,26 @@ type blobovniczaMetrics struct {
	path string
}

-func (m *blobovniczaMetrics) AddOpenBlobovnizcaSize(size uint64) {
-	m.m.AddOpenBlobovnizcaSize(m.shardID(), m.path, size)
+func (m *blobovniczaMetrics) AddOpenBlobovniczaSize(size uint64) {
+	m.m.AddOpenBlobovniczaSize(m.shardID(), m.path, size)
}

-func (m *blobovniczaMetrics) SubOpenBlobovnizcaSize(size uint64) {
-	m.m.SubOpenBlobovnizcaSize(m.shardID(), m.path, size)
+func (m *blobovniczaMetrics) SubOpenBlobovniczaSize(size uint64) {
+	m.m.SubOpenBlobovniczaSize(m.shardID(), m.path, size)
}

-func (m *blobovniczaMetrics) IncOpenBlobovnizcaCount() {
-	m.m.IncOpenBlobovnizcaCount(m.shardID(), m.path)
+func (m *blobovniczaMetrics) IncOpenBlobovniczaCount() {
+	m.m.IncOpenBlobovniczaCount(m.shardID(), m.path)
}

-func (m *blobovniczaMetrics) DecOpenBlobovnizcaCount() {
-	m.m.DecOpenBlobovnizcaCount(m.shardID(), m.path)
+func (m *blobovniczaMetrics) DecOpenBlobovniczaCount() {
+	m.m.DecOpenBlobovniczaCount(m.shardID(), m.path)
+}
+
+func (m *blobovniczaMetrics) AddOpenBlobovniczaItems(items uint64) {
+	m.m.AddOpenBlobovniczaItems(m.shardID(), m.path, items)
+}
+
+func (m *blobovniczaMetrics) SubOpenBlobovniczaItems(items uint64) {
+	m.m.SubOpenBlobovniczaItems(m.shardID(), m.path, items)
}


@@ -9,6 +9,7 @@ import (
	"math/rand"
	"os"
	"path/filepath"
+	"strconv"
	"sync"
	"time"
@@ -291,7 +292,7 @@ func (t *boltForest) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID
		trace.WithAttributes(
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("height", fmt.Sprintf("%d", height)),
+			attribute.String("height", strconv.FormatUint(height, 10)),
		),
	)
	defer span.End()
@@ -889,7 +890,7 @@ func (t *boltForest) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID stri
		trace.WithAttributes(
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+			attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
		),
	)
	defer span.End()
@@ -937,7 +938,7 @@ func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID
		trace.WithAttributes(
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+			attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
		),
	)
	defer span.End()
@@ -1046,7 +1047,7 @@ func (t *boltForest) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID str
		trace.WithAttributes(
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("height", fmt.Sprintf("%d", height)),
+			attribute.String("height", strconv.FormatUint(height, 10)),
		),
	)
	defer span.End()


@@ -19,7 +19,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -30,7 +30,6 @@ import (
	objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
	"github.com/stretchr/testify/require"
	"go.etcd.io/bbolt"
-	"go.uber.org/zap"
)

type epochState struct{}
@@ -75,7 +74,7 @@ func TestShardOpen(t *testing.T) {
	newShard := func() *Shard {
		return New(
			WithID(NewIDFromBytes([]byte{})),
-			WithLogger(&logger.Logger{Logger: zap.L()}),
+			WithLogger(test.NewLogger(t, true)),
			WithBlobStorOptions(
				blobstor.WithStorages([]blobstor.SubStorage{
					{Storage: st},


@@ -3,6 +3,7 @@ package shard_test
import (
	"context"
	"testing"
+	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
@@ -56,8 +57,10 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
		_, err = sh.Delete(context.TODO(), delPrm)
		require.NoError(t, err)

+		require.Eventually(t, func() bool {
			_, err = sh.Get(context.Background(), getPrm)
-		require.True(t, client.IsErrObjectNotFound(err))
+			return client.IsErrObjectNotFound(err)
+		}, time.Second, 50*time.Millisecond)
	})

	t.Run("small object", func(t *testing.T) {
@@ -80,7 +83,9 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
		_, err = sh.Delete(context.Background(), delPrm)
		require.NoError(t, err)

+		require.Eventually(t, func() bool {
			_, err = sh.Get(context.Background(), getPrm)
-		require.True(t, client.IsErrObjectNotFound(err))
+			return client.IsErrObjectNotFound(err)
+		}, time.Second, 50*time.Millisecond)
	})
}
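The reworked assertions use testify's require.Eventually to poll until the asynchronous removal becomes observable instead of checking once. A minimal, self-contained sketch of that polling pattern; the background goroutine here just stands in for the shard's GC:

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestEventuallyPattern(t *testing.T) {
	done := make(chan struct{})
	go func() {
		time.Sleep(100 * time.Millisecond) // stand-in for background cleanup work
		close(done)
	}()

	// Poll every 50ms; fail the test if the condition is still false after 1s.
	require.Eventually(t, func() bool {
		select {
		case <-done:
			return true
		default:
			return false
		}
	}, time.Second, 50*time.Millisecond)
}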


@@ -15,14 +15,13 @@ import (
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/panjf2000/ants/v2"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
)

func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
@@ -31,14 +30,13 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
	rootPath := t.TempDir()
	var sh *Shard

-	l := &logger.Logger{Logger: zap.L()}
+	l := test.NewLogger(t, true)
	blobOpts := []blobstor.Option{
-		blobstor.WithLogger(&logger.Logger{Logger: zap.L()}),
+		blobstor.WithLogger(test.NewLogger(t, true)),
		blobstor.WithStorages([]blobstor.SubStorage{
			{
				Storage: blobovniczatree.NewBlobovniczaTree(
-					blobovniczatree.WithLogger(&logger.Logger{Logger: zap.L()}),
+					blobovniczatree.WithLogger(test.NewLogger(t, true)),
					blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
					blobovniczatree.WithBlobovniczaShallowDepth(1),
					blobovniczatree.WithBlobovniczaShallowWidth(1)),


@@ -3,6 +3,7 @@ package shard_test
import (
	"context"
	"path/filepath"
+	"sync"
	"testing"

	objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -19,6 +20,7 @@ import (
)

type metricsStore struct {
+	mtx         sync.Mutex
	objCounters map[string]uint64
	cnrSize     map[string]int64
	pldSize     int64
@@ -26,13 +28,40 @@ type metricsStore struct {
	errCounter int64
}

-func (m metricsStore) SetShardID(_ string) {}
+func (m *metricsStore) SetShardID(_ string) {}

-func (m metricsStore) SetObjectCounter(objectType string, v uint64) {
+func (m *metricsStore) SetObjectCounter(objectType string, v uint64) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	m.objCounters[objectType] = v
}

-func (m metricsStore) AddToObjectCounter(objectType string, delta int) {
+func (m *metricsStore) getObjectCounter(objectType string) uint64 {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	return m.objCounters[objectType]
+}
+
+func (m *metricsStore) containerSizes() map[string]int64 {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	r := make(map[string]int64, len(m.cnrSize))
+	for c, s := range m.cnrSize {
+		r[c] = s
+	}
+	return r
+}
+
+func (m *metricsStore) payloadSize() int64 {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	return m.pldSize
+}
+
+func (m *metricsStore) AddToObjectCounter(objectType string, delta int) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	switch {
	case delta > 0:
		m.objCounters[objectType] += uint64(delta)
@@ -49,35 +78,51 @@ func (m metricsStore) AddToObjectCounter(objectType string, delta int) {
	}
}

-func (m metricsStore) IncObjectCounter(objectType string) {
+func (m *metricsStore) IncObjectCounter(objectType string) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	m.objCounters[objectType] += 1
}

-func (m metricsStore) DecObjectCounter(objectType string) {
+func (m *metricsStore) DecObjectCounter(objectType string) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	m.AddToObjectCounter(objectType, -1)
}

func (m *metricsStore) SetMode(mode mode.Mode) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	m.mode = mode
}

-func (m metricsStore) AddToContainerSize(cnr string, size int64) {
+func (m *metricsStore) AddToContainerSize(cnr string, size int64) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	m.cnrSize[cnr] += size
}

func (m *metricsStore) AddToPayloadSize(size int64) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	m.pldSize += size
}

func (m *metricsStore) IncErrorCounter() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	m.errCounter += 1
}

func (m *metricsStore) ClearErrorCounter() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	m.errCounter = 0
}

func (m *metricsStore) DeleteShardMetrics() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
	m.errCounter = 0
}

@@ -102,10 +147,10 @@ func TestCounters(t *testing.T) {
	}

	t.Run("defaults", func(t *testing.T) {
-		require.Zero(t, mm.objCounters[physical])
-		require.Zero(t, mm.objCounters[logical])
-		require.Empty(t, mm.cnrSize)
-		require.Zero(t, mm.pldSize)
+		require.Zero(t, mm.getObjectCounter(physical))
+		require.Zero(t, mm.getObjectCounter(logical))
+		require.Empty(t, mm.containerSizes())
+		require.Zero(t, mm.payloadSize())
	})

	var totalPayload int64
@@ -128,10 +173,10 @@ func TestCounters(t *testing.T) {
			require.NoError(t, err)
		}

-		require.Equal(t, uint64(objNumber), mm.objCounters[physical])
-		require.Equal(t, uint64(objNumber), mm.objCounters[logical])
-		require.Equal(t, expectedSizes, mm.cnrSize)
-		require.Equal(t, totalPayload, mm.pldSize)
+		require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical))
+		require.Equal(t, uint64(objNumber), mm.getObjectCounter(logical))
+		require.Equal(t, expectedSizes, mm.containerSizes())
+		require.Equal(t, totalPayload, mm.payloadSize())
	})

	t.Run("inhume_GC", func(t *testing.T) {
@@ -145,10 +190,10 @@ func TestCounters(t *testing.T) {
			require.NoError(t, err)
		}

-		require.Equal(t, uint64(objNumber), mm.objCounters[physical])
-		require.Equal(t, uint64(objNumber-inhumedNumber), mm.objCounters[logical])
-		require.Equal(t, expectedSizes, mm.cnrSize)
-		require.Equal(t, totalPayload, mm.pldSize)
+		require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical))
+		require.Equal(t, uint64(objNumber-inhumedNumber), mm.getObjectCounter(logical))
+		require.Equal(t, expectedSizes, mm.containerSizes())
+		require.Equal(t, totalPayload, mm.payloadSize())

		oo = oo[inhumedNumber:]
	})

@@ -157,8 +202,8 @@ func TestCounters(t *testing.T) {
		var prm shard.InhumePrm
		ts := objectcore.AddressOf(testutil.GenerateObject())

-		phy := mm.objCounters[physical]
-		logic := mm.objCounters[logical]
+		phy := mm.getObjectCounter(physical)
+		logic := mm.getObjectCounter(logical)
		inhumedNumber := int(phy / 4)

		prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
@@ -166,10 +211,10 @@ func TestCounters(t *testing.T) {
		_, err := sh.Inhume(context.Background(), prm)
		require.NoError(t, err)

-		require.Equal(t, phy, mm.objCounters[physical])
-		require.Equal(t, logic-uint64(inhumedNumber), mm.objCounters[logical])
-		require.Equal(t, expectedSizes, mm.cnrSize)
-		require.Equal(t, totalPayload, mm.pldSize)
+		require.Equal(t, phy, mm.getObjectCounter(physical))
+		require.Equal(t, logic-uint64(inhumedNumber), mm.getObjectCounter(logical))
+		require.Equal(t, expectedSizes, mm.containerSizes())
+		require.Equal(t, totalPayload, mm.payloadSize())

		oo = oo[inhumedNumber:]
	})

@@ -177,8 +222,8 @@ func TestCounters(t *testing.T) {
	t.Run("Delete", func(t *testing.T) {
		var prm shard.DeletePrm

-		phy := mm.objCounters[physical]
-		logic := mm.objCounters[logical]
+		phy := mm.getObjectCounter(physical)
+		logic := mm.getObjectCounter(logical)
		deletedNumber := int(phy / 4)

		prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...)
@@ -186,8 +231,8 @@ func TestCounters(t *testing.T) {
		_, err := sh.Delete(context.Background(), prm)
		require.NoError(t, err)

-		require.Equal(t, phy-uint64(deletedNumber), mm.objCounters[physical])
-		require.Equal(t, logic-uint64(deletedNumber), mm.objCounters[logical])
+		require.Equal(t, phy-uint64(deletedNumber), mm.getObjectCounter(physical))
+		require.Equal(t, logic-uint64(deletedNumber), mm.getObjectCounter(logical))

		var totalRemovedpayload uint64
		for i := range oo[:deletedNumber] {
			removedPayload := oo[i].PayloadSize()
@@ -196,8 +241,8 @@ func TestCounters(t *testing.T) {
			cnr, _ := oo[i].ContainerID()
			expectedSizes[cnr.EncodeToString()] -= int64(removedPayload)
		}

-		require.Equal(t, expectedSizes, mm.cnrSize)
-		require.Equal(t, totalPayload-int64(totalRemovedpayload), mm.pldSize)
+		require.Equal(t, expectedSizes, mm.containerSizes())
+		require.Equal(t, totalPayload-int64(totalRemovedpayload), mm.payloadSize())
	})
}
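The metricsStore change above follows the usual recipe for making a test double safe under concurrency: a single mutex guards every field, and assertions read through accessor methods instead of touching the maps directly, so background GC callbacks cannot race with the test. A minimal sketch of the same pattern; the type and method names here are illustrative:

package example

import "sync"

// counterMock mirrors the mutex-guarded accessor pattern applied to metricsStore.
type counterMock struct {
	mtx      sync.Mutex
	counters map[string]uint64
}

func newCounterMock() *counterMock {
	return &counterMock{counters: make(map[string]uint64)}
}

// Inc may be called from background goroutines.
func (m *counterMock) Inc(key string) {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	m.counters[key]++
}

// Get is what the test asserts against; it takes the same lock as the writers.
func (m *counterMock) Get(key string) uint64 {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	return m.counters[key]
}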


@@ -14,12 +14,11 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"github.com/nspcc-dev/neo-go/pkg/util/slice"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
)

func TestShard_GetRange(t *testing.T) {
@@ -82,7 +81,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
				[]blobstor.Option{blobstor.WithStorages([]blobstor.SubStorage{
					{
						Storage: blobovniczatree.NewBlobovniczaTree(
-							blobovniczatree.WithLogger(&logger.Logger{Logger: zap.L()}),
+							blobovniczatree.WithLogger(test.NewLogger(t, true)),
							blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
							blobovniczatree.WithBlobovniczaShallowDepth(1),
							blobovniczatree.WithBlobovniczaShallowWidth(1)),


@@ -11,7 +11,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -20,7 +20,6 @@ import (
	usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
)

func TestShardReload(t *testing.T) {
@@ -28,8 +27,7 @@ func TestShardReload(t *testing.T) {
	p := t.Name()
	defer os.RemoveAll(p)

-	l := &logger.Logger{Logger: zap.L()}
+	l := test.NewLogger(t, true)
	blobOpts := []blobstor.Option{
		blobstor.WithLogger(l),
		blobstor.WithStorages([]blobstor.SubStorage{


@@ -16,12 +16,11 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/panjf2000/ants/v2"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
)

type epochState struct {
@@ -59,11 +58,11 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
	if bsOpts == nil {
		bsOpts = []blobstor.Option{
-			blobstor.WithLogger(&logger.Logger{Logger: zap.L()}),
+			blobstor.WithLogger(test.NewLogger(t, true)),
			blobstor.WithStorages([]blobstor.SubStorage{
				{
					Storage: blobovniczatree.NewBlobovniczaTree(
-						blobovniczatree.WithLogger(&logger.Logger{Logger: zap.L()}),
+						blobovniczatree.WithLogger(test.NewLogger(t, true)),
						blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
						blobovniczatree.WithBlobovniczaShallowDepth(1),
						blobovniczatree.WithBlobovniczaShallowWidth(1)),
@@ -81,7 +80,7 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
	opts := []shard.Option{
		shard.WithID(shard.NewIDFromBytes([]byte{})),
-		shard.WithLogger(&logger.Logger{Logger: zap.L()}),
+		shard.WithLogger(test.NewLogger(t, true)),
		shard.WithBlobStorOptions(bsOpts...),
		shard.WithMetaBaseOptions(
			append([]meta.Option{
@@ -102,7 +101,7 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
			require.NoError(t, err)
			return pool
		}),
-		shard.WithGCRemoverSleepInterval(1 * time.Millisecond),
+		shard.WithGCRemoverSleepInterval(100 * time.Millisecond),
	}

	sh = shard.New(opts...)


@@ -2,7 +2,7 @@ package shard

import (
	"context"
-	"fmt"
+	"strconv"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -140,7 +140,7 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n
			attribute.String("shard_id", s.ID().String()),
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+			attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
		),
	)
	defer span.End()
@@ -165,7 +165,7 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
			attribute.String("shard_id", s.ID().String()),
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+			attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
		),
	)
	defer span.End()
@@ -190,7 +190,7 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string,
			attribute.String("shard_id", s.ID().String()),
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("height", fmt.Sprintf("%d", height)),
+			attribute.String("height", strconv.FormatUint(height, 10)),
		),
	)
	defer span.End()
@@ -309,7 +309,7 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre
			attribute.String("shard_id", s.ID().String()),
			attribute.String("container_id", cid.EncodeToString()),
			attribute.String("tree_id", treeID),
-			attribute.String("height", fmt.Sprintf("%d", height)),
+			attribute.String("height", strconv.FormatUint(height, 10)),
		),
	)
	defer span.End()


@@ -21,6 +21,10 @@ type cache struct {
	// flushCh is a channel with objects to flush.
	flushCh chan *objectSDK.Object
+	// scheduled4Flush contains objects scheduled for flush via flushCh
+	// helps to avoid multiple flushing of one object
+	scheduled4Flush    map[oid.Address]struct{}
+	scheduled4FlushMtx sync.RWMutex
	// closeCh is close channel, protected by modeMtx.
	closeCh chan struct{}
	// wg is a wait group for flush workers.
@@ -49,6 +53,7 @@ func New(opts ...Option) writecache.Cache {
	c := &cache{
		flushCh:         make(chan *objectSDK.Object),
		mode:            mode.ReadWrite,
+		scheduled4Flush: map[oid.Address]struct{}{},
		options: options{
			log: &logger.Logger{Logger: zap.NewNop()},


@@ -39,10 +39,10 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
		return writecache.ErrReadOnly
	}

-	saddr := addr.EncodeToString()
+	key := addr2key(addr)

	err := c.db.Update(func(tx *badger.Txn) error {
-		it, err := tx.Get([]byte(saddr))
+		it, err := tx.Get(key[:])
		if err != nil {
			if err == badger.ErrKeyNotFound {
				return logicerr.Wrap(new(apistatus.ObjectNotFound))
@@ -51,10 +51,10 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
		}
		if it.ValueSize() > 0 {
			storageType = writecache.StorageTypeDB
-			err := tx.Delete([]byte(saddr))
+			err := tx.Delete(key[:])
			if err == nil {
				storagelog.Write(c.log,
-					storagelog.AddressField(saddr),
+					storagelog.AddressField(addr.EncodeToString()),
					storagelog.StorageTypeField(wcStorageType),
					storagelog.OpField("db DELETE"),
				)


@@ -18,7 +18,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"github.com/dgraph-io/badger/v4"
-	"github.com/mr-tron/base58"
+	"github.com/dgraph-io/ristretto/z"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
@@ -35,6 +35,65 @@ const (
	defaultFlushInterval = time.Second
)

+type collector struct {
+	cache     *cache
+	scheduled int
+	processed int
+	cancel    func()
+}
+
+func (c *collector) Send(buf *z.Buffer) error {
+	list, err := badger.BufferToKVList(buf)
+	if err != nil {
+		return err
+	}
+	for _, kv := range list.Kv {
+		select {
+		case <-c.cache.closeCh:
+			c.cancel()
+			return nil
+		default:
+		}
+		if kv.StreamDone {
+			return nil
+		}
+		if c.scheduled >= flushBatchSize {
+			c.cancel()
+			return nil
+		}
+		if got, want := len(kv.Key), len(internalKey{}); got != want {
+			c.cache.log.Debug(
+				fmt.Sprintf("not expected db key len: got %d, want %d", got, want))
+			continue
+		}
+		c.processed++
+		obj := objectSDK.New()
+		val := bytes.Clone(kv.Value)
+		if err = obj.Unmarshal(val); err != nil {
+			continue
+		}
+		addr := objectCore.AddressOf(obj)
+		c.cache.scheduled4FlushMtx.RLock()
+		_, ok := c.cache.scheduled4Flush[addr]
+		c.cache.scheduled4FlushMtx.RUnlock()
+		if ok {
+			c.cache.log.Debug(logs.WritecacheBadgerObjAlreadyScheduled, zap.Stringer("obj", addr))
+			continue
+		}
+		c.cache.scheduled4FlushMtx.Lock()
+		c.cache.scheduled4Flush[addr] = struct{}{}
+		c.cache.scheduled4FlushMtx.Unlock()
+		c.scheduled++
+		select {
+		case c.cache.flushCh <- obj:
+		case <-c.cache.closeCh:
+			c.cancel()
+			return nil
+		}
+	}
+	return nil
+}
+
// runFlushLoop starts background workers which periodically flush objects to the blobstor.
func (c *cache) runFlushLoop() {
	for i := 0; i < c.workersCount; i++ {
@@ -62,17 +121,12 @@ func (c *cache) runFlushLoop() {
}

func (c *cache) flushSmallObjects() {
-	var lastKey internalKey
-	var m []objectInfo
	for {
		select {
		case <-c.closeCh:
			return
		default:
		}

-		m = m[:0]
-
		c.modeMtx.RLock()
		if c.readOnly() {
			c.modeMtx.RUnlock()
@@ -86,61 +140,24 @@ func (c *cache) flushSmallObjects() {
			c.modeMtx.RUnlock()
			return
		}

+		ctx, cancel := context.WithCancel(context.TODO())
-		_ = c.db.View(func(tx *badger.Txn) error {
-			it := tx.NewIterator(badger.DefaultIteratorOptions)
-			defer it.Close()
-			if len(lastKey) == 0 {
-				it.Rewind()
-			} else {
-				it.Seek(lastKey[:])
-				if it.Valid() && bytes.Equal(it.Item().Key(), lastKey[:]) {
-					it.Next()
-				}
-			}
-			for ; it.Valid() && len(m) < flushBatchSize; it.Next() {
-				if got, want := int(it.Item().KeySize()), len(lastKey); got != want {
-					return fmt.Errorf("invalid db key len: got %d, want %d", got, want)
-				}
-				it.Item().KeyCopy(lastKey[:])
-				value, err := it.Item().ValueCopy(nil)
-				if err != nil {
-					return err
-				}
-				m = append(m, objectInfo{
-					addr: lastKey.address(),
-					data: value,
-				})
-			}
-			return nil
-		})
-
-		var count int
-		for i := range m {
-			obj := objectSDK.New()
-			if err := obj.Unmarshal(m[i].data); err != nil {
-				continue
-			}
-			count++
-			select {
-			case c.flushCh <- obj:
-			case <-c.closeCh:
-				c.modeMtx.RUnlock()
-				return
-			}
-		}
-		if count == 0 {
+		coll := collector{
+			cache:  c,
+			cancel: cancel,
+		}
+		stream := c.db.NewStream()
+		// All calls to Send are done by a single goroutine
+		stream.Send = coll.Send
+		if err := stream.Orchestrate(ctx); err != nil {
+			c.log.Debug(fmt.Sprintf(
+				"error during flushing object from wc: %s", err))
+		}
		c.modeMtx.RUnlock()
+		if coll.scheduled == 0 {
			break
		}
-		c.modeMtx.RUnlock()

		c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
-			zap.Int("count", count),
-			zap.String("start", base58.Encode(lastKey[:])))
+			zap.Int("scheduled", coll.scheduled), zap.Int("processed", coll.processed))
	}
}

@@ -167,13 +184,14 @@ func (c *cache) workerFlushSmall() {
			return
		}

+		addr := objectCore.AddressOf(obj)
		err := c.flushObject(context.TODO(), obj, nil, writecache.StorageTypeDB)
-		if err != nil {
-			// Error is handled in flushObject.
-			continue
+		if err == nil {
+			c.deleteFromDB([]internalKey{addr2key(addr)})
		}
-
-		c.deleteFromDB([]string{objectCore.AddressOf(obj).EncodeToString()})
+		c.scheduled4FlushMtx.Lock()
+		delete(c.scheduled4Flush, addr)
+		c.scheduled4FlushMtx.Unlock()
	}
}
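The rewritten flush path drives iteration through badger's Stream framework instead of a manual transaction iterator. A condensed, self-contained sketch of that API shape, using the same badger v4 and ristretto/z imports the diff itself relies on; the streamKeys helper and its callback are illustrative, not code from the repository:

package example

import (
	"context"

	"github.com/dgraph-io/badger/v4"
	"github.com/dgraph-io/ristretto/z"
)

// streamKeys walks the whole keyspace with badger's Stream orchestrator.
// Badger guarantees that Send is called from a single goroutine, so the
// callback may keep local state (counters, batches) without extra locking.
func streamKeys(ctx context.Context, db *badger.DB, handle func(key, value []byte)) error {
	stream := db.NewStream()
	stream.Send = func(buf *z.Buffer) error {
		list, err := badger.BufferToKVList(buf)
		if err != nil {
			return err
		}
		for _, kv := range list.Kv {
			if kv.StreamDone {
				return nil
			}
			handle(kv.Key, kv.Value)
		}
		return nil
	}
	return stream.Orchestrate(ctx)
}

Cancelling the context passed to Orchestrate is how a Send callback stops the stream early, which is exactly the role of the cancel field in the collector above.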


@@ -10,18 +10,17 @@ import (
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachetest"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"github.com/dgraph-io/badger/v4"
	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
)

func TestFlush(t *testing.T) {
	createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs writecache.MainStorage, opts ...Option) writecache.Cache {
		return New(
			append([]Option{
-				WithLogger(&logger.Logger{Logger: zap.L()}),
+				WithLogger(test.NewLogger(t, true)),
				WithPath(filepath.Join(t.TempDir(), "writecache")),
				WithMetabase(mb),
				WithBlobstor(bs),
@@ -53,8 +52,9 @@ func TestFlush(t *testing.T) {
			Desc: "db, invalid object",
			InjectFn: func(t *testing.T, wc writecache.Cache) {
				c := wc.(*cache)
+				key := addr2key(oidtest.Address())
				require.NoError(t, c.db.Update(func(tx *badger.Txn) error {
-					return tx.Set([]byte(oidtest.Address().EncodeToString()), []byte{1, 2, 3})
+					return tx.Set(key[:], []byte{1, 2, 3})
				}))
			},
		},


@@ -5,14 +5,13 @@ import (
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	"go.uber.org/zap"
)

func TestGeneric(t *testing.T) {
	storagetest.TestAll(t, func(t *testing.T) storagetest.Component {
		return New(
-			WithLogger(&logger.Logger{Logger: zap.L()}),
+			WithLogger(test.NewLogger(t, true)),
			WithFlushWorkersCount(2),
			WithPath(t.TempDir()),
			WithGCInterval(1*time.Second))


@@ -58,7 +58,7 @@ func (c *cache) openStore(readOnly bool) error {
	return nil
}

-func (c *cache) deleteFromDB(keys []string) []string {
+func (c *cache) deleteFromDB(keys []internalKey) []internalKey {
	if len(keys) == 0 {
		return keys
	}
@@ -67,7 +67,7 @@ func (c *cache) deleteFromDB(keys []string) []string {
	var errorIndex int
	for errorIndex = range keys {
-		if err := wb.Delete([]byte(keys[errorIndex])); err != nil {
+		if err := wb.Delete(keys[errorIndex][:]); err != nil {
			break
		}
	}


@@ -13,18 +13,17 @@ import (
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachetest"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"github.com/stretchr/testify/require"
	"go.etcd.io/bbolt"
-	"go.uber.org/zap"
)

func TestFlush(t *testing.T) {
	createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs writecache.MainStorage, opts ...Option) writecache.Cache {
		return New(
			append([]Option{
-				WithLogger(&logger.Logger{Logger: zap.L()}),
+				WithLogger(test.NewLogger(t, true)),
				WithPath(filepath.Join(t.TempDir(), "writecache")),
				WithSmallObjectSize(smallSize),
				WithMetabase(mb),


@@ -4,14 +4,13 @@ import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	"go.uber.org/zap"
)

func TestGeneric(t *testing.T) {
	storagetest.TestAll(t, func(t *testing.T) storagetest.Component {
		return New(
-			WithLogger(&logger.Logger{Logger: zap.L()}),
+			WithLogger(test.NewLogger(t, true)),
			WithFlushWorkersCount(2),
			WithPath(t.TempDir()))
	})


@ -15,64 +15,74 @@ type BlobobvnizcaMetrics interface {
AddBlobobvnizcaTreePut(shardID, path string, size int) AddBlobobvnizcaTreePut(shardID, path string, size int)
AddBlobobvnizcaTreeGet(shardID, path string, size int) AddBlobobvnizcaTreeGet(shardID, path string, size int)
AddOpenBlobovnizcaSize(shardID, path string, size uint64) AddOpenBlobovniczaSize(shardID, path string, size uint64)
SubOpenBlobovnizcaSize(shardID, path string, size uint64) SubOpenBlobovniczaSize(shardID, path string, size uint64)
IncOpenBlobovnizcaCount(shardID, path string) AddOpenBlobovniczaItems(shardID, path string, items uint64)
DecOpenBlobovnizcaCount(shardID, path string) SubOpenBlobovniczaItems(shardID, path string, items uint64)
IncOpenBlobovniczaCount(shardID, path string)
DecOpenBlobovniczaCount(shardID, path string)
} }
type blobovnizca struct { type blobovnicza struct {
treeMode *shardIDPathModeValue treeMode *shardIDPathModeValue
treeReqDuration *prometheus.HistogramVec treeReqDuration *prometheus.HistogramVec
treePut *prometheus.CounterVec treePut *prometheus.CounterVec
treeGet *prometheus.CounterVec treeGet *prometheus.CounterVec
treeOpenSize *prometheus.GaugeVec treeOpenSize *prometheus.GaugeVec
treeOpenItems *prometheus.GaugeVec
treeOpenCounter *prometheus.GaugeVec treeOpenCounter *prometheus.GaugeVec
} }
func newBlobovnizca() *blobovnizca { func newBlobovnicza() *blobovnicza {
return &blobovnizca{ return &blobovnicza{
treeMode: newShardIDPathMode(blobovniczaTreeSubSystem, "mode", "Blobovnizca tree mode"), treeMode: newShardIDPathMode(blobovniczaTreeSubSystem, "mode", "Blobovnicza tree mode"),
treeReqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{ treeReqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
Namespace: namespace, Namespace: namespace,
Subsystem: blobovniczaTreeSubSystem, Subsystem: blobovniczaTreeSubSystem,
Name: "request_duration_seconds", Name: "request_duration_seconds",
Help: "Accumulated Blobovnizca tree request process duration", Help: "Accumulated Blobovnicza tree request process duration",
    }, []string{shardIDLabel, pathLabel, successLabel, methodLabel, withStorageIDLabel}),
    treePut: metrics.NewCounterVec(prometheus.CounterOpts{
        Namespace: namespace,
        Subsystem: blobovniczaTreeSubSystem,
        Name:      "put_bytes",
-       Help:      "Accumulated payload size written to Blobovnizca tree",
+       Help:      "Accumulated payload size written to Blobovnicza tree",
    }, []string{shardIDLabel, pathLabel}),
    treeGet: metrics.NewCounterVec(prometheus.CounterOpts{
        Namespace: namespace,
        Subsystem: blobovniczaTreeSubSystem,
        Name:      "get_bytes",
-       Help:      "Accumulated payload size read from Blobovnizca tree",
+       Help:      "Accumulated payload size read from Blobovnicza tree",
    }, []string{shardIDLabel, pathLabel}),
    treeOpenSize: metrics.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: namespace,
        Subsystem: blobovniczaTreeSubSystem,
-       Name:      "open_blobovnizca_size_bytes",
-       Help:      "Size of opened blobovnizcas of Blobovnizca tree",
+       Name:      "open_blobovnicza_size_bytes",
+       Help:      "Size of opened blobovniczas of Blobovnicza tree",
+   }, []string{shardIDLabel, pathLabel}),
+   treeOpenItems: metrics.NewGaugeVec(prometheus.GaugeOpts{
+       Namespace: namespace,
+       Subsystem: blobovniczaTreeSubSystem,
+       Name:      "open_blobovnicza_items_total",
+       Help:      "Count of items in opened blobovniczas of Blobovnicza tree",
    }, []string{shardIDLabel, pathLabel}),
    treeOpenCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
        Namespace: namespace,
        Subsystem: blobovniczaTreeSubSystem,
-       Name:      "open_blobovnizca_count",
-       Help:      "Count of opened blobovnizcas of Blobovnizca tree",
+       Name:      "open_blobovnicza_count",
+       Help:      "Count of opened blobovniczas of Blobovnicza tree",
    }, []string{shardIDLabel, pathLabel}),
    }
}
-func (b *blobovnizca) SetBlobobvnizcaTreeMode(shardID, path string, readOnly bool) {
+func (b *blobovnicza) SetBlobobvnizcaTreeMode(shardID, path string, readOnly bool) {
    b.treeMode.SetMode(shardID, path, modeFromBool(readOnly))
}
-func (b *blobovnizca) CloseBlobobvnizcaTree(shardID, path string) {
+func (b *blobovnicza) CloseBlobobvnizcaTree(shardID, path string) {
    b.treeMode.SetMode(shardID, path, closedMode)
    b.treeReqDuration.DeletePartialMatch(prometheus.Labels{
        shardIDLabel: shardID,
@@ -88,7 +98,7 @@ func (b *blobovnizca) CloseBlobobvnizcaTree(shardID, path string) {
    })
}
-func (b *blobovnizca) BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) {
+func (b *blobovnicza) BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) {
    b.treeReqDuration.With(prometheus.Labels{
        shardIDLabel: shardID,
        pathLabel:    path,
@@ -98,44 +108,58 @@ func (b *blobovnizca) BlobobvnizcaTreeMethodDuration(shardID, path string, metho
    }).Observe(d.Seconds())
}
-func (b *blobovnizca) AddBlobobvnizcaTreePut(shardID, path string, size int) {
+func (b *blobovnicza) AddBlobobvnizcaTreePut(shardID, path string, size int) {
    b.treePut.With(prometheus.Labels{
        shardIDLabel: shardID,
        pathLabel:    path,
    }).Add(float64(size))
}
-func (b *blobovnizca) AddBlobobvnizcaTreeGet(shardID, path string, size int) {
+func (b *blobovnicza) AddBlobobvnizcaTreeGet(shardID, path string, size int) {
    b.treeGet.With(prometheus.Labels{
        shardIDLabel: shardID,
        pathLabel:    path,
    }).Add(float64(size))
}
-func (b *blobovnizca) AddOpenBlobovnizcaSize(shardID, path string, size uint64) {
+func (b *blobovnicza) AddOpenBlobovniczaSize(shardID, path string, size uint64) {
    b.treeOpenSize.With(prometheus.Labels{
        shardIDLabel: shardID,
        pathLabel:    path,
    }).Add(float64(size))
}
-func (b *blobovnizca) SubOpenBlobovnizcaSize(shardID, path string, size uint64) {
+func (b *blobovnicza) SubOpenBlobovniczaSize(shardID, path string, size uint64) {
    b.treeOpenSize.With(prometheus.Labels{
        shardIDLabel: shardID,
        pathLabel:    path,
    }).Sub(float64(size))
}
-func (b *blobovnizca) IncOpenBlobovnizcaCount(shardID, path string) {
+func (b *blobovnicza) IncOpenBlobovniczaCount(shardID, path string) {
    b.treeOpenCounter.With(prometheus.Labels{
        shardIDLabel: shardID,
        pathLabel:    path,
    }).Inc()
}
-func (b *blobovnizca) DecOpenBlobovnizcaCount(shardID, path string) {
+func (b *blobovnicza) DecOpenBlobovniczaCount(shardID, path string) {
    b.treeOpenCounter.With(prometheus.Labels{
        shardIDLabel: shardID,
        pathLabel:    path,
    }).Dec()
}
+func (b *blobovnicza) AddOpenBlobovniczaItems(shardID, path string, items uint64) {
+   b.treeOpenItems.With(prometheus.Labels{
+       shardIDLabel: shardID,
+       pathLabel:    path,
+   }).Add(float64(items))
+}
+func (b *blobovnicza) SubOpenBlobovniczaItems(shardID, path string, items uint64) {
+   b.treeOpenItems.With(prometheus.Labels{
+       shardIDLabel: shardID,
+       pathLabel:    path,
+   }).Sub(float64(items))
+}
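Note: a minimal usage sketch of the new open-DB gauges, not part of the diff itself. The size, item and count gauges are expected to move together when a single blobovnicza is opened; the openTreeMetrics interface and recordOpen helper below are hypothetical names, only the three methods come from the code above.

type openTreeMetrics interface {
    AddOpenBlobovniczaSize(shardID, path string, size uint64)
    AddOpenBlobovniczaItems(shardID, path string, items uint64)
    IncOpenBlobovniczaCount(shardID, path string)
}

// recordOpen registers one newly opened blobovnicza of a tree.
func recordOpen(m openTreeMetrics, shardID, path string, sizeBytes, items uint64) {
    m.IncOpenBlobovniczaCount(shardID, path)            // one more DB is open
    m.AddOpenBlobovniczaSize(shardID, path, sizeBytes)  // its current size in bytes
    m.AddOpenBlobovniczaItems(shardID, path, items)     // number of items it holds
}

On close, the matching Dec/Sub calls would subtract the same three values.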

View file

@@ -14,7 +14,7 @@ type NodeMetrics struct {
    epoch        prometheus.Gauge
    fstree       *fstreeMetrics
    blobstore    *blobstoreMetrics
-   blobobvnizca *blobovnizca
+   blobobvnizca *blobovnicza
    metabase     *metabaseMetrics
    pilorama     *piloramaMetrics
    grpc         *grpcServerMetrics
@@ -35,7 +35,7 @@ func NewNodeMetrics() *NodeMetrics {
        }),
        fstree:       newFSTreeMetrics(),
        blobstore:    newBlobstoreMetrics(),
-       blobobvnizca: newBlobovnizca(),
+       blobobvnizca: newBlobovnicza(),
        metabase:     newMetabaseMetrics(),
        pilorama:     newPiloramaMetrics(),
        grpc:         newGrpcServerMetrics(),

View file

@@ -29,6 +29,7 @@ const (
    containersOfMethod    = "containersOf"
    eaclMethod            = "eACL"
    setEACLMethod         = "setEACL"
+   deletionInfoMethod    = "deletionInfo"
    startEstimationMethod = "startContainerEstimation"
    stopEstimationMethod  = "stopContainerEstimation"

View file

@@ -0,0 +1,68 @@
package container
import (
"crypto/sha256"
"fmt"
"strings"
containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) {
return DeletionInfo((*Client)(x), cnr)
}
type deletionInfo interface {
DeletionInfo(cid []byte) (*containercore.DelInfo, error)
}
func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
return c.DeletionInfo(binCnr)
}
func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(deletionInfoMethod)
prm.SetArgs(cid)
res, err := c.client.TestInvoke(prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
return nil, fmt.Errorf("could not perform test invocation (%s): %w", deletionInfoMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err)
}
if len(arr) != 2 {
return nil, fmt.Errorf("unexpected container stack item count (%s): %d", deletionInfoMethod, len(arr))
}
owner, err := client.BytesFromStackItem(arr[0])
if err != nil {
return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err)
}
epoch, err := client.IntFromStackItem(arr[1])
if err != nil {
return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err)
}
return &containercore.DelInfo{
Owner: owner,
Epoch: int(epoch),
}, nil
}
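Note: a sketch of how this accessor might be consumed by a caller such as the policer (the reportDeletion helper is hypothetical; DeletionInfo, DelInfo and the ContainerNotFound status come from the code above):

func reportDeletion(c *Client, cnr cid.ID) error {
    info, err := DeletionInfo(c, cnr)
    if err != nil {
        // apistatus.ContainerNotFound is returned when the contract has no record for this ID
        return err
    }
    fmt.Printf("container %s: deletion recorded at epoch %d\n", cnr, info.Epoch)
    return nil
}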

View file

@@ -9,6 +9,7 @@ import (
    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+   objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -24,7 +25,7 @@ import (
type Service struct {
    *cfg
-   c senderClassifier
+   c objectCore.SenderClassifier
}
type putStreamBasicChecker struct {
@@ -95,11 +96,7 @@ func New(next object.ServiceServer,
    return Service{
        cfg: cfg,
-       c: senderClassifier{
-           log:       cfg.log,
-           innerRing: cfg.irFetcher,
-           netmap:    cfg.nm,
-       },
+       c:   objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
    }
}
@@ -652,20 +649,24 @@ func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (in
    }
    // find request role and key
-   res, err := b.c.classify(req, idCnr, cnr.Value)
+   ownerID, ownerKey, err := req.RequestOwner()
+   if err != nil {
+       return info, err
+   }
+   res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
    if err != nil {
        return info, err
    }
    info.basicACL = cnr.Value.BasicACL()
-   info.requestRole = res.role
+   info.requestRole = res.Role
    info.operation = op
    info.cnrOwner = cnr.Value.Owner()
    info.idCnr = idCnr
    // it is assumed that at the moment the key will be valid,
    // otherwise the request would not pass validation
-   info.senderKey = res.key
+   info.senderKey = res.Key
    // add bearer token if it is present in request
    info.bearer = req.bearer

View file

@ -1,7 +0,0 @@
package deletesvc
import "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
func (exec *execCtx) executeOnContainer() {
exec.log.Debug(logs.DeleteRequestIsNotRolledOverToTheContainer)
}

View file

@@ -29,33 +29,17 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
    exec.setLogger(s.log)
-   exec.execute(ctx)
-   return exec.statusError.err
+   return exec.execute(ctx)
}
-func (exec *execCtx) execute(ctx context.Context) {
+func (exec *execCtx) execute(ctx context.Context) error {
    exec.log.Debug(logs.ServingRequest)
-   // perform local operation
-   exec.executeLocal(ctx)
-   exec.analyzeStatus(true)
-}
-func (exec *execCtx) analyzeStatus(execCnr bool) {
-   // analyze local result
-   switch exec.status {
-   case statusOK:
-       exec.log.Debug(logs.OperationFinishedSuccessfully)
-   default:
-       exec.log.Debug(logs.OperationFinishedWithError,
-           zap.String("error", exec.err.Error()),
-       )
-       if execCnr {
-           exec.executeOnContainer()
-           exec.analyzeStatus(false)
-       }
-   }
+   if err := exec.executeLocal(ctx); err != nil {
+       exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
+       return err
+   }
+   exec.log.Debug(logs.OperationFinishedSuccessfully)
+   return nil
}
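Note: with the statusError plumbing gone, the error now surfaces straight out of Delete, so a caller can wrap or match it with the standard errors tooling. A minimal, hypothetical caller:

if err := svc.Delete(ctx, prm); err != nil {
    return fmt.Errorf("delete object: %w", err)
}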

View file

@@ -2,6 +2,7 @@ package deletesvc
import (
    "context"
+   "fmt"
    "strconv"
    objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
@@ -15,18 +16,11 @@ import (
    "go.uber.org/zap"
)
-type statusError struct {
-   status int
-   err    error
-}
type execCtx struct {
    svc *Service
    prm Prm
-   statusError
    log *logger.Logger
    tombstone *objectSDK.Tombstone
@@ -36,11 +30,6 @@ type execCtx struct {
    tombstoneObj *objectSDK.Object
}
-const (
-   statusUndefined int = iota
-   statusOK
-)
func (exec *execCtx) setLogger(l *logger.Logger) {
    exec.log = &logger.Logger{Logger: l.With(
        zap.String("request", "DELETE"),
@@ -75,48 +64,34 @@ func (exec *execCtx) newAddress(id oid.ID) oid.Address {
    return a
}
-func (exec *execCtx) formSplitInfo(ctx context.Context) bool {
+func (exec *execCtx) formSplitInfo(ctx context.Context) error {
-   success := false
    var err error
    exec.splitInfo, err = exec.svc.header.splitInfo(ctx, exec)
+   if err != nil && !apiclient.IsErrObjectAlreadyRemoved(err) {
-   switch {
-   default:
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.DeleteCouldNotComposeSplitInfo,
-           zap.String("error", err.Error()),
-       )
-   case err == nil, apiclient.IsErrObjectAlreadyRemoved(err):
        // IsErrObjectAlreadyRemoved check is required because splitInfo
        // implicitly performs Head request that may return ObjectAlreadyRemoved
-       // status that is not specified for Delete
+       // status that is not specified for Delete.
+       return err
-       exec.status = statusOK
-       exec.err = nil
-       success = true
    }
-   return success
+   return nil
}
-func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) {
+func (exec *execCtx) collectMembers(ctx context.Context) error {
    if exec.splitInfo == nil {
        exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY)
-       return true
+       return nil
    }
+   var err error
    if _, withLink := exec.splitInfo.Link(); withLink {
-       ok = exec.collectChildren(ctx)
+       err = exec.collectChildren(ctx)
    }
-   if !ok {
+   if err != nil {
        if _, withLast := exec.splitInfo.LastPart(); withLast {
-           ok = exec.collectChain(ctx)
-           if !ok {
-               return
+           if err := exec.collectChain(ctx); err != nil {
+               return err
            }
        }
    } // may be fail if neither right nor linking ID is set?
@@ -124,7 +99,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) {
    return exec.supplementBySplitID(ctx)
}
-func (exec *execCtx) collectChain(ctx context.Context) bool {
+func (exec *execCtx) collectChain(ctx context.Context) error {
    var chain []oid.ID
    exec.log.Debug(logs.DeleteAssemblingChain)
@@ -133,84 +108,43 @@ func (exec *execCtx) collectChain(ctx context.Context) bool {
        chain = append(chain, prev)
        p, err := exec.svc.header.previous(ctx, exec, prev)
+       if err != nil {
+           return fmt.Errorf("get previous split element for %s: %w", prev, err)
+       }
-       switch {
-       default:
-           exec.status = statusUndefined
-           exec.err = err
-           exec.log.Debug(logs.DeleteCouldNotGetPreviousSplitElement,
-               zap.Stringer("id", prev),
-               zap.String("error", err.Error()),
-           )
-           return false
-       case err == nil:
-           exec.status = statusOK
-           exec.err = nil
        withPrev = p != nil
        if withPrev {
            prev = *p
        }
    }
-   }
    exec.addMembers(chain)
+   return nil
-   return true
}
-func (exec *execCtx) collectChildren(ctx context.Context) bool {
+func (exec *execCtx) collectChildren(ctx context.Context) error {
    exec.log.Debug(logs.DeleteCollectingChildren)
    children, err := exec.svc.header.children(ctx, exec)
+   if err != nil {
+       return fmt.Errorf("collect children: %w", err)
+   }
-   switch {
-   default:
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.DeleteCouldNotCollectObjectChildren,
-           zap.String("error", err.Error()),
-       )
-       return false
-   case err == nil:
-       exec.status = statusOK
-       exec.err = nil
    link, _ := exec.splitInfo.Link()
    exec.addMembers(append(children, link))
+   return nil
-       return true
-   }
}
-func (exec *execCtx) supplementBySplitID(ctx context.Context) bool {
+func (exec *execCtx) supplementBySplitID(ctx context.Context) error {
    exec.log.Debug(logs.DeleteSupplementBySplitID)
    chain, err := exec.svc.searcher.splitMembers(ctx, exec)
+   if err != nil {
+       return fmt.Errorf("search split chain members: %w", err)
+   }
-   switch {
-   default:
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.DeleteCouldNotSearchForSplitChainMembers,
-           zap.String("error", err.Error()),
-       )
-       return false
-   case err == nil:
-       exec.status = statusOK
-       exec.err = nil
    exec.addMembers(chain)
+   return nil
-   return true
-   }
}
func (exec *execCtx) addMembers(incoming []oid.ID) {
@@ -228,17 +162,10 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
    exec.tombstone.SetMembers(append(members, incoming...))
}
-func (exec *execCtx) initTombstoneObject() bool {
+func (exec *execCtx) initTombstoneObject() error {
    payload, err := exec.tombstone.Marshal()
    if err != nil {
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.DeleteCouldNotMarshalTombstoneStructure,
-           zap.String("error", err.Error()),
-       )
-       return false
+       return fmt.Errorf("marshal tombstone: %w", err)
    }
    exec.tombstoneObj = objectSDK.New()
@@ -262,29 +189,15 @@ func (exec *execCtx) initTombstoneObject() bool {
    exec.tombstoneObj.SetAttributes(a)
-   return true
+   return nil
}
-func (exec *execCtx) saveTombstone(ctx context.Context) bool {
+func (exec *execCtx) saveTombstone(ctx context.Context) error {
    id, err := exec.svc.placer.put(ctx, exec)
+   if err != nil {
+       return fmt.Errorf("save tombstone: %w", err)
+   }
-   switch {
-   default:
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.DeleteCouldNotSaveTheTombstone,
-           zap.String("error", err.Error()),
-       )
-       return false
-   case err == nil:
-       exec.status = statusOK
-       exec.err = nil
-       exec.prm.tombAddrWriter.
-           SetAddress(exec.newAddress(*id))
-   }
-   return true
+   exec.prm.tombAddrWriter.SetAddress(exec.newAddress(*id))
+   return nil
}

View file

@@ -2,37 +2,29 @@ package deletesvc
import (
    "context"
+   "fmt"
    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-   "go.uber.org/zap"
)
-func (exec *execCtx) executeLocal(ctx context.Context) {
+func (exec *execCtx) executeLocal(ctx context.Context) error {
    exec.log.Debug(logs.DeleteFormingTombstoneStructure)
-   ok := exec.formTombstone(ctx)
-   if !ok {
-       return
+   if err := exec.formTombstone(ctx); err != nil {
+       return err
    }
    exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
-   exec.saveTombstone(ctx)
+   return exec.saveTombstone(ctx)
}
-func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) {
+func (exec *execCtx) formTombstone(ctx context.Context) error {
    tsLifetime, err := exec.svc.netInfo.TombstoneLifetime()
    if err != nil {
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.DeleteCouldNotReadTombstoneLifetimeConfig,
-           zap.String("error", err.Error()),
-       )
-       return false
+       return fmt.Errorf("fetch tombstone lifetime: %w", err)
    }
    exec.tombstone = objectSDK.NewTombstone()
@@ -43,26 +35,19 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) {
    exec.log.Debug(logs.DeleteFormingSplitInfo)
-   ok = exec.formSplitInfo(ctx)
-   if !ok {
-       return
+   if err := exec.formSplitInfo(ctx); err != nil {
+       return fmt.Errorf("form split info: %w", err)
    }
    exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
    exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
-   ok = exec.collectMembers(ctx)
-   if !ok {
-       return
+   if err := exec.collectMembers(ctx); err != nil {
+       return err
    }
    exec.log.Debug(logs.DeleteMembersSuccessfullyCollected)
-   ok = exec.initTombstoneObject()
-   if !ok {
-       return
-   }
-   return true
+   return exec.initTombstoneObject()
}

View file

@@ -273,7 +273,7 @@ func TestGetLocalOnly(t *testing.T) {
    newSvc := func(storage *testStorage) *Service {
        return &Service{
-           log:          test.NewLogger(t, false),
+           log:          test.NewLogger(t, true),
            localStorage: storage,
        }
    }
@@ -535,7 +535,7 @@ func TestGetRemoteSmall(t *testing.T) {
    const curEpoch = 13
    return &Service{
-       log:          test.NewLogger(t, false),
+       log:          test.NewLogger(t, true),
        localStorage: newTestStorage(),
        traverserGenerator: &testTraverserGenerator{
            c: cnr,
@@ -1667,7 +1667,7 @@ func TestGetFromPastEpoch(t *testing.T) {
    const curEpoch = 13
    svc := &Service{
-       log:          test.NewLogger(t, false),
+       log:          test.NewLogger(t, true),
        localStorage: newTestStorage(),
        epochSource:  testEpochReceiver(curEpoch),
        traverserGenerator: &testTraverserGenerator{

View file

@@ -88,7 +88,7 @@ func (x *readPrmCommon) SetNetmapEpoch(_ uint64) {
type GetObjectPrm struct {
    readPrmCommon
-   cliPrm client.PrmObjectGet
+   ClientParams client.PrmObjectGet
    obj oid.ID
}
@@ -97,7 +97,7 @@ type GetObjectPrm struct {
//
// By default request will not be raw.
func (x *GetObjectPrm) SetRawFlag() {
-   x.cliPrm.MarkRaw()
+   x.ClientParams.Raw = true
}
// SetAddress sets object address.
@@ -105,8 +105,10 @@ func (x *GetObjectPrm) SetRawFlag() {
// Required parameter.
func (x *GetObjectPrm) SetAddress(addr oid.Address) {
    x.obj = addr.Object()
-   x.cliPrm.FromContainer(addr.Container())
-   x.cliPrm.ByID(x.obj)
+   cnr := addr.Container()
+   x.ClientParams.ContainerID = &cnr
+   x.ClientParams.ObjectID = &x.obj
}
// GetObjectRes groups the resulting values of GetObject operation.
@@ -134,23 +136,15 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
    // request will almost definitely fail. The case can occur, for example,
    // when session is bound to the parent object and child object is requested.
    if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
-       prm.cliPrm.WithinSession(*prm.tokenSession)
+       prm.ClientParams.Session = prm.tokenSession
    }
-   if prm.tokenBearer != nil {
-       prm.cliPrm.WithBearerToken(*prm.tokenBearer)
-   }
-   if prm.local {
-       prm.cliPrm.MarkLocal()
-   }
-   prm.cliPrm.WithXHeaders(prm.xHeaders...)
-   if prm.key != nil {
-       prm.cliPrm.UseKey(*prm.key)
-   }
-   rdr, err := prm.cli.ObjectGetInit(ctx, prm.cliPrm)
+   prm.ClientParams.XHeaders = prm.xHeaders
+   prm.ClientParams.BearerToken = prm.tokenBearer
+   prm.ClientParams.Local = prm.local
+   prm.ClientParams.Key = prm.key
+   rdr, err := prm.cli.ObjectGetInit(ctx, prm.ClientParams)
    if err != nil {
        return nil, fmt.Errorf("init object reading: %w", err)
    }
@@ -187,7 +181,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
type HeadObjectPrm struct {
    readPrmCommon
-   cliPrm client.PrmObjectHead
+   ClientParams client.PrmObjectHead
    obj oid.ID
}
@@ -196,7 +190,7 @@ type HeadObjectPrm struct {
//
// By default request will not be raw.
func (x *HeadObjectPrm) SetRawFlag() {
-   x.cliPrm.MarkRaw()
+   x.ClientParams.Raw = true
}
// SetAddress sets object address.
@@ -204,8 +198,10 @@ func (x *HeadObjectPrm) SetRawFlag() {
// Required parameter.
func (x *HeadObjectPrm) SetAddress(addr oid.Address) {
    x.obj = addr.Object()
-   x.cliPrm.FromContainer(addr.Container())
-   x.cliPrm.ByID(x.obj)
+   cnr := addr.Container()
+   x.ClientParams.ContainerID = &cnr
+   x.ClientParams.ObjectID = &x.obj
}
// HeadObjectRes groups the resulting values of GetObject operation.
@@ -230,22 +226,16 @@ func (x HeadObjectRes) Header() *objectSDK.Object {
//
// HeadObject ignores the provided session if it is not related to the requested objectSDK.
func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) {
-   if prm.local {
-       prm.cliPrm.MarkLocal()
-   }
    // see details in same statement of GetObject
    if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
-       prm.cliPrm.WithinSession(*prm.tokenSession)
+       prm.ClientParams.Session = prm.tokenSession
    }
-   if prm.tokenBearer != nil {
-       prm.cliPrm.WithBearerToken(*prm.tokenBearer)
-   }
-   prm.cliPrm.WithXHeaders(prm.xHeaders...)
-   cliRes, err := prm.cli.ObjectHead(ctx, prm.cliPrm)
+   prm.ClientParams.BearerToken = prm.tokenBearer
+   prm.ClientParams.Local = prm.local
+   prm.ClientParams.XHeaders = prm.xHeaders
+   cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams)
    if err == nil {
        // pull out an error from status
        err = apistatus.ErrFromStatus(cliRes.Status())
@@ -272,7 +262,7 @@ type PayloadRangePrm struct {
    ln uint64
-   cliPrm client.PrmObjectRange
+   ClientParams client.PrmObjectRange
    obj oid.ID
}
@@ -281,7 +271,7 @@ type PayloadRangePrm struct {
//
// By default request will not be raw.
func (x *PayloadRangePrm) SetRawFlag() {
-   x.cliPrm.MarkRaw()
+   x.ClientParams.Raw = true
}
// SetAddress sets object address.
@@ -289,15 +279,17 @@ func (x *PayloadRangePrm) SetRawFlag() {
// Required parameter.
func (x *PayloadRangePrm) SetAddress(addr oid.Address) {
    x.obj = addr.Object()
-   x.cliPrm.FromContainer(addr.Container())
-   x.cliPrm.ByID(x.obj)
+   cnr := addr.Container()
+   x.ClientParams.ContainerID = &cnr
+   x.ClientParams.ObjectID = &x.obj
}
// SetRange range of the object payload to be read.
//
// Required parameter.
func (x *PayloadRangePrm) SetRange(rng *objectSDK.Range) {
-   x.cliPrm.SetOffset(rng.GetOffset())
+   x.ClientParams.Offset = rng.GetOffset()
    x.ln = rng.GetLength()
}
@@ -329,23 +321,17 @@ const maxInitialBufferSize = 1024 * 1024 // 1 MiB
//
// PayloadRange ignores the provided session if it is not related to the requested objectSDK.
func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, error) {
-   if prm.local {
-       prm.cliPrm.MarkLocal()
-   }
    // see details in same statement of GetObject
    if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
-       prm.cliPrm.WithinSession(*prm.tokenSession)
+       prm.ClientParams.Session = prm.tokenSession
    }
-   if prm.tokenBearer != nil {
-       prm.cliPrm.WithBearerToken(*prm.tokenBearer)
-   }
-   prm.cliPrm.SetLength(prm.ln)
-   prm.cliPrm.WithXHeaders(prm.xHeaders...)
-   rdr, err := prm.cli.ObjectRangeInit(ctx, prm.cliPrm)
+   prm.ClientParams.XHeaders = prm.xHeaders
+   prm.ClientParams.BearerToken = prm.tokenBearer
+   prm.ClientParams.Local = prm.local
+   prm.ClientParams.Length = prm.ln
+   rdr, err := prm.cli.ObjectRangeInit(ctx, prm.ClientParams)
    if err != nil {
        return nil, fmt.Errorf("init payload reading: %w", err)
    }
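Note: a short sketch of the resulting call flow (addr and the client stored in prm are placeholders). The wrapper methods now fill the exported ClientParams struct instead of calling SDK setters, and GetObject passes it to ObjectGetInit as-is:

var prm GetObjectPrm
prm.SetAddress(addr) // fills ClientParams.ContainerID and ClientParams.ObjectID
prm.SetRawFlag()     // sets ClientParams.Raw = true
res, err := GetObject(ctx, prm)
if err != nil {
    return err
}
// res groups the received header and payload, as before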

View file

@@ -0,0 +1,119 @@
package putsvc
import (
"context"
"fmt"
"sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
type nodeIterator struct {
traversal
cfg *cfg
}
func (c *cfg) newNodeIterator(opts []placement.Option) *nodeIterator {
return &nodeIterator{
traversal: traversal{
opts: opts,
mExclude: make(map[string]*bool),
},
cfg: c,
}
}
func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context, nodeDesc) error) error {
traverser, err := placement.NewTraverser(n.traversal.opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
resErr := &atomic.Value{}
// Must iterate over all replicas, regardless of whether there are identical nodes there.
// At the same time need to exclude identical nodes from processing.
for {
addrs := traverser.Next()
if len(addrs) == 0 {
break
}
if n.forEachAddress(ctx, traverser, addrs, f, resErr) {
break
}
}
if !traverser.Success() {
var err errIncompletePut
err.singleErr, _ = resErr.Load().(error)
return err
}
// perform additional container broadcast if needed
if n.traversal.submitPrimaryPlacementFinish() {
err := n.forEachNode(ctx, f)
if err != nil {
n.cfg.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
return nil
}
func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, nodeDesc) error, resErr *atomic.Value) bool {
var wg sync.WaitGroup
for _, addr := range addrs {
addr := addr
if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
if *ok {
traverser.SubmitSuccess()
}
// This can happen only during additional container broadcast.
continue
}
workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey())
item := new(bool)
wg.Add(1)
if err := workerPool.Submit(func() {
defer wg.Done()
err := f(ctx, nodeDesc{local: isLocal, info: addr})
if err != nil {
resErr.Store(err)
svcutil.LogServiceError(n.cfg.log, "PUT", addr.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
}); err != nil {
wg.Done()
svcutil.LogWorkerPoolError(n.cfg.log, "PUT", err)
return true
}
// Mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
n.traversal.submitProcessed(addr, item)
}
wg.Wait()
return false
}
func needAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
return len(obj.Children()) > 0 || (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock))
}
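Note: a minimal sketch of how the new iterator is driven by its two callers in this change (cfg, opts, obj and sendFn are placeholders):

iter := cfg.newNodeIterator(opts)
iter.extraBroadcastEnabled = needAdditionalBroadcast(obj, false /* localOnly */)
err := iter.forEachNode(ctx, func(ctx context.Context, node nodeDesc) error {
    return sendFn(ctx, node) // invoked once per distinct placement node, on a worker pool
})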

View file

@@ -3,18 +3,11 @@ package putsvc
import (
    "context"
    "fmt"
-   "sync"
-   "sync/atomic"
-   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
-   svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
-   "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
-   "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
-   "go.uber.org/zap"
)
type preparedObjectTarget interface {
@@ -22,22 +15,19 @@ type preparedObjectTarget interface {
}
type distributedTarget struct {
-   traversal traversal
+   placementOpts         []placement.Option
+   extraBroadcastEnabled bool
    obj     *objectSDK.Object
    objMeta object.ContentMeta
+   *cfg
    payload *payload
    nodeTargetInitializer func(nodeDesc) preparedObjectTarget
-   getWorkerPool func([]byte) (util.WorkerPool, bool)
    relay func(context.Context, nodeDesc) error
-   fmt *object.FormatValidator
-   log *logger.Logger
}
// parameters and state of container traversal.
@@ -137,15 +127,9 @@ func (t *distributedTarget) WriteObject(ctx context.Context, obj *objectSDK.Obje
    var err error
-   if t.objMeta, err = t.fmt.ValidateContent(t.obj); err != nil {
+   if t.objMeta, err = t.fmtValidator.ValidateContent(t.obj); err != nil {
        return fmt.Errorf("(%T) could not validate payload content: %w", t, err)
    }
-   if len(t.obj.Children()) > 0 {
-       // enabling extra broadcast for linking objects
-       t.traversal.extraBroadcastEnabled = true
-   }
    return t.iteratePlacement(ctx)
}
@@ -166,90 +150,7 @@ func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error
func (t *distributedTarget) iteratePlacement(ctx context.Context) error {
    id, _ := t.obj.ID()
-   traverser, err := placement.NewTraverser(
-       append(t.traversal.opts, placement.ForObject(id))...,
-   )
+   iter := t.cfg.newNodeIterator(append(t.placementOpts, placement.ForObject(id)))
+   iter.extraBroadcastEnabled = needAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
+   return iter.forEachNode(ctx, t.sendObject)
-   if err != nil {
-       return fmt.Errorf("(%T) could not create object placement traverser: %w", t, err)
-   }
resErr := &atomic.Value{}
// Must iterate over all replicas, regardless of whether there are identical nodes there.
// At the same time need to exclude identical nodes from processing.
for {
addrs := traverser.Next()
if len(addrs) == 0 {
break
}
if t.iterateAddresses(ctx, traverser, addrs, resErr) {
break
}
}
if !traverser.Success() {
var err errIncompletePut
err.singleErr, _ = resErr.Load().(error)
return err
}
// perform additional container broadcast if needed
if t.traversal.submitPrimaryPlacementFinish() {
err = t.iteratePlacement(ctx)
if err != nil {
t.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
return nil
}
func (t *distributedTarget) iterateAddresses(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, resErr *atomic.Value) bool {
wg := &sync.WaitGroup{}
for i := range addrs {
addr := addrs[i]
if val := t.traversal.mExclude[string(addr.PublicKey())]; val != nil {
// Check is node processed successful on the previous iteration.
if *val {
traverser.SubmitSuccess()
}
// it can happen only during additional container broadcast
continue
}
wg.Add(1)
item := new(bool)
workerPool, isLocal := t.getWorkerPool(addr.PublicKey())
if err := workerPool.Submit(func() {
defer wg.Done()
err := t.sendObject(ctx, nodeDesc{local: isLocal, info: addr})
if err != nil {
resErr.Store(err)
svcutil.LogServiceError(t.log, "PUT", addr.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
}); err != nil {
wg.Done()
svcutil.LogWorkerPoolError(t.log, "PUT", err)
return true
}
// mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
t.traversal.submitProcessed(addr, item)
}
wg.Wait()
return false
}

View file

@@ -29,6 +29,14 @@ type ClientConstructor interface {
    Get(client.NodeInfo) (client.MultiAddressClient, error)
}
+type InnerRing interface {
+   InnerRingKeys() ([][]byte, error)
+}
+type FormatValidatorConfig interface {
+   VerifySessionTokenIssuer() bool
+}
type cfg struct {
    keyStorage *objutil.KeyStorage
@@ -51,6 +59,8 @@ type cfg struct {
    clientConstructor ClientConstructor
    log *logger.Logger
+   verifySessionTokenIssuer bool
}
func NewService(ks *objutil.KeyStorage,
@@ -61,6 +71,7 @@ func NewService(ks *objutil.KeyStorage,
    ns netmap.Source,
    nk netmap.AnnouncedKeys,
    nst netmap.State,
+   ir InnerRing,
    opts ...Option) *Service {
    c := &cfg{
        remotePool: util.NewPseudoWorkerPool(),
@@ -80,7 +91,15 @@ func NewService(ks *objutil.KeyStorage,
        opts[i](c)
    }
-   c.fmtValidator = object.NewFormatValidator(object.WithLockSource(os), object.WithNetState(nst))
+   c.fmtValidator = object.NewFormatValidator(
+       object.WithLockSource(os),
+       object.WithNetState(nst),
+       object.WithInnerRing(ir),
+       object.WithNetmapSource(ns),
+       object.WithContainersSource(cs),
+       object.WithVerifySessionTokenIssuer(c.verifySessionTokenIssuer),
+       object.WithLogger(c.log),
+   )
    return &Service{
        cfg: c,
@@ -104,3 +123,9 @@ func WithLogger(l *logger.Logger) Option {
        c.log = l
    }
}
+func WithVerifySessionTokenIssuer(v bool) Option {
+   return func(c *cfg) {
+       c.verifySessionTokenIssuer = v
+   }
+}
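Note: a wiring sketch for the new knob at an assumed call site in the node's object-service construction (the log variable is a placeholder; WithLogger and WithVerifySessionTokenIssuer are the options shown above). The option list is then passed to NewService, which now also receives the InnerRing key source:

opts := []Option{
    WithLogger(log),
    WithVerifySessionTokenIssuer(true), // enable issuer-vs-owner validation in the format validator
}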

View file

@@ -8,7 +8,6 @@ import (
    "fmt"
    "hash"
    "sync"
-   "sync/atomic"
    objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
@@ -150,18 +149,19 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
    if err != nil {
        return err
    }
-   traversal := &traversal{
-       opts: placementOptions,
-       extraBroadcastEnabled: len(obj.Children()) > 0 ||
-           (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock)),
-       mExclude: make(map[string]*bool),
-   }
+   iter := s.cfg.newNodeIterator(placementOptions)
+   iter.extraBroadcastEnabled = needAdditionalBroadcast(obj, localOnly)
    signer := &putSingleRequestSigner{
        req:        req,
        keyStorage: s.keyStorage,
        signer:     &sync.Once{},
    }
-   return s.saveAccordingToPlacement(ctx, obj, signer, traversal, meta)
+   return iter.forEachNode(ctx, func(ctx context.Context, nd nodeDesc) error {
+       return s.saveToPlacementNode(ctx, &nd, obj, signer, meta)
+   })
}
func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) ([]placement.Option, error) {
@@ -199,97 +199,6 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
    return result, nil
}
func (s *Service) saveAccordingToPlacement(ctx context.Context, obj *objectSDK.Object, signer *putSingleRequestSigner,
traversal *traversal, meta object.ContentMeta) error {
traverser, err := placement.NewTraverser(traversal.opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
var resultError atomic.Value
for {
addrs := traverser.Next()
if len(addrs) == 0 {
break
}
if stop := s.saveToPlacementNodes(ctx, obj, signer, traversal, traverser, addrs, meta, &resultError); stop {
break
}
}
if !traverser.Success() {
var err errIncompletePut
err.singleErr, _ = resultError.Load().(error)
return err
}
if traversal.submitPrimaryPlacementFinish() {
err = s.saveAccordingToPlacement(ctx, obj, signer, traversal, meta)
if err != nil {
s.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
}
}
return nil
}
func (s *Service) saveToPlacementNodes(ctx context.Context,
obj *objectSDK.Object,
signer *putSingleRequestSigner,
traversal *traversal,
traverser *placement.Traverser,
nodeAddresses []placement.Node,
meta object.ContentMeta,
resultError *atomic.Value,
) bool {
wg := sync.WaitGroup{}
for _, nodeAddress := range nodeAddresses {
nodeAddress := nodeAddress
if ok := traversal.mExclude[string(nodeAddress.PublicKey())]; ok != nil {
if *ok {
traverser.SubmitSuccess()
}
continue
}
local := false
workerPool := s.remotePool
if s.netmapKeys.IsLocalKey(nodeAddress.PublicKey()) {
local = true
workerPool = s.localPool
}
item := new(bool)
wg.Add(1)
if err := workerPool.Submit(func() {
defer wg.Done()
err := s.saveToPlacementNode(ctx, &nodeDesc{local: local, info: nodeAddress}, obj, signer, meta)
if err != nil {
resultError.Store(err)
svcutil.LogServiceError(s.log, "PUT", nodeAddress.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
}); err != nil {
wg.Done()
svcutil.LogWorkerPoolError(s.log, "PUT", err)
return true
}
traversal.submitProcessed(nodeAddress, item)
}
wg.Wait()
return false
}
func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *nodeDesc, obj *objectSDK.Object,
    signer *putSingleRequestSigner, meta object.ContentMeta) error {
    if nodeDesc.local {

View file

@@ -215,13 +215,10 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) *distributedTarget {
    withBroadcast := !prm.common.LocalOnly() && (typ == objectSDK.TypeTombstone || typ == objectSDK.TypeLock)
    return &distributedTarget{
-       traversal: traversal{
-           opts:                  prm.traverseOpts,
+       cfg:                   p.cfg,
+       placementOpts:         prm.traverseOpts,
        extraBroadcastEnabled: withBroadcast,
-       },
        payload: getPayload(),
-       getWorkerPool: p.getWorkerPool,
        nodeTargetInitializer: func(node nodeDesc) preparedObjectTarget {
            if node.local {
                return localTarget{
@@ -240,8 +237,6 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) *distributedTarget {
            return rt
        },
        relay: relay,
-       fmt:   p.fmtValidator,
-       log:   p.log,
    }
}
@@ -279,9 +274,9 @@ func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) {
    }, nil
}
-func (p *Streamer) getWorkerPool(pub []byte) (pkgutil.WorkerPool, bool) {
-   if p.netmapKeys.IsLocalKey(pub) {
-       return p.localPool, true
+func (c *cfg) getWorkerPool(pub []byte) (pkgutil.WorkerPool, bool) {
+   if c.netmapKeys.IsLocalKey(pub) {
+       return c.localPool, true
    }
-   return p.remotePool, false
+   return c.remotePool, false
}

View file

@@ -3,6 +3,7 @@ package searchsvc
import (
    "context"
    "encoding/hex"
+   "fmt"
    "sync"
    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -10,12 +11,7 @@ import (
    "go.uber.org/zap"
)
-func (exec *execCtx) executeOnContainer(ctx context.Context) {
+func (exec *execCtx) executeOnContainer(ctx context.Context) error {
-   if exec.isLocal() {
-       exec.log.Debug(logs.SearchReturnResultDirectly)
-       return
-   }
    lookupDepth := exec.netmapLookupDepth()
    exec.log.Debug(logs.TryingToExecuteInContainer,
@@ -23,13 +19,12 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
    )
    // initialize epoch number
-   ok := exec.initEpoch()
-   if !ok {
-       return
+   if err := exec.initEpoch(); err != nil {
+       return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err)
    }
    for {
-       if exec.processCurrentEpoch(ctx) {
+       if err := exec.processCurrentEpoch(ctx); err != nil {
            break
        }
@@ -44,18 +39,17 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
        exec.curProcEpoch--
    }
-   exec.status = statusOK
-   exec.err = nil
+   return nil
}
-func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
+func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
    exec.log.Debug(logs.ProcessEpoch,
        zap.Uint64("number", exec.curProcEpoch),
    )
-   traverser, ok := exec.generateTraverser(exec.containerID())
-   if !ok {
-       return true
+   traverser, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch)
+   if err != nil {
+       return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err)
    }
    ctx, cancel := context.WithCancel(ctx)
@@ -91,12 +85,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
            c, err := exec.svc.clientConstructor.get(info)
            if err != nil {
-               mtx.Lock()
-               exec.status = statusUndefined
-               exec.err = err
-               mtx.Unlock()
-               exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient)
+               exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
                return
            }
@@ -109,13 +98,17 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
            }
            mtx.Lock()
-           exec.writeIDList(ids)
+           err = exec.writeIDList(ids)
            mtx.Unlock()
+           if err != nil {
+               exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
+               return
+           }
        }(i)
    }
    wg.Wait()
    }
-   return false
+   return nil
}

View file

@@ -1,8 +1,6 @@
package searchsvc
import (
-   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
-   "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -10,34 +8,16 @@ import (
    "go.uber.org/zap"
)
-type statusError struct {
-   status int
-   err    error
-}
type execCtx struct {
    svc *Service
    prm Prm
-   statusError
    log *logger.Logger
    curProcEpoch uint64
}
-const (
-   statusUndefined int = iota
-   statusOK
-)
-func (exec *execCtx) prepare() {
-   if _, ok := exec.prm.writer.(*uniqueIDWriter); !ok {
-       exec.prm.writer = newUniqueAddressWriter(exec.prm.writer)
-   }
-}
func (exec *execCtx) setLogger(l *logger.Logger) {
    exec.log = &logger.Logger{Logger: l.With(
        zap.String("request", "SEARCH"),
@@ -68,64 +48,24 @@ func (exec *execCtx) netmapLookupDepth() uint64 {
    return exec.prm.common.NetmapLookupDepth()
}
-func (exec *execCtx) initEpoch() bool {
+func (exec *execCtx) initEpoch() error {
    exec.curProcEpoch = exec.netmapEpoch()
    if exec.curProcEpoch > 0 {
-       return true
+       return nil
    }
-   e, err := exec.svc.currentEpochReceiver.currentEpoch()
+   e, err := exec.svc.currentEpochReceiver.Epoch()
+   if err != nil {
+       return err
+   }
-   switch {
-   default:
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.CouldNotGetCurrentEpochNumber,
-           zap.String("error", err.Error()),
-       )
-       return false
-   case err == nil:
    exec.curProcEpoch = e
-       return true
+   return nil
-   }
}
-func (exec *execCtx) generateTraverser(cnr cid.ID) (*placement.Traverser, bool) {
-   t, err := exec.svc.traverserGenerator.generateTraverser(cnr, exec.curProcEpoch)
-   switch {
-   default:
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.SearchCouldNotGenerateContainerTraverser,
-           zap.String("error", err.Error()),
-       )
-       return nil, false
-   case err == nil:
-       return t, true
-   }
-}
-func (exec *execCtx) writeIDList(ids []oid.ID) {
+func (exec *execCtx) writeIDList(ids []oid.ID) error {
    ids = exec.filterAllowedObjectIDs(ids)
-   err := exec.prm.writer.WriteIDs(ids)
+   return exec.prm.writer.WriteIDs(ids)
-   switch {
-   default:
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers,
-           zap.String("error", err.Error()),
-       )
-   case err == nil:
-       exec.status = statusOK
-       exec.err = nil
-   }
}
func (exec *execCtx) filterAllowedObjectIDs(objIDs []oid.ID) []oid.ID {

View file

@@ -2,24 +2,22 @@ package searchsvc
import (
    "context"
+   "fmt"
    "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "go.uber.org/zap"
)
-func (exec *execCtx) executeLocal(ctx context.Context) {
+func (exec *execCtx) executeLocal(ctx context.Context) error {
    ids, err := exec.svc.localStorage.search(ctx, exec)
    if err != nil {
-       exec.status = statusUndefined
-       exec.err = err
-       exec.log.Debug(logs.SearchLocalOperationFailed,
-           zap.String("error", err.Error()),
-       )
-       return
+       exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
+       return err
    }
-   exec.writeIDList(ids)
+   if err := exec.writeIDList(ids); err != nil {
+       return fmt.Errorf("%s: %w", logs.SearchCouldNotWriteObjectIdentifiers, err)
+   }
+   return nil
}
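Note: the refactor wraps failures with the corresponding logs constant while keeping the cause reachable through %w, so callers can still match the underlying error. A small illustration with placeholder values, assuming the logs constants are plain strings as used above:

cause := errors.New("write failed")
err := fmt.Errorf("%s: %w", logs.SearchCouldNotWriteObjectIdentifiers, cause)
fmt.Println(errors.Is(err, cause)) // true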

Some files were not shown because too many files have changed in this diff.