Compare commits


No commits in common. "7456c8556a8f924f065c7e18f1b80b1624847dd0" and "10e63537b27eeaf7dd9720e4b0d4492dbc84026d" have entirely different histories.

113 changed files with 2027 additions and 2810 deletions

@@ -24,7 +24,6 @@ jobs:
- name: Build CLI
run: make bin/frostfs-cli
- run: bin/frostfs-cli --version
- name: Build NODE
run: make bin/frostfs-node
@@ -34,8 +33,6 @@ jobs:
- name: Build ADM
run: make bin/frostfs-adm
- run: bin/frostfs-adm --version
- name: Build LENS
run: make bin/frostfs-lens
- run: bin/frostfs-lens --version

@@ -38,11 +38,10 @@ linters-settings:
pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
alias: objectSDK
custom:
truecloudlab-linters:
noliteral:
path: bin/external_linters.so
original-url: git.frostfs.info/TrueCloudLab/linters.git
settings:
noliteral:
target-methods : ["reportFlushError", "reportError"]
disable-packages: ["codes", "err", "res","exec"]
constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -78,6 +77,6 @@ linters:
- gocognit
- contextcheck
- importas
- truecloudlab-linters
- noliteral
disable-all: true
fast: false

@@ -9,7 +9,6 @@ HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.21
LINT_VERSION ?= 1.54.0
TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
ARCH = amd64
BIN = bin
@@ -27,7 +26,7 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
sed "s/-/~/")-${OS_RELEASE}
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)
TMP_DIR := .cache
.PHONY: help all images dep clean fmts fmt imports test lint docker/lint
@@ -140,7 +139,7 @@ pre-commit-run:
lint-install:
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@git clone --depth 1 https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true

@@ -145,12 +145,12 @@ func registerNNS(nnsCs *state.Contract, c *initializeContext, zone string, domai
emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
zone, c.CommitteeAcc.Contract.ScriptHash(),
frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
domain, c.CommitteeAcc.Contract.ScriptHash(),
frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
} else {
s, ok, err := c.nnsRegisterDomainScript(nnsCs.Hash, cs.Hash, domain)

@@ -27,8 +27,6 @@ import (
const defaultExpirationTime = 10 * 365 * 24 * time.Hour / time.Second
const frostfsOpsEmail = "ops@frostfs.info"
func (c *initializeContext) setNNS() error {
nnsCs, err := c.Client.GetContractStateByID(1)
if err != nil {
@@ -42,7 +40,7 @@ func (c *initializeContext) setNNS() error {
bw := io.NewBufBinWriter()
emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
"frostfs", c.CommitteeAcc.Contract.ScriptHash(),
frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
if err := c.sendCommitteeTx(bw.Bytes(), true); err != nil {
return fmt.Errorf("can't add domain root to NNS: %w", err)
@@ -124,7 +122,7 @@ func (c *initializeContext) emitUpdateNNSGroupScript(bw *io.BufBinWriter, nnsHas
if isAvail {
emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
morphClient.NNSGroupKeyName, c.CommitteeAcc.Contract.ScriptHash(),
frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
}
@@ -172,7 +170,7 @@ func (c *initializeContext) nnsRegisterDomainScript(nnsHash, expectedHash util.U
bw := io.NewBufBinWriter()
emit.AppCall(bw.BinWriter, nnsHash, "register", callflag.All,
domain, c.CommitteeAcc.Contract.ScriptHash(),
frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
"ops@nspcc.ru", int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
if bw.Err != nil {
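The hunks in this file only toggle between the literal "ops@nspcc.ru" and the frostfsOpsEmail constant declared next to defaultExpirationTime. A minimal sketch of the constant-based variant, assembled from lines shown above (surrounding function context is assumed):

const defaultExpirationTime = 10 * 365 * 24 * time.Hour / time.Second
const frostfsOpsEmail = "ops@frostfs.info"

// register the "frostfs" root domain for the committee account,
// passing the e-mail as a named constant instead of a repeated literal
emit.AppCall(bw.BinWriter, nnsCs.Hash, "register", callflag.All,
	"frostfs", c.CommitteeAcc.Contract.ScriptHash(),
	frostfsOpsEmail, int64(3600), int64(600), int64(defaultExpirationTime), int64(3600))
emit.Opcodes(bw.BinWriter, opcode.ASSERT)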

@@ -176,8 +176,8 @@ func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteCon
// EACLPrm groups parameters of EACL operation.
type EACLPrm struct {
Client *client.Client
ClientParams client.PrmContainerEACL
commonPrm
client.PrmContainerEACL
}
// EACLRes groups the resulting values of EACL operation.
@@ -194,15 +194,15 @@ func (x EACLRes) EACL() eacl.Table {
//
// Returns any error which prevented the operation from completing correctly in error return.
func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) {
res.cliRes, err = prm.Client.ContainerEACL(ctx, prm.ClientParams)
res.cliRes, err = prm.cli.ContainerEACL(ctx, prm.PrmContainerEACL)
return
}
// SetEACLPrm groups parameters of SetEACL operation.
type SetEACLPrm struct {
Client *client.Client
ClientParams client.PrmContainerSetEACL
commonPrm
client.PrmContainerSetEACL
}
// SetEACLRes groups the resulting values of SetEACL operation.
@@ -217,7 +217,7 @@ type SetEACLRes struct{}
//
// Returns any error which prevented the operation from completing correctly in error return.
func SetEACL(ctx context.Context, prm SetEACLPrm) (res SetEACLRes, err error) {
_, err = prm.Client.ContainerSetEACL(ctx, prm.ClientParams)
_, err = prm.cli.ContainerSetEACL(ctx, prm.PrmContainerSetEACL)
return
}
@@ -563,19 +563,28 @@ func (x GetObjectRes) Header() *objectSDK.Object {
// Returns any error which prevented the operation from completing correctly in error return.
// For raw reading, returns *object.SplitInfoError error if object is virtual.
func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
cnr := prm.objAddr.Container()
obj := prm.objAddr.Object()
var getPrm client.PrmObjectGet
getPrm.FromContainer(prm.objAddr.Container())
getPrm.ByID(prm.objAddr.Object())
getPrm := client.PrmObjectGet{
XHeaders: prm.xHeaders,
BearerToken: prm.bearerToken,
Session: prm.sessionToken,
Raw: prm.raw,
Local: prm.local,
ContainerID: &cnr,
ObjectID: &obj,
if prm.sessionToken != nil {
getPrm.WithinSession(*prm.sessionToken)
}
if prm.bearerToken != nil {
getPrm.WithBearerToken(*prm.bearerToken)
}
if prm.raw {
getPrm.MarkRaw()
}
if prm.local {
getPrm.MarkLocal()
}
getPrm.WithXHeaders(prm.xHeaders...)
rdr, err := prm.cli.ObjectGetInit(ctx, getPrm)
if err != nil {
return nil, fmt.Errorf("init object reading on client: %w", err)
@@ -630,20 +639,29 @@ func (x HeadObjectRes) Header() *objectSDK.Object {
// Returns any error which prevented the operation from completing correctly in error return.
// For raw reading, returns *object.SplitInfoError error if object is virtual.
func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) {
cnr := prm.objAddr.Container()
obj := prm.objAddr.Object()
var cliPrm client.PrmObjectHead
cliPrm.FromContainer(prm.objAddr.Container())
cliPrm.ByID(prm.objAddr.Object())
headPrm := client.PrmObjectHead{
XHeaders: prm.xHeaders,
BearerToken: prm.bearerToken,
Session: prm.sessionToken,
Raw: prm.raw,
Local: prm.local,
ContainerID: &cnr,
ObjectID: &obj,
if prm.sessionToken != nil {
cliPrm.WithinSession(*prm.sessionToken)
}
res, err := prm.cli.ObjectHead(ctx, headPrm)
if prm.bearerToken != nil {
cliPrm.WithBearerToken(*prm.bearerToken)
}
if prm.raw {
cliPrm.MarkRaw()
}
if prm.local {
cliPrm.MarkLocal()
}
cliPrm.WithXHeaders(prm.xHeaders...)
res, err := prm.cli.ObjectHead(ctx, cliPrm)
if err != nil {
return nil, fmt.Errorf("read object header via client: %w", err)
}
@@ -844,22 +862,32 @@ type PayloadRangeRes struct{}
// Returns any error which prevented the operation from completing correctly in error return.
// For raw reading, returns *object.SplitInfoError error if object is virtual.
func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, error) {
cnr := prm.objAddr.Container()
obj := prm.objAddr.Object()
var cliPrm client.PrmObjectRange
cliPrm.FromContainer(prm.objAddr.Container())
cliPrm.ByID(prm.objAddr.Object())
rangePrm := client.PrmObjectRange{
XHeaders: prm.xHeaders,
BearerToken: prm.bearerToken,
Session: prm.sessionToken,
Raw: prm.raw,
Local: prm.local,
ContainerID: &cnr,
ObjectID: &obj,
Offset: prm.rng.GetOffset(),
Length: prm.rng.GetLength(),
if prm.sessionToken != nil {
cliPrm.WithinSession(*prm.sessionToken)
}
rdr, err := prm.cli.ObjectRangeInit(ctx, rangePrm)
if prm.bearerToken != nil {
cliPrm.WithBearerToken(*prm.bearerToken)
}
if prm.raw {
cliPrm.MarkRaw()
}
if prm.local {
cliPrm.MarkLocal()
}
cliPrm.SetOffset(prm.rng.GetOffset())
cliPrm.SetLength(prm.rng.GetLength())
cliPrm.WithXHeaders(prm.xHeaders...)
rdr, err := prm.cli.ObjectRangeInit(ctx, cliPrm)
if err != nil {
return nil, fmt.Errorf("init payload reading: %w", err)
}
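The GetObject, HeadObject and PayloadRange hunks above differ only in how the SDK request parameters are filled: one side configures an empty value through setters (FromContainer, ByID, WithinSession, WithBearerToken, MarkRaw, MarkLocal, WithXHeaders), the other builds a struct literal. A sketch of the struct-literal variant for GetObject, reassembled from the fields shown in the hunk (the closing brace and alignment are reconstructed):

cnr := prm.objAddr.Container()
obj := prm.objAddr.Object()

getPrm := client.PrmObjectGet{
	XHeaders:    prm.xHeaders,
	BearerToken: prm.bearerToken,
	Session:     prm.sessionToken,
	Raw:         prm.raw,
	Local:       prm.local,
	ContainerID: &cnr,
	ObjectID:    &obj,
}

rdr, err := prm.cli.ObjectGetInit(ctx, getPrm)
if err != nil {
	return nil, fmt.Errorf("init object reading on client: %w", err)
}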

@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
@@ -21,12 +20,9 @@ var getExtendedACLCmd = &cobra.Command{
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
eaclPrm := internalclient.EACLPrm{
Client: cli,
ClientParams: client.PrmContainerEACL{
ContainerID: &id,
},
}
var eaclPrm internalclient.EACLPrm
eaclPrm.SetClient(cli)
eaclPrm.SetContainer(id)
res, err := internalclient.EACL(cmd.Context(), eaclPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
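EACLPrm follows the same split as the object parameters above: one variant is filled through exported fields (Client, ClientParams), the other through setters backed by an embedded commonPrm and client.PrmContainerEACL. A sketch of the field-based construction as it appears in this hunk, with the closing braces reconstructed:

eaclPrm := internalclient.EACLPrm{
	Client: cli,
	ClientParams: client.PrmContainerEACL{
		ContainerID: &id,
	},
}

res, err := internalclient.EACL(cmd.Context(), eaclPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)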

@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"github.com/spf13/cobra"
)
@@ -49,12 +48,12 @@ Container ID in EACL table will be substituted with ID from the CLI.`,
cmd.Println("ACL extension is enabled in the container, continue processing.")
}
setEACLPrm := internalclient.SetEACLPrm{
Client: cli,
ClientParams: client.PrmContainerSetEACL{
Table: eaclTable,
Session: tok,
},
var setEACLPrm internalclient.SetEACLPrm
setEACLPrm.SetClient(cli)
setEACLPrm.SetTable(*eaclTable)
if tok != nil {
setEACLPrm.WithinSession(*tok)
}
_, err := internalclient.SetEACL(cmd.Context(), setEACLPrm)
@@ -66,12 +65,9 @@ Container ID in EACL table will be substituted with ID from the CLI.`,
cmd.Println("awaiting...")
getEACLPrm := internalclient.EACLPrm{
Client: cli,
ClientParams: client.PrmContainerEACL{
ContainerID: &id,
},
}
var getEACLPrm internalclient.EACLPrm
getEACLPrm.SetClient(cli)
getEACLPrm.SetContainer(id)
for i := 0; i < awaitTimeout; i++ {
time.Sleep(1 * time.Second)

@@ -37,6 +37,6 @@ func AddOutputFileFlag(cmd *cobra.Command, v *string) {
// AddDBTypeFlag adds the DB type flag to the passed cobra command.
func AddDBTypeFlag(cmd *cobra.Command, v *string) {
cmd.Flags().StringVar(v, flagDBType, "bbolt",
cmd.Flags().StringVar(v, flagOutFile, "bbolt",
"Type of DB used by write cache (default: bbolt)")
}

@@ -142,8 +142,7 @@ func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
// wrapper over TTL cache of values read from the network
// that implements container storage.
type ttlContainerStorage struct {
containerCache *ttlNetCache[cid.ID, *container.Container]
delInfoCache *ttlNetCache[cid.ID, *container.DelInfo]
*ttlNetCache[cid.ID, *container.Container]
}
func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
@@ -152,31 +151,18 @@ func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContain
lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(id)
})
lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) {
return v.DeletionInfo(id)
})
return ttlContainerStorage{
containerCache: lruCnrCache,
delInfoCache: lruDelInfoCache,
}
return ttlContainerStorage{lruCnrCache}
}
func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
s.containerCache.set(cnr, nil, new(apistatus.ContainerNotFound))
// The removal invalidates possibly stored error response.
s.delInfoCache.remove(cnr)
s.set(cnr, nil, new(apistatus.ContainerNotFound))
}
// Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache.
func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
return s.containerCache.get(cnr)
}
func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) {
return s.delInfoCache.get(cnr)
return s.get(cnr)
}
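The two shapes of ttlContainerStorage explain the receiver calls seen above: embedding *ttlNetCache promotes its methods, so the wrapper can call s.get and s.set directly, while the two-field variant has to name the cache it touches (s.containerCache, s.delInfoCache). A minimal sketch of both layouts; the embeddedContainerStorage/splitContainerStorage names are illustrative only, the field and type names come from the hunk:

// single embedded cache: get/set/remove are promoted onto the wrapper
type embeddedContainerStorage struct {
	*ttlNetCache[cid.ID, *container.Container]
}

// separate named caches: every call selects one of them explicitly
type splitContainerStorage struct {
	containerCache *ttlNetCache[cid.ID, *container.Container]
	delInfoCache   *ttlNetCache[cid.ID, *container.DelInfo]
}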
type ttlEACLStorage struct {

@@ -174,7 +174,6 @@ type subStorageCfg struct {
// blobovnicza-specific
size uint64
width uint64
leafWidth uint64
openedCacheSize int
}
@@ -289,7 +288,6 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol
sCfg.size = sub.Size()
sCfg.depth = sub.ShallowDepth()
sCfg.width = sub.ShallowWidth()
sCfg.leafWidth = sub.LeafWidth()
sCfg.openedCacheSize = sub.OpenedCacheSize()
case fstree.Type:
sub := fstreeconfig.From((*config.Config)(storagesCfg[i]))
@@ -511,8 +509,6 @@ type cfgObject struct {
cfgLocalStorage cfgLocalStorage
tombstoneLifetime uint64
skipSessionTokenIssuerVerification bool
}
type cfgNotifications struct {
@@ -681,7 +677,6 @@ func initCfgObject(appCfg *config.Config) cfgObject {
return cfgObject{
pool: initObjectPool(appCfg),
tombstoneLifetime: objectconfig.TombstoneLifetime(appCfg),
skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(),
}
}
@@ -779,7 +774,6 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
blobovniczatree.WithBlobovniczaSize(sRead.size),
blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth),
blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
blobovniczatree.WithLogger(c.log),
}

@@ -94,7 +94,6 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 1, blz.ShallowDepth())
require.EqualValues(t, 4, blz.ShallowWidth())
require.EqualValues(t, 50, blz.OpenedCacheSize())
require.EqualValues(t, 10, blz.LeafWidth())
require.Equal(t, "tmp/0/blob", ss[1].Path())
require.EqualValues(t, 0644, ss[1].Perm())
@@ -143,7 +142,6 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 1, blz.ShallowDepth())
require.EqualValues(t, 4, blz.ShallowWidth())
require.EqualValues(t, 50, blz.OpenedCacheSize())
require.EqualValues(t, 10, blz.LeafWidth())
require.Equal(t, "tmp/1/blob", ss[1].Path())
require.EqualValues(t, 0644, ss[1].Perm())

@@ -102,13 +102,3 @@ func (x *Config) OpenedCacheSize() int {
func (x *Config) BoltDB() *boltdbconfig.Config {
return (*boltdbconfig.Config)(x)
}
// LeafWidth returns the value of "leaf_width" config parameter.
//
// Returns 0 if the value is not a positive number.
func (x *Config) LeafWidth() uint64 {
return config.UintSafe(
(*config.Config)(x),
"leaf_width",
)
}

@@ -51,8 +51,3 @@ func (g PutConfig) PoolSizeLocal() int {
return PutPoolSizeDefault
}
// SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false` if is not defined.
func (g PutConfig) SkipSessionTokenIssuerVerification() bool {
return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification")
}

@@ -16,7 +16,6 @@ func TestObjectSection(t *testing.T) {
require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote())
require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal())
require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty))
require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification())
})
const path = "../../../../config/example/node"
@@ -25,7 +24,6 @@ func TestObjectSection(t *testing.T) {
require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote())
require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal())
require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c))
require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification())
}
configtest.ForEachFileType(path, fileConfigTest)

@@ -113,7 +113,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
c.cfgObject.eaclSource = eACLFetcher
cnrRdr.eacl = eACLFetcher
c.cfgObject.cnrSource = cnrSrc
cnrRdr.src = cnrSrc
cnrRdr.get = cnrSrc
cnrRdr.lister = client
} else {
// use RPC node as source of Container contract items (with caching)
@@ -131,7 +131,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
cnr, err := cnrSrc.Get(ev.ID)
if err == nil {
cachedContainerLister.update(cnr.Value.Owner(), ev.ID, true)
cachedContainerStorage.containerCache.set(ev.ID, cnr, nil)
cachedContainerStorage.set(ev.ID, cnr, nil)
} else {
// unlike removal, we expect successful receive of the container
// after successful creation, so logging can be useful
@@ -159,6 +159,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
}
cachedContainerStorage.handleRemoval(ev.ID)
c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
zap.Stringer("id", ev.ID),
)
@@ -169,7 +170,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
cnrRdr.lister = cachedContainerLister
cnrRdr.eacl = c.cfgObject.eaclSource
cnrRdr.src = c.cfgObject.cnrSource
cnrRdr.get = c.cfgObject.cnrSource
cnrWrt.cacheEnabled = true
cnrWrt.eacls = cachedEACLStorage
@@ -640,7 +641,7 @@ func (c *usedSpaceService) processLoadValue(_ context.Context, a containerSDK.Si
type morphContainerReader struct {
eacl containerCore.EACLSource
src containerCore.Source
get containerCore.Source
lister interface {
List(*user.ID) ([]cid.ID, error)
@@ -648,11 +649,7 @@ type morphContainerReader struct {
}
func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) {
return x.src.Get(id)
}
func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) {
return x.src.DeletionInfo(id)
return x.get.Get(id)
}
func (x *morphContainerReader) GetEACL(id cid.ID) (*containerCore.EACL, error) {

@@ -160,9 +160,8 @@ func initObjectService(c *cfg) {
addPolicer(c, keyStorage, c.bgClientCache)
traverseGen := util.NewTraverserGenerator(c.netMapSource, c.cfgObject.cnrSource, c)
irFetcher := newCachedIRFetcher(createInnerRingFetcher(c))
sPut := createPutSvc(c, keyStorage, &irFetcher)
sPut := createPutSvc(c, keyStorage)
sPutV2 := createPutSvcV2(sPut, keyStorage)
@@ -185,7 +184,7 @@ func initObjectService(c *cfg) {
splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2)
aclSvc := createACLServiceV2(c, splitSvc, &irFetcher)
aclSvc := createACLServiceV2(c, splitSvc)
var commonSvc objectService.Common
commonSvc.Init(&c.internals, aclSvc)
@@ -296,7 +295,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
)
}
func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetcher) *putsvc.Service {
func createPutSvc(c *cfg, keyStorage *util.KeyStorage) *putsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
var os putsvc.ObjectStorage = engineWithoutNotifications{
@@ -321,10 +320,8 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
c.netMapSource,
c,
c.cfgNetmap.state,
irFetcher,
putsvc.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
putsvc.WithLogger(c.log),
putsvc.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
)
}
@@ -408,13 +405,14 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi
)
}
func createACLServiceV2(c *cfg, splitSvc *objectService.TransportSplitter, irFetcher *cachedIRFetcher) v2.Service {
func createACLServiceV2(c *cfg, splitSvc *objectService.TransportSplitter) v2.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
irFetcher := createInnerRingFetcher(c)
return v2.New(
splitSvc,
c.netMapSource,
irFetcher,
newCachedIRFetcher(irFetcher),
acl.NewChecker(
c.cfgNetmap.state,
c.cfgObject.eaclSource,

@@ -31,10 +31,6 @@ func (c cnrSource) Get(id cid.ID) (*container.Container, error) {
return c.src.Get(id)
}
func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) {
return c.src.DeletionInfo(cid)
}
func (c cnrSource) List() ([]cid.ID, error) {
return c.cli.ContainersOf(nil)
}

@@ -86,7 +86,6 @@ FROSTFS_REPLICATOR_POOL_SIZE=10
# Object service section
FROSTFS_OBJECT_PUT_POOL_SIZE_REMOTE=100
FROSTFS_OBJECT_PUT_POOL_SIZE_LOCAL=200
FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
# Storage engine section
@@ -122,7 +121,6 @@ FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_SIZE=4194304
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH=1
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH=4
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_LEAF_WIDTH=10
### FSTree config
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE=fstree
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH=tmp/0/blob
@@ -169,7 +167,6 @@ FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_SIZE=4194304
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH=1
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH=4
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_LEAF_WIDTH=10
### FSTree config
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE=fstree
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH=tmp/1/blob

@@ -130,8 +130,7 @@
},
"put": {
"pool_size_remote": 100,
"pool_size_local": 200,
"skip_session_token_issuer_verification": true
"pool_size_local": 200
}
},
"storage": {
@@ -169,8 +168,7 @@
"size": 4194304,
"depth": 1,
"width": 4,
"opened_cache_capacity": 50,
"leaf_width": 10
"opened_cache_capacity": 50
},
{
"type": "fstree",
@@ -220,8 +218,7 @@
"size": 4194304,
"depth": 1,
"width": 4,
"opened_cache_capacity": 50,
"leaf_width": 10
"opened_cache_capacity": 50
},
{
"type": "fstree",

@@ -110,7 +110,6 @@ object:
put:
pool_size_remote: 100 # number of async workers for remote PUT operations
pool_size_local: 200 # number of async workers for local PUT operations
skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
storage:
# note: shard configuration can be omitted for relay node (see `node.relay`)
@@ -146,7 +145,6 @@ storage:
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
leaf_width: 10 # max count of key-value DB on leafs of object tree storage
- perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 5 # max depth of object tree storage in FS

debian/control

@@ -1,7 +1,7 @@
Source: frostfs-node
Section: misc
Priority: optional
Maintainer: TrueCloudLab <tech@frostfs.info>
Maintainer: NeoSPCC <tech@nspcc.ru>
Build-Depends: debhelper-compat (= 13), dh-sequence-bash-completion, devscripts
Standards-Version: 4.5.1
Homepage: https://fs.neo.org/

go.mod

@@ -6,12 +6,11 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230627134746-36f3d39c406a
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230828082657-84e7e69f98ac
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230809065235-d48788c7a946
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
github.com/cheggaaa/pb v1.0.29
github.com/chzyer/readline v1.5.1
github.com/dgraph-io/ristretto v0.1.1
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
github.com/google/uuid v1.3.0
github.com/hashicorp/golang-lru/v2 v2.0.4
@@ -43,6 +42,7 @@ require (
)
require (
github.com/dgraph-io/ristretto v0.1.1 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.1.0 // indirect

go.sum (diff not shown)

@@ -17,462 +17,467 @@ const (
)
const (
InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations"
InnerringCantStopEpochEstimation = "can't stop epoch estimation"
InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain"
InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain"
InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
InnerringCantGetInnerRingIndex = "can't get inner ring index"
InnerringCantGetInnerRingSize = "can't get inner ring size"
InnerringCantGetAlphabetIndex = "can't get alphabet index"
InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range"
InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list"
InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract"
InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number"
InnerringNotarySupport = "notary support"
InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled"
InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled"
InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global"
InnerringCantVoteForPreparedValidators = "can't vote for prepared validators"
InnerringNewBlock = "new block"
InnerringCantUpdatePersistentState = "can't update persistent state"
InnerringCloserError = "closer error"
InnerringReadConfigFromBlockchain = "read config from blockchain"
NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications"
NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification"
PolicerCouldNotGetContainer = "could not get container"
PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object"
PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected"
PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance"
PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK"
PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected"
PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy"
PolicerRoutineStopped = "routine stopped"
PolicerFailureAtObjectSelectForReplication = "failure at object select for replication"
PolicerPoolSubmission = "pool submission"
ReplicatorFinishWork = "finish work"
ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage"
ReplicatorCouldNotReplicateObject = "could not replicate object"
ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
TreeRedirectingTreeServiceQuery = "redirecting tree service query"
TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL"
TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
TreeSynchronizeTree = "synchronize tree"
TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes"
TreeSyncingTrees = "syncing trees..."
TreeCouldNotFetchContainers = "could not fetch containers"
TreeTreesHaveBeenSynchronized = "trees have been synchronized"
TreeSyncingContainerTrees = "syncing container trees..."
TreeCouldNotSyncTrees = "could not sync trees"
TreeContainerTreesHaveBeenSynced = "container trees have been synced"
TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization"
TreeRemovingRedundantTrees = "removing redundant trees..."
TreeCouldNotCheckIfContainerExisted = "could not check if the container ever existed"
TreeCouldNotRemoveRedundantTree = "could not remove redundant tree"
TreeCouldNotCalculateContainerNodes = "could not calculate container nodes"
TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation"
TreeDoNotSendUpdateToTheNode = "do not send update to the node"
TreeFailedToSentUpdateToTheNode = "failed to sent update to the node"
TreeErrorDuringReplication = "error during replication"
PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage"
PersistentCouldNotDeleteSToken = "could not delete token"
PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens"
ControllerReportIsAlreadyStarted = "report is already started"
TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source"
DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY"
DeleteAssemblingChain = "assembling chain..."
DeleteCollectingChildren = "collecting children..."
DeleteSupplementBySplitID = "supplement by split ID"
DeleteFormingTombstoneStructure = "forming tombstone structure..."
DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..."
DeleteFormingSplitInfo = "forming split info..."
DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..."
DeleteMembersSuccessfullyCollected = "members successfully collected"
GetRemoteCallFailed = "remote call failed"
GetCanNotAssembleTheObject = "can not assemble the object"
GetTryingToAssembleTheObject = "trying to assemble the object..."
GetAssemblingSplittedObject = "assembling splitted object..."
GetAssemblingSplittedObjectCompleted = "assembling splitted object completed"
GetFailedToAssembleSplittedObject = "failed to assemble splitted object"
GetCouldNotGenerateContainerTraverser = "could not generate container traverser"
GetCouldNotConstructRemoteNodeClient = "could not construct remote node client"
GetCouldNotWriteHeader = "could not write header"
GetCouldNotWritePayloadChunk = "could not write payload chunk"
GetLocalGetFailed = "local get failed"
GetReturnResultDirectly = "return result directly"
GetCompletingTheOperation = "completing the operation"
GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed"
GetRequestedObjectIsVirtual = "requested object is virtual"
GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds"
PutAdditionalContainerBroadcastFailure = "additional container broadcast failure"
SearchReturnResultDirectly = "return result directly"
SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client"
SearchRemoteOperationFailed = "remote operation failed"
SearchCouldNotGenerateContainerTraverser = "could not generate container traverser"
SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers"
SearchLocalOperationFailed = "local operation failed"
UtilObjectServiceError = "object service error"
UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool"
V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring"
V2CantCheckIfRequestFromContainerNode = "can't check if request from container node"
NatsNatsConnectionWasLost = "nats: connection was lost"
NatsNatsReconnectedToTheServer = "nats: reconnected to the server"
NatsNatsClosingConnectionAsTheContextIsDone = "nats: closing connection as the context is done"
ControllerStartingToAnnounceTheValuesOfTheMetrics = "starting to announce the values of the metrics"
ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics = "could not initialize iterator over locally collected metrics"
ControllerCouldNotInitializeAnnouncementAccumulator = "could not initialize announcement accumulator"
ControllerIteratorOverLocallyCollectedMetricsAborted = "iterator over locally collected metrics aborted"
ControllerCouldNotFinishWritingLocalAnnouncements = "could not finish writing local announcements"
ControllerTrustAnnouncementSuccessfullyFinished = "trust announcement successfully finished"
ControllerAnnouncementIsAlreadyStarted = "announcement is already started"
ControllerAnnouncementSuccessfullyInterrupted = "announcement successfully interrupted"
ControllerAnnouncementIsNotStartedOrAlreadyInterrupted = "announcement is not started or already interrupted"
ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements = "could not initialize iterator over locally accumulated announcements"
ControllerCouldNotInitializeResultTarget = "could not initialize result target"
ControllerIteratorOverLocalAnnouncementsAborted = "iterator over local announcements aborted"
ControllerCouldNotFinishWritingLoadEstimations = "could not finish writing load estimations"
RouteCouldNotInitializeWriterProvider = "could not initialize writer provider"
RouteCouldNotInitializeWriter = "could not initialize writer"
RouteCouldNotPutTheValue = "could not put the value"
RouteCouldNotCloseRemoteServerWriter = "could not close remote server writer"
ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch"
ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch"
ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch"
ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node"
ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established"
ClientSwitchingToTheNextRPCNode = "switching to the next RPC node"
ClientCouldNotEstablishConnectionToAnyRPCNode = "could not establish connection to any RPC node"
ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node"
ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC"
ClientCouldNotRestoreSideChainSubscriptionsUsingNode = "could not restore side chain subscriptions using node"
ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
ClientNotaryDepositInvoke = "notary deposit invoke"
ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked"
ClientNotaryRequestInvoked = "notary request invoked"
ClientNeoClientInvoke = "neo client invoke"
ClientNativeGasTransferInvoke = "native gas transfer invoke"
ClientBatchGasTransferInvoke = "batch gas transfer invoke"
ClientCantGetBlockchainHeight = "can't get blockchain height"
ClientCantGetBlockchainHeight243 = "can't get blockchain height"
EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool"
EventCouldNotStartListenToEvents = "could not start listen to events"
EventStopEventListenerByError = "stop event listener by error"
EventStopEventListenerByContext = "stop event listener by context"
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel"
EventNilNotificationEventWasCaught = "nil notification event was caught"
EventStopEventListenerByNotaryChannel = "stop event listener by notary channel"
EventNilNotaryEventWasCaught = "nil notary event was caught"
EventStopEventListenerByBlockChannel = "stop event listener by block channel"
EventNilBlockWasCaught = "nil block was caught"
EventListenerWorkerPoolDrained = "listener worker pool drained"
EventEventParserNotSet = "event parser not set"
EventCouldNotParseNotificationEvent = "could not parse notification event"
EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered"
EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event"
EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event"
EventNotaryParserNotSet = "notary parser not set"
EventCouldNotParseNotaryEvent = "could not parse notary event"
EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered"
EventIgnoreNilEventParser = "ignore nil event parser"
EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser"
EventRegisteredNewEventParser = "registered new event parser"
EventIgnoreNilEventHandler = "ignore nil event handler"
EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser"
EventRegisteredNewEventHandler = "registered new event handler"
EventIgnoreNilNotaryEventParser = "ignore nil notary event parser"
EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser"
EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler"
EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser"
EventIgnoreNilBlockHandler = "ignore nil block handler"
SubscriberRemoteNotificationChannelHasBeenClosed = "remote notification channel has been closed"
SubscriberCantCastNotifyEventValueToTheNotifyStruct = "can't cast notify event value to the notify struct"
SubscriberNewNotificationEventFromSidechain = "new notification event from sidechain"
SubscriberCantCastBlockEventValueToBlock = "can't cast block event value to block"
SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct = "can't cast notify event value to the notary request struct"
SubscriberUnsupportedNotificationFromTheChain = "unsupported notification from the chain"
BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB"
BlobovniczaOpeningBoltDB = "opening BoltDB"
BlobovniczaInitializing = "initializing..."
BlobovniczaAlreadyInitialized = "already initialized"
BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range"
BlobovniczaClosingBoltDB = "closing BoltDB"
BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket"
BlobstorOpening = "opening..."
BlobstorInitializing = "initializing..."
BlobstorClosing = "closing..."
BlobstorCouldntCloseStorage = "couldn't close storage"
BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking"
BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration"
EngineShardHasBeenRemoved = "shard has been removed"
EngineCouldNotCloseRemovedShard = "could not close removed shard"
EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping"
EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard"
EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping"
EngineCouldNotCloseShard = "could not close shard"
EngineCouldNotReloadAShard = "could not reload a shard"
EngineAddedNewShard = "added new shard"
EngineCouldNotMarkObjectForShardRelocation = "could not mark object for shard relocation"
EngineCouldNotPutObjectToShard = "could not put object to shard"
EngineErrorDuringSearchingForObjectChildren = "error during searching for object children"
EngineCouldNotInhumeObjectInShard = "could not inhume object in shard"
EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies"
EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine"
EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies"
EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check"
EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks"
EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks"
EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only"
EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode"
EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold"
EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold"
EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request"
EngineStartedShardsEvacuation = "started shards evacuation"
EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully"
EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error"
EngineObjectIsMovedToAnotherShard = "object is moved to another shard"
MetabaseMissingMatcher = "missing matcher"
MetabaseErrorInFKBTSelection = "error in FKBT selection"
MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf"
MetabaseUnknownOperation = "unknown operation"
MetabaseCantIterateOverTheBucket = "can't iterate over the bucket"
MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets"
MetabaseCreatedDirectoryForMetabase = "created directory for Metabase"
MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase"
MetabaseCheckingMetabaseVersion = "checking metabase version"
ShardCantSelectAllObjects = "can't select all objects"
ShardSettingShardMode = "setting shard mode"
ShardShardModeSetSuccessfully = "shard mode set successfully"
ShardCouldNotMarkObjectForShardRelocationInMetabase = "could not mark object for shard relocation in metabase"
ShardCantDeleteObjectFromWriteCache = "can't delete object from write cache"
ShardCantGetStorageIDFromMetabase = "can't get storage ID from metabase"
ShardCantRemoveObjectFromBlobStor = "can't remove object from blobStor"
ShardFetchingObjectWithoutMeta = "fetching object without meta"
ShardObjectIsMissingInWritecache = "object is missing in write-cache"
ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache"
ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor"
ShardMetaObjectCounterRead = "meta: object counter read"
ShardMetaCantReadContainerList = "meta: can't read container list"
ShardMetaCantReadContainerSize = "meta: can't read container size"
ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode"
ShardCouldNotUnmarshalObject = "could not unmarshal object"
ShardCouldNotCloseShardComponent = "could not close shard component"
ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
ShardTryingToRestoreReadwriteMode = "trying to restore read-write mode"
ShardStopEventListenerByClosedChannel = "stop event listener by closed channel"
ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool"
ShardGCIsStopped = "GC is stopped"
ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..."
ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed"
ShardCouldNotDeleteTheObjects = "could not delete the objects"
ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed"
ShardCouldNotInhumeTheObjects = "could not inhume the objects"
ShardStartedExpiredTombstonesHandling = "started expired tombstones handling"
ShardIteratingTombstones = "iterating tombstones"
ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones"
ShardIteratorOverGraveyardFailed = "iterator over graveyard failed"
ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch"
ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling"
ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed"
ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage"
ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records"
ShardFailureToUnlockObjects = "failure to unlock objects"
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations" // Debug in ../node/pkg/innerring/blocktimer.go
InnerringCantStopEpochEstimation = "can't stop epoch estimation" // Warn in ../node/pkg/innerring/blocktimer.go
InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain" // Error in ../node/pkg/innerring/notary.go
InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain" // Error in ../node/pkg/innerring/notary.go
InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/innerring/notary.go
InnerringCantGetInnerRingIndex = "can't get inner ring index" // Error in ../node/pkg/innerring/state.go
InnerringCantGetInnerRingSize = "can't get inner ring size" // Error in ../node/pkg/innerring/state.go
InnerringCantGetAlphabetIndex = "can't get alphabet index" // Error in ../node/pkg/innerring/state.go
InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range" // Info in ../node/pkg/innerring/state.go
InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list" // Info in ../node/pkg/innerring/state.go
InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract" // Warn in ../node/pkg/innerring/state.go
InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number" // Warn in ../node/pkg/innerring/initialization.go
InnerringNotarySupport = "notary support" // Info in ../node/pkg/innerring/initialization.go
InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled" // Debug in ../node/pkg/innerring/initialization.go
InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled" // Info in ../node/pkg/innerring/initialization.go
InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/pkg/innerring/initialization.go
InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global" // Info in ../node/pkg/innerring/initialization.go
InnerringCantVoteForPreparedValidators = "can't vote for prepared validators" // Warn in ../node/pkg/innerring/innerring.go
InnerringNewBlock = "new block" // Debug in ../node/pkg/innerring/innerring.go
InnerringCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/pkg/innerring/innerring.go
InnerringCloserError = "closer error" // Warn in ../node/pkg/innerring/innerring.go
InnerringReadConfigFromBlockchain = "read config from blockchain" // Debug in ../node/pkg/innerring/innerring.go
NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications" // Debug in ../node/pkg/services/notificator/service.go
NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification" // Debug in ../node/pkg/services/notificator/service.go
PolicerCouldNotGetContainer = "could not get container" // Error in ../node/pkg/services/policer/check.go
PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container" // Error in ../node/pkg/services/policer/check.go
PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object" // Error in ../node/pkg/services/policer/check.go
PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected" // Info in ../node/pkg/services/policer/check.go
PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance" // Error in ../node/pkg/services/policer/check.go
PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK" // Debug in ../node/pkg/services/policer/check.go
PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected" // Debug in ../node/pkg/services/policer/check.go
PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy" // Debug in ../node/pkg/services/policer/check.go
PolicerRoutineStopped = "routine stopped" // Info in ../node/pkg/services/policer/process.go
PolicerFailureAtObjectSelectForReplication = "failure at object select for replication" // Warn in ../node/pkg/services/policer/process.go
PolicerPoolSubmission = "pool submission" // Warn in ../node/pkg/services/policer/process.go
ReplicatorFinishWork = "finish work" // Debug in ../node/pkg/services/replicator/process.go
ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go
ReplicatorCouldNotReplicateObject = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go
ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" // Debug in ../node/pkg/services/replicator/process.go
TreeRedirectingTreeServiceQuery = "redirecting tree service query" // Debug in ../node/pkg/services/tree/redirect.go
TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL" // Debug in ../node/pkg/services/tree/signature.go
TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
TreeSynchronizeTree = "synchronize tree" // Debug in ../node/pkg/services/tree/sync.go
TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes" // Warn in ../node/pkg/services/tree/sync.go
TreeSyncingTrees = "syncing trees..." // Debug in ../node/pkg/services/tree/sync.go
TreeCouldNotFetchContainers = "could not fetch containers" // Error in ../node/pkg/services/tree/sync.go
TreeTreesHaveBeenSynchronized = "trees have been synchronized" // Debug in ../node/pkg/services/tree/sync.go
TreeSyncingContainerTrees = "syncing container trees..." // Debug in ../node/pkg/services/tree/sync.go
TreeCouldNotSyncTrees = "could not sync trees" // Error in ../node/pkg/services/tree/sync.go
TreeContainerTreesHaveBeenSynced = "container trees have been synced" // Debug in ../node/pkg/services/tree/sync.go
TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization" // Error in ../node/pkg/services/tree/sync.go
TreeRemovingRedundantTrees = "removing redundant trees..." // Debug in ../node/pkg/services/tree/sync.go
TreeCouldNotRemoveRedundantTree = "could not remove redundant tree" // Error in ../node/pkg/services/tree/sync.go
TreeCouldNotCalculateContainerNodes = "could not calculate container nodes" // Error in ../node/pkg/services/tree/sync.go
TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation" // Error in ../node/pkg/services/tree/replicator.go
TreeDoNotSendUpdateToTheNode = "do not send update to the node" // Debug in ../node/pkg/services/tree/replicator.go
TreeFailedToSentUpdateToTheNode = "failed to sent update to the node" // Warn in ../node/pkg/services/tree/replicator.go
TreeErrorDuringReplication = "error during replication" // Error in ../node/pkg/services/tree/replicator.go
PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage" // Error in ../node/pkg/services/session/storage/persistent/storage.go
PersistentCouldNotDeleteSToken = "could not delete token" // Error in ../node/pkg/services/session/storage/persistent/storage.go
PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens" // Error in ../node/pkg/services/session/storage/persistent/storage.go
ControllerReportIsAlreadyStarted = "report is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
DeleteRequestIsNotRolledOverToTheContainer = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go
DeleteCouldNotComposeSplitInfo = "could not compose split info" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteAssemblingChain = "assembling chain..." // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotGetPreviousSplitElement = "could not get previous split element" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCollectingChildren = "collecting children..." // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotCollectObjectChildren = "could not collect object children" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteSupplementBySplitID = "supplement by split ID" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotSearchForSplitChainMembers = "could not search for split chain members" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotMarshalTombstoneStructure = "could not marshal tombstone structure" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteCouldNotSaveTheTombstone = "could not save the tombstone" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteFormingTombstoneStructure = "forming tombstone structure..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteCouldNotReadTombstoneLifetimeConfig = "could not read tombstone lifetime config" // Debug in ../node/pkg/services/object/delete/local.go
DeleteFormingSplitInfo = "forming split info..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteMembersSuccessfullyCollected = "members successfully collected" // Debug in ../node/pkg/services/object/delete/local.go
GetRemoteCallFailed = "remote call failed" // Debug in ../node/pkg/services/object/get/remote.go
GetCanNotAssembleTheObject = "can not assemble the object" // Debug in ../node/pkg/services/object/get/assemble.go
GetTryingToAssembleTheObject = "trying to assemble the object..." // Debug in ../node/pkg/services/object/get/assemble.go
GetAssemblingSplittedObject = "assembling splitted object..." // Debug in ../node/pkg/services/object/get/assemble.go
GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" // Debug in ../node/pkg/services/object/get/assemble.go
GetFailedToAssembleSplittedObject = "failed to assemble splitted object" // Warn in ../node/pkg/services/object/get/assemble.go
GetCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotWriteHeader = "could not write header" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotWritePayloadChunk = "could not write payload chunk" // Debug in ../node/pkg/services/object/get/exec.go
GetLocalGetFailed = "local get failed" // Debug in ../node/pkg/services/object/get/local.go
GetReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/get/container.go
GetCompletingTheOperation = "completing the operation" // Debug in ../node/pkg/services/object/get/container.go
GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedObjectIsVirtual = "requested object is virtual" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds" // Debug in ../node/pkg/services/object/get/get.go
PutAdditionalContainerBroadcastFailure = "additional container broadcast failure" // Error in ../node/pkg/services/object/put/distributed.go
SearchReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/search/container.go
SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/search/container.go
SearchRemoteOperationFailed = "remote operation failed" // Debug in ../node/pkg/services/object/search/container.go
SearchCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/search/exec.go
SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" // Debug in ../node/pkg/services/object/search/exec.go
SearchLocalOperationFailed = "local operation failed" // Debug in ../node/pkg/services/object/search/local.go
UtilObjectServiceError = "object service error" // Error in ../node/pkg/services/object/util/log.go
UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" // Error in ../node/pkg/services/object/util/log.go
V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
NatsNatsConnectionWasLost = "nats: connection was lost" // Error in ../node/pkg/services/notificator/nats/service.go
NatsNatsReconnectedToTheServer = "nats: reconnected to the server" // Warn in ../node/pkg/services/notificator/nats/service.go
NatsNatsClosingConnectionAsTheContextIsDone = "nats: closing connection as the context is done" // Info in ../node/pkg/services/notificator/nats/service.go
ControllerStartingToAnnounceTheValuesOfTheMetrics = "starting to announce the values of the metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics = "could not initialize iterator over locally collected metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeAnnouncementAccumulator = "could not initialize announcement accumulator" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerIteratorOverLocallyCollectedMetricsAborted = "iterator over locally collected metrics aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotFinishWritingLocalAnnouncements = "could not finish writing local announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerTrustAnnouncementSuccessfullyFinished = "trust announcement successfully finished" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementIsAlreadyStarted = "announcement is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementSuccessfullyInterrupted = "announcement successfully interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerAnnouncementIsNotStartedOrAlreadyInterrupted = "announcement is not started or already interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements = "could not initialize iterator over locally accumulated announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotInitializeResultTarget = "could not initialize result target" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerIteratorOverLocalAnnouncementsAborted = "iterator over local announcements aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
ControllerCouldNotFinishWritingLoadEstimations = "could not finish writing load estimations" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
RouteCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
RouteCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
RouteCouldNotPutTheValue = "could not put the value" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
RouteCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node" // Warn in ../node/pkg/morph/client/multi.go
ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established" // Info in ../node/pkg/morph/client/multi.go
ClientSwitchingToTheNextRPCNode = "switching to the next RPC node" // Warn in ../node/pkg/morph/client/multi.go
ClientCouldNotEstablishConnectionToAnyRPCNode = "could not establish connection to any RPC node" // Error in ../node/pkg/morph/client/multi.go
ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node" // Warn in ../node/pkg/morph/client/multi.go
ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC" // Info in ../node/pkg/morph/client/multi.go
ClientCouldNotRestoreSideChainSubscriptionsUsingNode = "could not restore side chain subscriptions using node" // Warn in ../node/pkg/morph/client/multi.go
ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/morph/client/notary.go
ClientNotaryDepositInvoke = "notary deposit invoke" // Info in ../node/pkg/morph/client/notary.go
ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked" // Debug in ../node/pkg/morph/client/notary.go
ClientNotaryRequestInvoked = "notary request invoked" // Debug in ../node/pkg/morph/client/notary.go
ClientNeoClientInvoke = "neo client invoke" // Debug in ../node/pkg/morph/client/client.go
ClientNativeGasTransferInvoke = "native gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
ClientBatchGasTransferInvoke = "batch gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
ClientCantGetBlockchainHeight = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
ClientCantGetBlockchainHeight243 = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
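// The numeric suffix (here 243) keeps the identifier unique when the same message is
// logged more than once in a file: ClientCantGetBlockchainHeight243 duplicates the text
// of ClientCantGetBlockchainHeight above, both emitted from client.go.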
EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" // Warn in ../node/pkg/morph/event/utils.go
EventCouldNotStartListenToEvents = "could not start listen to events" // Error in ../node/pkg/morph/event/listener.go
EventStopEventListenerByError = "stop event listener by error" // Error in ../node/pkg/morph/event/listener.go
EventStopEventListenerByContext = "stop event listener by context" // Info in ../node/pkg/morph/event/listener.go
EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" // Warn in ../node/pkg/morph/event/listener.go
EventNilNotificationEventWasCaught = "nil notification event was caught" // Warn in ../node/pkg/morph/event/listener.go
EventStopEventListenerByNotaryChannel = "stop event listener by notary channel" // Warn in ../node/pkg/morph/event/listener.go
EventNilNotaryEventWasCaught = "nil notary event was caught" // Warn in ../node/pkg/morph/event/listener.go
EventStopEventListenerByBlockChannel = "stop event listener by block channel" // Warn in ../node/pkg/morph/event/listener.go
EventNilBlockWasCaught = "nil block was caught" // Warn in ../node/pkg/morph/event/listener.go
EventListenerWorkerPoolDrained = "listener worker pool drained" // Warn in ../node/pkg/morph/event/listener.go
EventEventParserNotSet = "event parser not set" // Debug in ../node/pkg/morph/event/listener.go
EventCouldNotParseNotificationEvent = "could not parse notification event" // Warn in ../node/pkg/morph/event/listener.go
EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event" // Warn in ../node/pkg/morph/event/listener.go
EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event" // Warn in ../node/pkg/morph/event/listener.go
EventNotaryParserNotSet = "notary parser not set" // Debug in ../node/pkg/morph/event/listener.go
EventCouldNotParseNotaryEvent = "could not parse notary event" // Warn in ../node/pkg/morph/event/listener.go
EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
EventIgnoreNilEventParser = "ignore nil event parser" // Info in ../node/pkg/morph/event/listener.go
EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser" // Warn in ../node/pkg/morph/event/listener.go
EventRegisteredNewEventParser = "registered new event parser" // Debug in ../node/pkg/morph/event/listener.go
EventIgnoreNilEventHandler = "ignore nil event handler" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
EventRegisteredNewEventHandler = "registered new event handler" // Debug in ../node/pkg/morph/event/listener.go
EventIgnoreNilNotaryEventParser = "ignore nil notary event parser" // Info in ../node/pkg/morph/event/listener.go
EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
EventIgnoreNilBlockHandler = "ignore nil block handler" // Warn in ../node/pkg/morph/event/listener.go
SubscriberRemoteNotificationChannelHasBeenClosed = "remote notification channel has been closed" // Warn in ../node/pkg/morph/subscriber/subscriber.go
SubscriberCantCastNotifyEventValueToTheNotifyStruct = "can't cast notify event value to the notify struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberNewNotificationEventFromSidechain = "new notification event from sidechain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
SubscriberCantCastBlockEventValueToBlock = "can't cast block event value to block" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct = "can't cast notify event value to the notary request struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
SubscriberUnsupportedNotificationFromTheChain = "unsupported notification from the chain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaOpeningBoltDB = "opening BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaAlreadyInitialized = "already initialized" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaClosingBoltDB = "closing BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket" // Debug in ../node/pkg/local_object_storage/blobovnicza/delete.go
BlobstorOpening = "opening..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorClosing = "closing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorCouldntCloseStorage = "couldn't close storage" // Info in ../node/pkg/local_object_storage/blobstor/control.go
BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking" // Warn in ../node/pkg/local_object_storage/blobstor/exists.go
BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration" // Warn in ../node/pkg/local_object_storage/blobstor/iterate.go
EngineShardHasBeenRemoved = "shard has been removed" // Info in ../node/pkg/local_object_storage/engine/shards.go
EngineCouldNotCloseRemovedShard = "could not close removed shard" // Error in ../node/pkg/local_object_storage/engine/shards.go
EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotCloseShard = "could not close shard" // Debug in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotReloadAShard = "could not reload a shard" // Error in ../node/pkg/local_object_storage/engine/control.go
EngineAddedNewShard = "added new shard" // Info in ../node/pkg/local_object_storage/engine/control.go
EngineCouldNotMarkObjectForShardRelocation = "could not mark object for shard relocation" // Warn in ../node/pkg/local_object_storage/engine/put.go
EngineCouldNotPutObjectToShard = "could not put object to shard" // Warn in ../node/pkg/local_object_storage/engine/put.go
EngineErrorDuringSearchingForObjectChildren = "error during searching for object children" // Warn in ../node/pkg/local_object_storage/engine/delete.go
EngineCouldNotInhumeObjectInShard = "could not inhume object in shard" // Debug in ../node/pkg/local_object_storage/engine/delete.go
EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies" // Info in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine" // Debug in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies" // Error in ../node/pkg/local_object_storage/engine/remove_copies.go
EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check" // Warn in ../node/pkg/local_object_storage/engine/inhume.go
EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" // Error in ../node/pkg/local_object_storage/engine/engine.go
EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" // Error in ../node/pkg/local_object_storage/engine/engine.go
EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request" // Debug in ../node/pkg/local_object_storage/engine/engine.go
EngineStartedShardsEvacuation = "started shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error" // Error in ../node/pkg/local_object_storage/engine/evacuate.go
EngineObjectIsMovedToAnotherShard = "object is moved to another shard" // Debug in ../node/pkg/local_object_storage/engine/evacuate.go
MetabaseMissingMatcher = "missing matcher" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseErrorInFKBTSelection = "error in FKBT selection" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseUnknownOperation = "unknown operation" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCantIterateOverTheBucket = "can't iterate over the bucket" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets" // Debug in ../node/pkg/local_object_storage/metabase/select.go
MetabaseCreatedDirectoryForMetabase = "created directory for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
MetabaseCheckingMetabaseVersion = "checking metabase version" // Debug in ../node/pkg/local_object_storage/metabase/control.go
ShardCantSelectAllObjects = "can't select all objects" // Debug in ../node/pkg/local_object_storage/shard/list.go
ShardSettingShardMode = "setting shard mode" // Info in ../node/pkg/local_object_storage/shard/mode.go
ShardShardModeSetSuccessfully = "shard mode set successfully" // Info in ../node/pkg/local_object_storage/shard/mode.go
ShardCouldNotMarkObjectForShardRelocationInMetabase = "could not mark object for shard relocation in metabase" // Debug in ../node/pkg/local_object_storage/shard/move.go
ShardCantDeleteObjectFromWriteCache = "can't delete object from write cache" // Warn in ../node/pkg/local_object_storage/shard/delete.go
ShardCantGetStorageIDFromMetabase = "can't get storage ID from metabase" // Debug in ../node/pkg/local_object_storage/shard/delete.go
ShardCantRemoveObjectFromBlobStor = "can't remove object from blobStor" // Debug in ../node/pkg/local_object_storage/shard/delete.go
ShardFetchingObjectWithoutMeta = "fetching object without meta" // Warn in ../node/pkg/local_object_storage/shard/get.go
ShardObjectIsMissingInWritecache = "object is missing in write-cache" // Debug in ../node/pkg/local_object_storage/shard/get.go
ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache" // Error in ../node/pkg/local_object_storage/shard/get.go
ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor" // Debug in ../node/pkg/local_object_storage/shard/put.go
ShardMetaObjectCounterRead = "meta: object counter read" // Warn in ../node/pkg/local_object_storage/shard/shard.go
ShardMetaCantReadContainerList = "meta: can't read container list" // Warn in ../node/pkg/local_object_storage/shard/shard.go
ShardMetaCantReadContainerSize = "meta: can't read container size" // Warn in ../node/pkg/local_object_storage/shard/shard.go
ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCouldNotUnmarshalObject = "could not unmarshal object" // Warn in ../node/pkg/local_object_storage/shard/control.go
ShardCouldNotCloseShardComponent = "could not close shard component" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode" // Error in ../node/pkg/local_object_storage/shard/control.go
ShardTryingToRestoreReadwriteMode = "trying to restore read-write mode" // Info in ../node/pkg/local_object_storage/shard/control.go
ShardStopEventListenerByClosedChannel = "stop event listener by closed channel" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardGCIsStopped = "GC is stopped" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..." // Info in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotDeleteTheObjects = "could not delete the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotInhumeTheObjects = "could not inhume the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardStartedExpiredTombstonesHandling = "started expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratingTombstones = "iterating tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverGraveyardFailed = "iterator over graveyard failed" // Error in ../node/pkg/local_object_storage/shard/gc.go
ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToUnlockObjects = "failure to unlock objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" // Debug in ../node/pkg/local_object_storage/shard/inhume.go
WritecacheBadgerInitExperimental = "initializing badger-backed experimental writecache"
WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache"
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree"
WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks"
WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database"
WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks"
WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database"
WritecacheCantParseAddress = "can't parse address"
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache" // Debug in ../node/pkg/local_object_storage/writecache/flush.go
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" // Info in ../node/pkg/local_object_storage/writecache/mode.go
WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantParseAddress = "can't parse address" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" // Error in ../node/pkg/local_object_storage/writecache/storage.go
WritecacheDBValueLogGCRunCompleted = "value log GC run completed"
WritecacheBadgerObjAlreadyScheduled = "object already scheduled for flush"
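// WritecacheBadgerInitExperimental, WritecacheDBValueLogGCRunCompleted and
// WritecacheBadgerObjAlreadyScheduled relate to the badger-backed write-cache;
// "value log GC" refers to Badger's value-log garbage collection run.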
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza"
BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza"
BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict"
BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..."
BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated"
BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated"
BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level"
BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza"
BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza"
BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza"
BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed"
BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza"
BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza"
BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza"
BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza"
BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's"
BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..."
BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..."
BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza"
AlphabetTick = "tick"
AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained"
AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event"
AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event"
AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method"
AlphabetStorageNodeEmissionIsOff = "storage node emission is off"
AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes"
AlphabetGasEmission = "gas emission"
AlphabetCantParseNodePublicKey = "can't parse node public key"
AlphabetCantTransferGas = "can't transfer gas"
AlphabetCantTransferGasToWallet = "can't transfer gas to wallet"
AlphabetAlphabetWorkerPool = "alphabet worker pool"
BalanceBalanceWorkerPoolDrained = "balance worker pool drained"
BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock"
BalanceCantSendLockAssetTx = "can't send lock asset tx"
BalanceBalanceWorkerPool = "balance worker pool"
ContainerContainerWorkerPool = "container worker pool"
ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained"
ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put"
ContainerPutContainerCheckFailed = "put container check failed"
ContainerCouldNotApprovePutContainer = "could not approve put container"
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
ContainerDeleteContainerCheckFailed = "delete container check failed"
ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL"
ContainerSetEACLCheckFailed = "set EACL check failed"
ContainerCouldNotApproveSetEACL = "could not approve set EACL"
FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind"
FrostFSInvalidManageKeyEvent = "invalid manage key event"
FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes"
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
FrostFSCantRelaySetConfigEvent = "can't relay set config event"
FrostFSFrostfsWorkerPool = "frostfs worker pool"
FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained"
FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit"
FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract"
FrostFSDoubleMintEmissionDeclined = "double mint emission declined"
FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node"
FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached"
FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver"
FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw"
FrostFSCantCreateLockAccount = "can't create lock account"
FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw"
FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque"
FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract"
GovernanceNewEvent = "new event"
GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained"
GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync"
GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net"
GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain"
GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain"
GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed"
GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update"
GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee"
GovernanceFinishedAlphabetListUpdate = "finished alphabet list update"
GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain"
GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys"
GovernanceUpdateOfTheInnerRingList = "update of the inner ring list"
GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys"
GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain"
GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract"
NetmapNetmapWorkerPool = "netmap worker pool"
NetmapTick = "tick"
NetmapNetmapWorkerPoolDrained = "netmap worker pool drained"
NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled"
NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick"
NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node"
NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap"
NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState"
NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache"
NetmapCantGetEpochDuration = "can't get epoch duration"
NetmapCantGetTransactionHeight = "can't get transaction height"
NetmapCantResetEpochTimer = "can't reset epoch timer"
NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup"
NetmapCantStartContainerSizeEstimation = "can't start container size estimation"
NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick"
NetmapNextEpoch = "next epoch"
NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch"
NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification"
NetmapNonhaltNotaryTransaction = "non-halt notary transaction"
NetmapCantParseNetworkMapCandidate = "can't parse network map candidate"
NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate"
NetmapApprovingNetworkMapCandidate = "approving network map candidate"
NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer"
NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification"
NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state"
NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer"
FrostFSIRInternalError = "internal error"
FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server"
FrostFSIRApplicationStopped = "application stopped"
FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint"
FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint"
FrostFSIRReloadExtraWallets = "reload extra wallets"
FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file"
FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint"
FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint"
FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint"
FrostFSNodeStoppingGRPCServer = "stopping gRPC server..."
FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop"
FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully"
FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop"
FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance"
FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance"
FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine"
FrostFSNodeShardAttachedToEngine = "shard attached to engine"
FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..."
FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure"
FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully"
FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state"
FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state"
FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..."
FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete"
FrostFSNodeInternalApplicationError = "internal application error"
FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete"
FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..."
FrostFSNodeConfigurationReading = "configuration reading"
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation"
FrostFSNodeTracingConfigationUpdated = "tracing configuration updated"
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update"
FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying"
FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully"
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract"
FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine"
FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully"
FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers"
FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container"
FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object"
FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications"
FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification"
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
FrostFSNodeFailedInitTracing = "failed init tracing"
FrostFSNodeFailedShutdownTracing = "failed shutdown tracing"
FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client"
FrostFSNodeClosingMorphComponents = "closing morph components..."
FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global"
FrostFSNodeNotarySupport = "notary support"
FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network"
FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made"
FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number"
FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain"
FrostFSNodeNewBlock = "new block"
FrostFSNodeCantUpdatePersistentState = "can't update persistent state"
FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx"
FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch"
FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit"
FrostFSNodeInitialNetworkState = "initial network state"
FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization"
FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service"
FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container"
FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed"
FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)"
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza" // Error in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
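// Note: entries marked "reportError" above (e.g. BlobovniczatreeCouldNotGetActiveBlobovnicza)
// are not tied to a fixed log level; as the trailing comments record, they are passed to the
// blobovnicza tree's internal error-reporting helper in put.go.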
AlphabetTick = "tick" // Info in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained" // Warn in ../node/pkg/innerring/processors/alphabet/handlers.go
AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetStorageNodeEmissionIsOff = "storage node emission is off" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetGasEmission = "gas emission" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantParseNodePublicKey = "can't parse node public key" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGas = "can't transfer gas" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetCantTransferGasToWallet = "can't transfer gas to wallet" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
AlphabetAlphabetWorkerPool = "alphabet worker pool" // Debug in ../node/pkg/innerring/processors/alphabet/processor.go
BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApprovePutContainer = "could not approve put container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerDeleteContainerCheckFailed = "delete container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerCouldNotApproveDeleteContainer = "could not approve delete container" // Error in ../node/pkg/innerring/processors/container/process_container.go
ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL" // Info in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerSetEACLCheckFailed = "set EACL check failed" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
ContainerCouldNotApproveSetEACL = "could not approve set EACL" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind" // Info in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostFSInvalidManageKeyEvent = "invalid manage key event" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go
FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSDoubleMintEmissionDeclined = "double mint emission declined" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantCreateLockAccount = "can't create lock account" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
GovernanceNewEvent = "new event" // Info in ../node/pkg/innerring/processors/governance/handlers.go
GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained" // Warn in ../node/pkg/innerring/processors/governance/handlers.go
GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceFinishedAlphabetListUpdate = "finished alphabet list update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceUpdateOfTheInnerRingList = "update of the inner ring list" // Info in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract" // Error in ../node/pkg/innerring/processors/governance/process_update.go
NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go
NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantGetEpochDuration = "can't get epoch duration" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantGetTransactionHeight = "can't get transaction height" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantResetEpochTimer = "can't reset epoch timer" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantStartContainerSizeEstimation = "can't start container size estimation" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" // Info in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNextEpoch = "next epoch" // Debug in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" // Error in ../node/pkg/innerring/processors/netmap/process_epoch.go
NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonhaltNotaryTransaction = "non-halt notary transaction" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantParseNetworkMapCandidate = "can't parse network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapApprovingNetworkMapCandidate = "approving network map candidate" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go
FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go
FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go
FrostFSIRCouldntCreateRPCClientForEndpoint = "could not create RPC client for endpoint" // Debug in ../node/pkg/morph/client/constructor.go
FrostFSIRCreatedRPCClientForEndpoint = "created RPC client for endpoint" // Info in ../node/pkg/morph/client/constructor.go
FrostFSIRReloadExtraWallets = "reload extra wallets" // Info in ../node/cmd/frostfs-ir/config.go
FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go
FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go
FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeTracingConfigationUpdated = "tracing configation updated" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go
FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go
FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go
FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeClosingMorphComponents = "closing morph components..." // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go
FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go
FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go
FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go
FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go
FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go
FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go
FrostFSNodePolicerIsDisabled = "policer is disabled"
CommonApplicationStarted = "application started"
ShardGCCollectingExpiredObjectsStarted = "collecting expired objects started"
@ -504,10 +509,9 @@ const (
TombstoneExpirationParseFailure = "tombstone getter: could not parse tombstone expiration epoch"
FrostFSNodeCantUpdateObjectStorageID = "can't update object storage ID"
FrostFSNodeCantFlushObjectToBlobstor = "can't flush an object to blobstor"
FrostFSNodeCantDecodeObjectAddressFromDB = "can't decode object address from the DB"
FrostFSNodeCantUnmarshalObjectFromDB = "can't unmarshal an object from the DB"
FrostFSNodeCantDecodeObjectAddressFromDB = "can't decode object address from the DB" // Error in ../node/cmd/frostfs-node/morph.go
FrostFSNodeCantUnmarshalObjectFromDB = "can't unmarshal an object from the DB" // Error in ../node/cmd/frostfs-node/morph.go
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
FailedToCountWritecacheItems = "failed to count writecache items"
AttemtToCloseAlreadyClosedBlobovnicza = "attempt to close an already closed blobovnicza"
)
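As a point of reference for how these message constants are consumed, here is a minimal sketch assuming a zap-based logger, which is what the node code uses; the constant is copied from the list above, everything else (package, field names) is illustrative only:

package main

import "go.uber.org/zap"

// Copied from the list above; in the real code the constants live in internal/logs.
const RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"

func main() {
	log, _ := zap.NewDevelopment()
	// Passing a named constant instead of an inline literal keeps messages greppable
	// and plays well with a linter that forbids string literals in log calls.
	log.Info(RuntimeSoftMemoryLimitUpdated, zap.Int64("limit_bytes", 1<<30))
}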

View file

@ -20,15 +20,6 @@ type Container struct {
Session *session.Container
}
// DelInfo contains info about removed container.
type DelInfo struct {
// Container owner.
Owner []byte
// Epoch indicates when the container was removed.
Epoch int
}
// Source is an interface that wraps
// basic container receiving method.
type Source interface {
@ -41,8 +32,6 @@ type Source interface {
// Implementations must not retain the container pointer and modify
// the container through it.
Get(cid.ID) (*Container, error)
DeletionInfo(cid.ID) (*DelInfo, error)
}
// EACL groups information about the FrostFS container's extended ACL stored in

View file

@ -1,22 +0,0 @@
package container
import (
"errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// WasRemoved checks whether the container ever existed or
// it just has not been created yet at the current epoch.
func WasRemoved(s Source, cid cid.ID) (bool, error) {
_, err := s.DeletionInfo(cid)
if err == nil {
return true, nil
}
var errContainerNotFound *apistatus.ContainerNotFound
if errors.As(err, &errContainerNotFound) {
return false, nil
}
return false, err
}
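For orientation, a hedged sketch of how a caller could combine WasRemoved with the DeletionInfo API above; the helper name and its error wrapping are assumptions, only the container.Source and WasRemoved calls come from the code itself:

package example

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// shouldSkipContainer is a hypothetical helper: pending work for a container
// can be dropped once the container is known to have been removed.
func shouldSkipContainer(src container.Source, id cid.ID) (bool, error) {
	removed, err := container.WasRemoved(src, id)
	if err != nil {
		return false, fmt.Errorf("check container removal: %w", err)
	}
	return removed, nil
}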

View file

@ -3,17 +3,13 @@ package object
import (
"context"
"crypto/ecdsa"
"crypto/sha256"
"errors"
"fmt"
"strconv"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -24,8 +20,6 @@ import (
// FormatValidator represents an object format validator.
type FormatValidator struct {
*cfg
senderClassifier SenderClassifier
}
// FormatValidatorOption represents a FormatValidator constructor option.
@ -34,11 +28,6 @@ type FormatValidatorOption func(*cfg)
type cfg struct {
netState netmap.State
e LockSource
ir InnerRing
netmap netmap.Source
containers container.Source
log *logger.Logger
verifyTokenIssuer bool
}
// DeleteHandler is an interface of delete queue processor.
@ -91,7 +80,6 @@ func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator {
return &FormatValidator{
cfg: cfg,
senderClassifier: NewSenderClassifier(cfg.ir, cfg.netmap, cfg.log),
}
}
@ -165,52 +153,14 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error {
}
token := obj.SessionToken()
ownerID := *obj.OwnerID()
if token == nil || !token.AssertAuthKey(&key) {
return v.checkOwnerKey(ownerID, key)
}
if v.verifyTokenIssuer {
signerIsIROrContainerNode, err := v.isIROrContainerNode(obj, binKey)
if err != nil {
return err
}
if signerIsIROrContainerNode {
return nil
}
if !token.Issuer().Equals(ownerID) {
return fmt.Errorf("(%T) different token issuer and object owner identifiers %s/%s", v, token.Issuer(), ownerID)
}
return nil
return v.checkOwnerKey(*obj.OwnerID(), key)
}
return nil
}
func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (bool, error) {
cnrID, containerIDSet := obj.ContainerID()
if !containerIDSet {
return false, errNilCID
}
cnrIDBin := make([]byte, sha256.Size)
cnrID.Encode(cnrIDBin)
cnr, err := v.containers.Get(cnrID)
if err != nil {
return false, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err)
}
res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value)
if err != nil {
return false, err
}
return res.Role == acl.RoleContainer || res.Role == acl.RoleInnerRing, nil
}
func (v *FormatValidator) checkOwnerKey(id user.ID, key frostfsecdsa.PublicKey) error {
var id2 user.ID
user.IDFromKey(&id2, (ecdsa.PublicKey)(key))
@ -432,38 +382,3 @@ func WithLockSource(e LockSource) FormatValidatorOption {
c.e = e
}
}
// WithInnerRing return option to set Inner Ring source.
func WithInnerRing(ir InnerRing) FormatValidatorOption {
return func(c *cfg) {
c.ir = ir
}
}
// WithNetmapSource return option to set Netmap source.
func WithNetmapSource(ns netmap.Source) FormatValidatorOption {
return func(c *cfg) {
c.netmap = ns
}
}
// WithContainersSource return option to set Containers source.
func WithContainersSource(cs container.Source) FormatValidatorOption {
return func(c *cfg) {
c.containers = cs
}
}
// WithVerifySessionTokenIssuer return option to set verify session token issuer value.
func WithVerifySessionTokenIssuer(verifySessionTokenIssuer bool) FormatValidatorOption {
return func(c *cfg) {
c.verifyTokenIssuer = verifySessionTokenIssuer
}
}
// WithLogger return option to set logger.
func WithLogger(l *logger.Logger) FormatValidatorOption {
return func(c *cfg) {
c.log = l
}
}
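Taken together, a minimal sketch of wiring these options; the option and method names are the ones defined above, while the dependency values (netState, lockSource, and so on) are assumed to exist in the caller:

v := NewFormatValidator(
	WithNetState(netState),
	WithLockSource(lockSource),
	WithInnerRing(irSource),
	WithNetmapSource(netmapSource),
	WithContainersSource(containerSource),
	WithVerifySessionTokenIssuer(true),
	WithLogger(log),
)
// Validate now also checks that a session token issuer is the owner,
// an Inner Ring node, or a container node when verification is enabled.
if err := v.Validate(ctx, obj, false); err != nil {
	// the object failed format, signature, or token issuer checks
}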

View file

@ -3,27 +3,18 @@ package object
import (
"context"
"crypto/ecdsa"
"fmt"
"strconv"
"testing"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/google/uuid"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
func blankValidObject(key *ecdsa.PrivateKey) *objectSDK.Object {
@ -65,7 +56,6 @@ func TestFormatValidator_Validate(t *testing.T) {
epoch: curEpoch,
}),
WithLockSource(ls),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
ownerKey, err := keys.NewPrivateKey()
@ -264,356 +254,3 @@ func TestFormatValidator_Validate(t *testing.T) {
})
})
}
func TestFormatValidator_ValidateTokenIssuer(t *testing.T) {
const curEpoch = 13
ls := testLockSource{
m: make(map[oid.Address]bool),
}
signer, err := keys.NewPrivateKey()
require.NoError(t, err)
var owner user.ID
ownerPrivKey, err := keys.NewPrivateKey()
require.NoError(t, err)
user.IDFromKey(&owner, ownerPrivKey.PrivateKey.PublicKey)
t.Run("different issuer and owner, verify issuer disabled", func(t *testing.T) {
t.Parallel()
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(false),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
obj := objectSDK.New()
obj.SetContainerID(cidtest.ID())
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("different issuer and owner, issuer is IR node, verify issuer enabled", func(t *testing.T) {
t.Parallel()
cnrID := cidtest.ID()
cont := containerSDK.Container{}
cont.Init()
pp := netmap.PlacementPolicy{}
require.NoError(t, pp.DecodeString("REP 1"))
cont.SetPlacementPolicy(pp)
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(true),
WithInnerRing(&testIRSource{
irNodes: [][]byte{signer.PublicKey().Bytes()},
}),
WithContainersSource(
&testContainerSource{
containers: map[cid.ID]*container.Container{
cnrID: {
Value: cont,
},
},
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("different issuer and owner, issuer is container node in current epoch, verify issuer enabled", func(t *testing.T) {
t.Parallel()
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
cnrID := cidtest.ID()
cont := containerSDK.Container{}
cont.Init()
pp := netmap.PlacementPolicy{}
require.NoError(t, pp.DecodeString("REP 1"))
cont.SetPlacementPolicy(pp)
var node netmap.NodeInfo
node.SetPublicKey(signer.PublicKey().Bytes())
currentEpochNM := &netmap.NetMap{}
currentEpochNM.SetEpoch(curEpoch)
currentEpochNM.SetNodes([]netmap.NodeInfo{node})
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(true),
WithInnerRing(&testIRSource{
irNodes: [][]byte{},
}),
WithContainersSource(
&testContainerSource{
containers: map[cid.ID]*container.Container{
cnrID: {
Value: cont,
},
},
},
),
WithNetmapSource(
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
},
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("different issuer and owner, issuer is container node in previous epoch, verify issuer enabled", func(t *testing.T) {
t.Parallel()
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
cnrID := cidtest.ID()
cont := containerSDK.Container{}
cont.Init()
pp := netmap.PlacementPolicy{}
require.NoError(t, pp.DecodeString("REP 1"))
cont.SetPlacementPolicy(pp)
var issuerNode netmap.NodeInfo
issuerNode.SetPublicKey(signer.PublicKey().Bytes())
var nonIssuerNode netmap.NodeInfo
nonIssuerKey, err := keys.NewPrivateKey()
require.NoError(t, err)
nonIssuerNode.SetPublicKey(nonIssuerKey.PublicKey().Bytes())
currentEpochNM := &netmap.NetMap{}
currentEpochNM.SetEpoch(curEpoch)
currentEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode})
previousEpochNM := &netmap.NetMap{}
previousEpochNM.SetEpoch(curEpoch - 1)
previousEpochNM.SetNodes([]netmap.NodeInfo{issuerNode})
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(true),
WithInnerRing(&testIRSource{
irNodes: [][]byte{},
}),
WithContainersSource(
&testContainerSource{
containers: map[cid.ID]*container.Container{
cnrID: {
Value: cont,
},
},
},
),
WithNetmapSource(
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("different issuer and owner, issuer is unknown, verify issuer enabled", func(t *testing.T) {
t.Parallel()
tok := sessiontest.Object()
fsPubKey := frostfsecdsa.PublicKey(*signer.PublicKey())
tok.SetID(uuid.New())
tok.SetAuthKey(&fsPubKey)
tok.SetExp(100500)
tok.SetIat(1)
tok.SetNbf(1)
require.NoError(t, tok.Sign(signer.PrivateKey))
cnrID := cidtest.ID()
cont := containerSDK.Container{}
cont.Init()
pp := netmap.PlacementPolicy{}
require.NoError(t, pp.DecodeString("REP 1"))
cont.SetPlacementPolicy(pp)
var nonIssuerNode1 netmap.NodeInfo
nonIssuerKey1, err := keys.NewPrivateKey()
require.NoError(t, err)
nonIssuerNode1.SetPublicKey(nonIssuerKey1.PublicKey().Bytes())
var nonIssuerNode2 netmap.NodeInfo
nonIssuerKey2, err := keys.NewPrivateKey()
require.NoError(t, err)
nonIssuerNode2.SetPublicKey(nonIssuerKey2.PublicKey().Bytes())
currentEpochNM := &netmap.NetMap{}
currentEpochNM.SetEpoch(curEpoch)
currentEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode1})
previousEpochNM := &netmap.NetMap{}
previousEpochNM.SetEpoch(curEpoch - 1)
previousEpochNM.SetNodes([]netmap.NodeInfo{nonIssuerNode2})
obj := objectSDK.New()
obj.SetContainerID(cnrID)
obj.SetSessionToken(tok)
obj.SetOwnerID(&owner)
require.NoError(t, objectSDK.SetIDWithSignature(signer.PrivateKey, obj))
v := NewFormatValidator(
WithNetState(testNetState{
epoch: curEpoch,
}),
WithLockSource(ls),
WithVerifySessionTokenIssuer(true),
WithInnerRing(&testIRSource{
irNodes: [][]byte{},
}),
WithContainersSource(
&testContainerSource{
containers: map[cid.ID]*container.Container{
cnrID: {
Value: cont,
},
},
},
),
WithNetmapSource(
&testNetmapSource{
netmaps: map[uint64]*netmap.NetMap{
curEpoch: currentEpochNM,
curEpoch - 1: previousEpochNM,
},
currentEpoch: curEpoch,
},
),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
)
require.Error(t, v.Validate(context.Background(), obj, false))
})
}
type testIRSource struct {
irNodes [][]byte
}
func (s *testIRSource) InnerRingKeys() ([][]byte, error) {
return s.irNodes, nil
}
type testContainerSource struct {
containers map[cid.ID]*container.Container
}
func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) {
if cnr, found := s.containers[cnrID]; found {
return cnr, nil
}
return nil, fmt.Errorf("container not found")
}
func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) {
return nil, nil
}
type testNetmapSource struct {
netmaps map[uint64]*netmap.NetMap
currentEpoch uint64
}
func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) {
if diff >= s.currentEpoch {
return nil, fmt.Errorf("invalid diff")
}
return s.GetNetMapByEpoch(s.currentEpoch - diff)
}
func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
if nm, found := s.netmaps[epoch]; found {
return nm, nil
}
return nil, fmt.Errorf("netmap not found")
}
func (s *testNetmapSource) Epoch() (uint64, error) {
return s.currentEpoch, nil
}

View file

@ -17,7 +17,6 @@ type Blobovnicza struct {
cfg
dataSize atomic.Uint64
itemsCount atomic.Uint64
boltDB *bbolt.DB

View file

@ -3,6 +3,7 @@ package blobovnicza
import (
"context"
"crypto/rand"
"errors"
"os"
"testing"
@ -63,7 +64,7 @@ func TestBlobovnicza(t *testing.T) {
WithPath(p),
WithObjectSizeLimit(objSizeLim),
WithFullSizeLimit(sizeLim),
WithLogger(test.NewLogger(t, true)),
WithLogger(test.NewLogger(t, false)),
)
defer os.Remove(p)
@ -97,9 +98,9 @@ func TestBlobovnicza(t *testing.T) {
testPutGet(t, blz, oidtest.Address(), objSizeLim, nil, nil)
}
// blobovnizca accepts object even if full
// from now on, objects should not be saved
testPutGet(t, blz, oidtest.Address(), 1024, func(err error) bool {
return err == nil
return errors.Is(err, ErrFull)
}, nil)
require.NoError(t, blz.Close())

View file

@ -14,7 +14,7 @@ import (
// Open opens an internal database at the configured path with the configured permissions.
//
// If the database file does not exist, it will be created automatically.
// If blobovnicza is already open, does nothing.
// If blobovnizca is already open, does nothing.
func (b *Blobovnicza) Open() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@ -45,7 +45,7 @@ func (b *Blobovnicza) Open() error {
b.boltDB, err = bbolt.Open(b.path, b.perm, b.boltOptions)
if err == nil {
b.opened = true
b.metrics.IncOpenBlobovniczaCount()
b.metrics.IncOpenBlobovnizcaCount()
}
return err
@ -54,13 +54,13 @@ func (b *Blobovnicza) Open() error {
// Init initializes internal database structure.
//
// If Blobovnicza is already initialized, no action is taken.
// Blobovnicza must be open, otherwise an error will return.
// Blobovnizca must be open, otherwise an error will return.
func (b *Blobovnicza) Init() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
if !b.opened {
return errors.New("blobovnicza is not open")
return errors.New("blobovnizca is not open")
}
b.log.Debug(logs.BlobovniczaInitializing,
@ -68,10 +68,8 @@ func (b *Blobovnicza) Init() error {
zap.Uint64("storage size limit", b.fullSizeLimit),
)
size := b.dataSize.Load()
items := b.itemsCount.Load()
if size != 0 || items != 0 {
b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items))
if size := b.dataSize.Load(); size != 0 {
b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size))
return nil
}
@ -98,17 +96,14 @@ func (b *Blobovnicza) Init() error {
}
}
return b.initializeCounters()
return b.initializeSize()
}
func (b *Blobovnicza) initializeCounters() error {
func (b *Blobovnicza) initializeSize() error {
var size uint64
var items uint64
err := b.boltDB.View(func(tx *bbolt.Tx) error {
return b.iterateAllBuckets(tx, func(lower, upper uint64, b *bbolt.Bucket) (bool, error) {
keysN := uint64(b.Stats().KeyN)
size += keysN * upper
items += keysN
size += uint64(b.Stats().KeyN) * upper
return false, nil
})
})
@ -116,15 +111,13 @@ func (b *Blobovnicza) initializeCounters() error {
return fmt.Errorf("can't determine DB size: %w", err)
}
b.dataSize.Store(size)
b.itemsCount.Store(items)
b.metrics.AddOpenBlobovniczaSize(size)
b.metrics.AddOpenBlobovniczaItems(items)
b.metrics.AddOpenBlobovnizcaSize(size)
return nil
}
// Close releases all internal database resources.
//
// If blobovnicza is already closed, does nothing.
// If blobovnizca is already closed, does nothing.
func (b *Blobovnicza) Close() error {
b.controlMtx.Lock()
defer b.controlMtx.Unlock()
@ -141,11 +134,9 @@ func (b *Blobovnicza) Close() error {
return err
}
b.metrics.DecOpenBlobovniczaCount()
b.metrics.SubOpenBlobovniczaSize(b.dataSize.Load())
b.metrics.SubOpenBlobovniczaItems(b.itemsCount.Load())
b.metrics.DecOpenBlobovnizcaCount()
b.metrics.SubOpenBlobovnizcaSize(b.dataSize.Load())
b.dataSize.Store(0)
b.itemsCount.Store(0)
b.opened = false

View file

@ -74,7 +74,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err
zap.String("binary size", stringifyByteSize(dataSize)),
zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)),
)
b.itemDeleted(sizeUpperBound)
b.decSize(sizeUpperBound)
}
return DeleteRes{}, err

View file

@ -1,21 +1,16 @@
package blobovnicza
type Metrics interface {
IncOpenBlobovniczaCount()
DecOpenBlobovniczaCount()
IncOpenBlobovnizcaCount()
DecOpenBlobovnizcaCount()
AddOpenBlobovniczaSize(size uint64)
SubOpenBlobovniczaSize(size uint64)
AddOpenBlobovniczaItems(items uint64)
SubOpenBlobovniczaItems(items uint64)
AddOpenBlobovnizcaSize(size uint64)
SubOpenBlobovnizcaSize(size uint64)
}
type NoopMetrics struct{}
func (m *NoopMetrics) IncOpenBlobovniczaCount() {}
func (m *NoopMetrics) DecOpenBlobovniczaCount() {}
func (m *NoopMetrics) AddOpenBlobovniczaSize(uint64) {}
func (m *NoopMetrics) SubOpenBlobovniczaSize(uint64) {}
func (m *NoopMetrics) AddOpenBlobovniczaItems(uint64) {}
func (m *NoopMetrics) SubOpenBlobovniczaItems(uint64) {}
func (m *NoopMetrics) IncOpenBlobovnizcaCount() {}
func (m *NoopMetrics) DecOpenBlobovnizcaCount() {}
func (m *NoopMetrics) AddOpenBlobovnizcaSize(uint64) {}
func (m *NoopMetrics) SubOpenBlobovnizcaSize(uint64) {}

View file

@ -23,6 +23,10 @@ type PutPrm struct {
type PutRes struct {
}
// ErrFull is returned when trying to save an
// object to a filled blobovnicza.
var ErrFull = logicerr.New("blobovnicza is full")
// SetAddress sets the address of the saving object.
func (p *PutPrm) SetAddress(addr oid.Address) {
p.addr = addr
@ -61,6 +65,10 @@ func (b *Blobovnicza) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
key := addressKey(prm.addr)
err := b.boltDB.Batch(func(tx *bbolt.Tx) error {
if b.full() {
return ErrFull
}
buck := tx.Bucket(bucketName)
if buck == nil {
// expected to happen:
@ -77,7 +85,7 @@ func (b *Blobovnicza) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
return nil
})
if err == nil {
b.itemAdded(upperBound)
b.incSize(upperBound)
}
return PutRes{}, err
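A hedged sketch of how a caller can tell the full-storage case apart from other Put failures; errors.Is is the comparison the updated test above relies on:

_, err := blz.Put(ctx, prm)
switch {
case err == nil:
	// stored successfully
case errors.Is(err, ErrFull):
	// capacity limit reached: the tree layer moves on to the next blobovnicza
default:
	// propagate any other storage error
}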

View file

@ -3,7 +3,6 @@ package blobovnicza
import (
"encoding/binary"
"fmt"
"math"
"math/bits"
"strconv"
)
@ -41,20 +40,16 @@ func upperPowerOfTwo(v uint64) uint64 {
return 1 << bits.Len64(v-1)
}
func (b *Blobovnicza) itemAdded(itemSize uint64) {
b.dataSize.Add(itemSize)
b.itemsCount.Add(1)
b.metrics.AddOpenBlobovniczaSize(itemSize)
b.metrics.AddOpenBlobovniczaItems(1)
func (b *Blobovnicza) incSize(sz uint64) {
b.dataSize.Add(sz)
b.metrics.AddOpenBlobovnizcaSize(sz)
}
func (b *Blobovnicza) itemDeleted(itemSize uint64) {
b.dataSize.Add(^(itemSize - 1))
b.itemsCount.Add(math.MaxUint64)
b.metrics.SubOpenBlobovniczaSize(itemSize)
b.metrics.SubOpenBlobovniczaItems(1)
func (b *Blobovnicza) decSize(sz uint64) {
b.dataSize.Add(^(sz - 1))
b.metrics.SubOpenBlobovnizcaSize(sz)
}
func (b *Blobovnicza) IsFull() bool {
func (b *Blobovnicza) full() bool {
return b.dataSize.Load() >= b.fullSizeLimit
}
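The Add(^(sz - 1)) calls above are how an atomic.Uint64 is decremented, since the type has no Sub method; a self-contained illustration of the idiom:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n atomic.Uint64
	n.Store(10)
	// ^(3 - 1) is the two's-complement representation of -3,
	// so adding it subtracts 3 from the counter.
	n.Add(^uint64(3 - 1))
	// Adding math.MaxUint64, as itemDeleted does for the items counter,
	// is the same trick for subtracting exactly 1.
	fmt.Println(n.Load()) // 7
}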

View file

@ -1,213 +0,0 @@
package blobovniczatree
import (
"path/filepath"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
)
type activeDB struct {
blz *blobovnicza.Blobovnicza
shDB *sharedDB
}
func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza {
return db.blz
}
func (db *activeDB) Close() {
db.shDB.Close()
}
func (db *activeDB) Path() string {
return db.shDB.Path()
}
// activeDBManager manages active blobovnicza instances (that is, those that are being used for Put).
//
// Uses dbManager for opening/closing sharedDB instances.
// Stores a reference to an open active sharedDB, so dbManager does not close it.
// When changing the active sharedDB, releases the reference to the previous active sharedDB.
type activeDBManager struct {
levelToActiveDBGuard *sync.RWMutex
levelToActiveDB map[string]*sharedDB
levelLock *utilSync.KeyLocker[string]
closed bool
dbManager *dbManager
leafWidth uint64
}
func newActiveDBManager(dbManager *dbManager, leafWidth uint64) *activeDBManager {
return &activeDBManager{
levelToActiveDBGuard: &sync.RWMutex{},
levelToActiveDB: make(map[string]*sharedDB),
levelLock: utilSync.NewKeyLocker[string](),
dbManager: dbManager,
leafWidth: leafWidth,
}
}
// GetOpenedActiveDBForLevel returns active DB for level.
// DB must be closed after use.
func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, error) {
activeDB, err := m.getCurrentActiveIfOk(lvlPath)
if err != nil {
return nil, err
}
if activeDB != nil {
return activeDB, nil
}
return m.updateAndGetActive(lvlPath)
}
func (m *activeDBManager) Open() {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
m.closed = false
}
func (m *activeDBManager) Close() {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
for _, db := range m.levelToActiveDB {
db.Close()
}
m.levelToActiveDB = make(map[string]*sharedDB)
m.closed = true
}
func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error) {
m.levelToActiveDBGuard.RLock()
defer m.levelToActiveDBGuard.RUnlock()
if m.closed {
return nil, errClosed
}
db, ok := m.levelToActiveDB[lvlPath]
if !ok {
return nil, nil
}
blz, err := db.Open() //open db for usage, will be closed on activeDB.Close()
if err != nil {
return nil, err
}
if blz.IsFull() {
db.Close()
return nil, nil
}
return &activeDB{
blz: blz,
shDB: db,
}, nil
}
func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) {
m.levelLock.Lock(lvlPath)
defer m.levelLock.Unlock(lvlPath)
current, err := m.getCurrentActiveIfOk(lvlPath)
if err != nil {
return nil, err
}
if current != nil {
return current, nil
}
nextShDB, err := m.getNextSharedDB(lvlPath)
if err != nil {
return nil, err
}
if nextShDB == nil {
return nil, nil
}
blz, err := nextShDB.Open() // open db for client, client must call Close() after usage
if err != nil {
return nil, err
}
return &activeDB{
blz: blz,
shDB: nextShDB,
}, nil
}
func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
var idx uint64
var iterCount uint64
hasActive, currentIdx := m.hasActiveDB(lvlPath)
if hasActive {
idx = (currentIdx + 1) % m.leafWidth
}
var next *sharedDB
for iterCount < m.leafWidth {
path := filepath.Join(lvlPath, u64ToHexString(idx))
shDB := m.dbManager.GetByPath(path)
db, err := shDB.Open() //open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
if err != nil {
return nil, err
}
if db.IsFull() {
shDB.Close()
} else {
next = shDB
break
}
idx = (idx + 1) % m.leafWidth
iterCount++
}
previous, updated := m.replace(lvlPath, next)
if !updated && next != nil {
next.Close() // manager is closed, so don't hold active DB open
}
if updated && previous != nil {
previous.Close()
}
return next, nil
}
func (m *activeDBManager) hasActiveDB(lvlPath string) (bool, uint64) {
m.levelToActiveDBGuard.RLock()
defer m.levelToActiveDBGuard.RUnlock()
if m.closed {
return false, 0
}
db, ok := m.levelToActiveDB[lvlPath]
if !ok {
return false, 0
}
return true, u64FromHexString(filepath.Base(db.Path()))
}
func (m *activeDBManager) replace(lvlPath string, shDB *sharedDB) (*sharedDB, bool) {
m.levelToActiveDBGuard.Lock()
defer m.levelToActiveDBGuard.Unlock()
if m.closed {
return nil, false
}
previous := m.levelToActiveDB[lvlPath]
if shDB == nil {
delete(m.levelToActiveDB, lvlPath)
} else {
m.levelToActiveDB[lvlPath] = shDB
}
return previous, true
}
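For orientation, a hedged sketch of the call pattern this manager is built for; the manager API is the one defined above, while the Put parameters and the sentinel error are assumptions:

active, err := manager.GetOpenedActiveDBForLevel(lvlPath)
if err != nil {
	return err
}
if active == nil {
	// every candidate on this level is full and no new one could be opened
	return errNoActiveDB // hypothetical sentinel, not part of the original code
}
defer active.Close() // releases the reference held for this caller

_, err = active.Blobovnicza().Put(ctx, prm)
return err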

View file

@ -3,12 +3,19 @@ package blobovniczatree
import (
"errors"
"fmt"
"path/filepath"
"strconv"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/hrw"
"github.com/hashicorp/golang-lru/v2/simplelru"
"go.uber.org/zap"
)
// Blobovniczas represents the storage of the "small" objects.
@ -54,16 +61,35 @@ import (
type Blobovniczas struct {
cfg
commondbManager *dbManager
activeDBManager *activeDBManager
dbCache *dbCache
// cache of opened filled Blobovniczas
opened *simplelru.LRU[string, *blobovnicza.Blobovnicza]
// lruMtx protects opened cache.
// It isn't RWMutex because `Get` calls must
// lock this mutex on write, as LRU info is updated.
// It must be taken after activeMtx in case when eviction is possible
// i.e. `Add`, `Purge` and `Remove` calls.
lruMtx sync.Mutex
// mutex to exclude parallel bbolt.Open() calls
// bbolt.Open() deadlocks if it tries to open already opened file
openMtx sync.Mutex
// list of active (opened, non-filled) Blobovniczas
activeMtx sync.RWMutex
active map[string]blobovniczaWithIndex
}
type blobovniczaWithIndex struct {
ind uint64
blz *blobovnicza.Blobovnicza
}
var _ common.Storage = (*Blobovniczas)(nil)
var errPutFailed = errors.New("could not save the object in any blobovnicza")
// NewBlobovniczaTree returns new instance of blobovniczas tree.
// NewBlobovniczaTree returns new instance of blobovnizas tree.
func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
blz = new(Blobovniczas)
initConfig(&blz.cfg)
@ -72,17 +98,116 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
opts[i](&blz.cfg)
}
if blz.blzLeafWidth == 0 {
blz.blzLeafWidth = blz.blzShallowWidth
cache, err := simplelru.NewLRU[string, *blobovnicza.Blobovnicza](blz.openedCacheSize, func(p string, value *blobovnicza.Blobovnicza) {
lvlPath := filepath.Dir(p)
if b, ok := blz.active[lvlPath]; ok && b.ind == u64FromHexString(filepath.Base(p)) {
// This branch is taken if we have recently updated active blobovnicza and remove
// it from opened cache.
return
} else if err := value.Close(); err != nil {
blz.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", p),
zap.String("error", err.Error()),
)
} else {
blz.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict,
zap.String("id", p),
)
}
})
if err != nil {
// occurs only if the size is not positive
panic(fmt.Errorf("could not create LRU cache of size %d: %w", blz.openedCacheSize, err))
}
blz.commondbManager = newDBManager(blz.rootPath, blz.blzOpts, blz.blzLeafWidth, blz.readOnly, blz.metrics.Blobovnicza(), blz.log)
blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.blzLeafWidth)
blz.dbCache = newDBCache(blz.openedCacheSize, blz.commondbManager)
cp := uint64(1)
for i := uint64(0); i < blz.blzShallowDepth; i++ {
cp *= blz.blzShallowWidth
}
blz.opened = cache
blz.active = make(map[string]blobovniczaWithIndex, cp)
return blz
}
// activates and returns activated blobovnicza of p-level (dir).
//
// returns error if blobovnicza could not be activated.
func (b *Blobovniczas) getActivated(lvlPath string) (blobovniczaWithIndex, error) {
return b.updateAndGet(lvlPath, nil)
}
// updates active blobovnicza of p-level (dir).
//
// if current active blobovnicza's index is not old, it remains unchanged.
func (b *Blobovniczas) updateActive(lvlPath string, old *uint64) error {
b.log.Debug(logs.BlobovniczatreeUpdatingActiveBlobovnicza, zap.String("path", lvlPath))
_, err := b.updateAndGet(lvlPath, old)
b.log.Debug(logs.BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated, zap.String("path", lvlPath))
return err
}
// updates and returns active blobovnicza of p-level (dir).
//
// if current active blobovnicza's index is not old, it is returned unchanged.
func (b *Blobovniczas) updateAndGet(lvlPath string, old *uint64) (blobovniczaWithIndex, error) {
b.activeMtx.RLock()
active, ok := b.active[lvlPath]
b.activeMtx.RUnlock()
if ok {
if old != nil {
if active.ind == b.blzShallowWidth-1 {
return active, logicerr.New("no more Blobovniczas")
} else if active.ind != *old {
// sort of CAS in order to control concurrent
// updateActive calls
return active, nil
}
} else {
return active, nil
}
active.ind++
}
var err error
if active.blz, err = b.openBlobovnicza(filepath.Join(lvlPath, u64ToHexString(active.ind))); err != nil {
return active, err
}
b.activeMtx.Lock()
defer b.activeMtx.Unlock()
// check a 2nd time to find out if the blobovnicza was activated while the thread was locked
tryActive, ok := b.active[lvlPath]
if ok && tryActive.blz == active.blz {
return tryActive, nil
}
// Remove from opened cache (active blobovnicza should always be opened).
// Because `onEvict` callback is called in `Remove`, we need to update
// active map beforehand.
b.active[lvlPath] = active
activePath := filepath.Join(lvlPath, u64ToHexString(active.ind))
b.lruMtx.Lock()
b.opened.Remove(activePath)
if ok {
b.opened.Add(filepath.Join(lvlPath, u64ToHexString(tryActive.ind)), tryActive.blz)
}
b.lruMtx.Unlock()
b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyActivated,
zap.String("path", activePath))
return active, nil
}
// returns hash of the object address.
func addressHash(addr *oid.Address, path string) uint64 {
var a string

View file

@ -1,103 +0,0 @@
package blobovniczatree
import (
"fmt"
"sync"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
// dbCache caches sharedDB instances that are NOT open for Put.
//
// Uses dbManager for opening/closing sharedDB instances.
// Stores a reference to a cached sharedDB, so dbManager does not close it.
type dbCache struct {
cacheGuard *sync.RWMutex
cache simplelru.LRUCache[string, *sharedDB]
pathLock *utilSync.KeyLocker[string]
closed bool
dbManager *dbManager
}
func newDBCache(size int, dbManager *dbManager) *dbCache {
cache, err := simplelru.NewLRU[string, *sharedDB](size, func(_ string, evictedDB *sharedDB) {
evictedDB.Close()
})
if err != nil {
// occurs only if the size is not positive
panic(fmt.Errorf("could not create LRU cache of size %d: %w", size, err))
}
return &dbCache{
cacheGuard: &sync.RWMutex{},
cache: cache,
dbManager: dbManager,
pathLock: utilSync.NewKeyLocker[string](),
}
}
func (c *dbCache) Open() {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
c.closed = false
}
func (c *dbCache) Close() {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
c.cache.Purge()
c.closed = true
}
func (c *dbCache) GetOrCreate(path string) *sharedDB {
value := c.getExisted(path)
if value != nil {
return value
}
return c.create(path)
}
func (c *dbCache) getExisted(path string) *sharedDB {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
if value, ok := c.cache.Get(path); ok {
return value
}
return nil
}
func (c *dbCache) create(path string) *sharedDB {
c.pathLock.Lock(path)
defer c.pathLock.Unlock(path)
value := c.getExisted(path)
if value != nil {
return value
}
value = c.dbManager.GetByPath(path)
_, err := value.Open() //open db to hold reference, closed by evictedDB.Close() or if cache closed
if err != nil {
return value
}
if added := c.put(path, value); !added {
value.Close()
}
return value
}
func (c *dbCache) put(path string, db *sharedDB) bool {
c.cacheGuard.Lock()
defer c.cacheGuard.Unlock()
if !c.closed {
c.cache.Add(path, db)
return true
}
return false
}
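A short sketch of how read paths are expected to use the cache; the names are the ones defined above, and the caller-side error handling is an assumption:

shDB := cache.GetOrCreate(path) // cached reference, or a freshly opened and newly cached one
blz, err := shDB.Open()         // callers still balance every Open with a Close around use
if err != nil {
	return err
}
defer shDB.Close()
// ... read from blz here; the cache's own reference keeps dbManager from closing it ...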

View file

@ -1,59 +0,0 @@
package blobovniczatree
import (
"context"
"sync"
"sync/atomic"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/stretchr/testify/require"
)
func TestBlobovniczaTree_Concurrency(t *testing.T) {
t.Parallel()
const n = 1000
st := NewBlobovniczaTree(
WithLogger(test.NewLogger(t, true)),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(10),
WithBlobovniczaShallowDepth(1),
WithRootPath(t.TempDir()))
require.NoError(t, st.Open(false))
require.NoError(t, st.Init())
t.Cleanup(func() {
require.NoError(t, st.Close())
})
objGen := &testutil.SeqObjGenerator{ObjSize: 1}
var cnt atomic.Int64
var wg sync.WaitGroup
for i := 0; i < 1000; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for cnt.Add(1) <= n {
obj := objGen.Next()
addr := testutil.AddressFromObject(t, obj)
raw, err := obj.Marshal()
require.NoError(t, err)
_, err = st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
})
require.NoError(t, err)
_, err = st.Get(context.Background(), common.GetPrm{Address: addr})
require.NoError(t, err)
}
}()
}
wg.Wait()
}

View file

@ -2,8 +2,11 @@ package blobovniczatree
import (
"context"
"fmt"
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"go.uber.org/zap"
)
@ -11,7 +14,6 @@ import (
func (b *Blobovniczas) Open(readOnly bool) error {
b.readOnly = readOnly
b.metrics.SetMode(readOnly)
b.openManagers()
return nil
}
@ -27,40 +29,115 @@ func (b *Blobovniczas) Init() error {
}
return b.iterateLeaves(context.TODO(), func(p string) (bool, error) {
shBlz := b.getBlobovniczaWithoutCaching(p)
_, err := shBlz.Open()
blz, err := b.openBlobovniczaNoCache(p)
if err != nil {
return true, err
}
defer shBlz.Close()
defer blz.Close()
if err := blz.Init(); err != nil {
return true, fmt.Errorf("could not initialize blobovnicza structure %s: %w", p, err)
}
b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
return false, nil
})
}
func (b *Blobovniczas) openManagers() {
b.commondbManager.Open() //order important
b.activeDBManager.Open()
b.dbCache.Open()
}
// Close implements common.Storage.
func (b *Blobovniczas) Close() error {
b.dbCache.Close() //order important
b.activeDBManager.Close()
b.commondbManager.Close()
b.activeMtx.Lock()
b.lruMtx.Lock()
for p, v := range b.active {
if err := v.blz.Close(); err != nil {
b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza,
zap.String("path", p),
zap.String("error", err.Error()),
)
}
b.opened.Remove(p)
}
for _, k := range b.opened.Keys() {
blz, _ := b.opened.Get(k)
if err := blz.Close(); err != nil {
b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza,
zap.String("path", k),
zap.String("error", err.Error()),
)
}
b.opened.Remove(k)
}
b.active = make(map[string]blobovniczaWithIndex)
b.metrics.Close()
b.lruMtx.Unlock()
b.activeMtx.Unlock()
return nil
}
// returns blobovnicza with path p
// opens and returns blobovnicza with path p.
//
// If blobovnicza is already cached, instance from cache is returned w/o changes.
func (b *Blobovniczas) getBlobovnicza(p string) *sharedDB {
return b.dbCache.GetOrCreate(p)
// If blobovnicza is already opened and cached, instance from cache is returned w/o changes.
func (b *Blobovniczas) openBlobovnicza(p string) (*blobovnicza.Blobovnicza, error) {
b.lruMtx.Lock()
v, ok := b.opened.Get(p)
b.lruMtx.Unlock()
if ok {
// blobovnicza should be opened in cache
return v, nil
}
lvlPath := filepath.Dir(p)
curIndex := u64FromHexString(filepath.Base(p))
b.activeMtx.RLock()
defer b.activeMtx.RUnlock()
active, ok := b.active[lvlPath]
if ok && active.ind == curIndex {
return active.blz, nil
}
b.lruMtx.Lock()
defer b.lruMtx.Unlock()
v, ok = b.opened.Get(p)
if ok {
return v, nil
}
blz, err := b.openBlobovniczaNoCache(p)
if err != nil {
return nil, err
}
b.opened.Add(p, blz)
return blz, nil
}
func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB {
return b.commondbManager.GetByPath(p)
func (b *Blobovniczas) openBlobovniczaNoCache(p string) (*blobovnicza.Blobovnicza, error) {
b.openMtx.Lock()
defer b.openMtx.Unlock()
path := filepath.Join(b.rootPath, p)
blz := blobovnicza.New(append(b.blzOpts,
blobovnicza.WithReadOnly(b.readOnly),
blobovnicza.WithPath(path),
blobovnicza.WithMetrics(b.metrics.Blobovnizca()),
)...)
if err := blz.Open(); err != nil {
return nil, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
}
if err := blz.Init(); err != nil {
return nil, fmt.Errorf("could not init blobovnicza %s: %w", p, err)
}
return blz, nil
}

View file

@ -3,6 +3,7 @@ package blobovniczatree
import (
"context"
"encoding/hex"
"path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -47,12 +48,10 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.String())
blz, err := shBlz.Open()
blz, err := b.openBlobovnicza(id.String())
if err != nil {
return res, err
}
defer shBlz.Close()
if res, err = b.deleteObject(ctx, blz, bPrm); err == nil {
success = true
@ -60,10 +59,16 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
return res, err
}
activeCache := make(map[string]struct{})
objectFound := false
err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
res, err = b.deleteObjectFromLevel(ctx, bPrm, p)
dirPath := filepath.Dir(p)
// don't process active blobovnicza of the level twice
_, ok := activeCache[dirPath]
res, err = b.deleteObjectFromLevel(ctx, bPrm, p, !ok)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
@ -73,6 +78,8 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
}
}
activeCache[dirPath] = struct{}{}
if err == nil {
objectFound = true
}
@ -93,13 +100,57 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
// tries to delete object from particular blobovnicza.
//
// returns no error if object was removed from some blobovnicza of the same level.
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) {
shBlz := b.getBlobovnicza(blzPath)
blz, err := shBlz.Open()
func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string, tryActive bool) (common.DeleteRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to remove from blobovnicza if it is opened
b.lruMtx.Lock()
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
if res, err := b.deleteObject(ctx, v, prm); err == nil {
return res, err
} else if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
// therefore the object is possibly placed in a lighter blobovnicza
// next we check in the active level blobovnicza:
// * the active blobovnicza is always opened.
b.activeMtx.RLock()
active, ok := b.active[lvlPath]
b.activeMtx.RUnlock()
if ok && tryActive {
if res, err := b.deleteObject(ctx, active.blz, prm); err == nil {
return res, err
} else if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
// then object is possibly placed in closed blobovnicza
// check if it makes sense to try to open the blob
// (Blobovniczas "after" the active one are empty anyway,
// and it's pointless to open them).
if u64FromHexString(filepath.Base(blzPath)) > active.ind {
return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
// open blobovnicza (cached inside)
blz, err := b.openBlobovnicza(blzPath)
if err != nil {
return common.DeleteRes{}, err
}
defer shBlz.Close()
return b.deleteObject(ctx, blz, prm)
}

View file

@ -7,8 +7,6 @@ import (
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)
var errClosed = logicerr.New("blobvnicza is closed")
func isErrOutOfRange(err error) bool {
var target *apistatus.ObjectOutOfRange
return errors.As(err, &target)

View file

@ -3,6 +3,7 @@ package blobovniczatree
import (
"context"
"encoding/hex"
"path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -36,22 +37,26 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.String())
blz, err := shBlz.Open()
blz, err := b.openBlobovnicza(id.String())
if err != nil {
return common.ExistsRes{}, err
}
defer shBlz.Close()
exists, err := blz.Exists(ctx, prm.Address)
return common.ExistsRes{Exists: exists}, err
}
activeCache := make(map[string]struct{})
var gPrm blobovnicza.GetPrm
gPrm.SetAddress(prm.Address)
err := b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
_, err := b.getObjectFromLevel(ctx, gPrm, p)
dirPath := filepath.Dir(p)
_, ok := activeCache[dirPath]
_, err := b.getObjectFromLevel(ctx, gPrm, p, !ok)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
@ -60,6 +65,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
}
}
activeCache[dirPath] = struct{}{}
found = err == nil
return found, nil
})

View file

@ -9,15 +9,16 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func TestExistsInvalidStorageID(t *testing.T) {
dir := t.TempDir()
b := NewBlobovniczaTree(
WithLogger(test.NewLogger(t, true)),
WithLogger(&logger.Logger{Logger: zap.L()}),
WithObjectSizeLimit(1024),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),

View file

@ -5,7 +5,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
func TestGeneric(t *testing.T) {
@ -13,7 +14,7 @@ func TestGeneric(t *testing.T) {
helper := func(t *testing.T, dir string) common.Storage {
return NewBlobovniczaTree(
WithLogger(test.NewLogger(t, true)),
WithLogger(&logger.Logger{Logger: zap.L()}),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
@ -40,7 +41,7 @@ func TestControl(t *testing.T) {
newTree := func(t *testing.T) common.Storage {
return NewBlobovniczaTree(
WithLogger(test.NewLogger(t, true)),
WithLogger(&logger.Logger{Logger: zap.L()}),
WithObjectSizeLimit(maxObjectSize),
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),

View file

@ -4,6 +4,7 @@ import (
"context"
"encoding/hex"
"fmt"
"path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -47,12 +48,10 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.String())
blz, err := shBlz.Open()
blz, err := b.openBlobovnicza(id.String())
if err != nil {
return res, err
}
defer shBlz.Close()
res, err = b.getObject(ctx, blz, bPrm)
if err == nil {
@ -62,8 +61,14 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
return res, err
}
activeCache := make(map[string]struct{})
err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
res, err = b.getObjectFromLevel(ctx, bPrm, p)
dirPath := filepath.Dir(p)
_, ok := activeCache[dirPath]
res, err = b.getObjectFromLevel(ctx, bPrm, p, !ok)
if err != nil {
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
@ -73,6 +78,8 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
}
}
activeCache[dirPath] = struct{}{}
// abort iterator if found, otherwise process all Blobovniczas
return err == nil, nil
})
@ -91,14 +98,58 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
// tries to read object from particular blobovnicza.
//
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) {
func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string, tryActive bool) (common.GetRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to read from blobovnicza if it is opened
b.lruMtx.Lock()
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
if res, err := b.getObject(ctx, v, prm); err == nil {
return res, err
} else if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
// therefore the object is possibly placed in a lighter blobovnicza
// next we check in the active level blobovnicza:
// * the freshest objects are probably the most demanded;
// * the active blobovnicza is always opened.
b.activeMtx.RLock()
active, ok := b.active[lvlPath]
b.activeMtx.RUnlock()
if ok && tryActive {
if res, err := b.getObject(ctx, active.blz, prm); err == nil {
return res, err
} else if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
// then object is possibly placed in closed blobovnicza
// check if it makes sense to try to open the blob
// (Blobovniczas "after" the active one are empty anyway,
// and it's pointless to open them).
if u64FromHexString(filepath.Base(blzPath)) > active.ind {
return common.GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
// open blobovnicza (cached inside)
shBlz := b.getBlobovnicza(blzPath)
blz, err := shBlz.Open()
blz, err := b.openBlobovnicza(blzPath)
if err != nil {
return common.GetRes{}, err
}
defer shBlz.Close()
return b.getObject(ctx, blz, prm)
}
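The "pointless to open" check above relies on the leaf databases being named by their zero-padded hex index, so a leaf whose index is greater than the active one has never received writes and can be skipped without opening it. Below is a standalone sketch of that comparison; the helpers only mirror u64ToHexString/u64FromHexString and are not the real functions.

package main

import (
	"fmt"
	"path/filepath"
	"strconv"
)

func toHexName(i uint64) string { return fmt.Sprintf("%02x", i) }

func fromHexName(s string) uint64 {
	v, err := strconv.ParseUint(s, 16, 64)
	if err != nil {
		panic(err) // names are generated internally, so this is a programming error
	}
	return v
}

// worthOpening reports whether a closed leaf could contain data at all.
func worthOpening(leafPath string, activeIndex uint64) bool {
	return fromHexName(filepath.Base(leafPath)) <= activeIndex
}

func main() {
	active := uint64(1) // leaf "01" is currently active on this level
	for i := uint64(0); i < 4; i++ {
		p := filepath.Join("00", toHexName(i))
		fmt.Println(p, "worth opening:", worthOpening(p, active))
	}
}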

View file

@ -4,6 +4,7 @@ import (
"context"
"encoding/hex"
"fmt"
"path/filepath"
"strconv"
"time"
@ -46,12 +47,10 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID)
shBlz := b.getBlobovnicza(id.String())
blz, err := shBlz.Open()
blz, err := b.openBlobovnicza(id.String())
if err != nil {
return common.GetRangeRes{}, err
}
defer shBlz.Close()
res, err := b.getObjectRange(ctx, blz, prm)
if err == nil {
@ -61,10 +60,15 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
return res, err
}
activeCache := make(map[string]struct{})
objectFound := false
err = b.iterateSortedLeaves(ctx, &prm.Address, func(p string) (bool, error) {
res, err = b.getRangeFromLevel(ctx, prm, p)
dirPath := filepath.Dir(p)
_, ok := activeCache[dirPath]
res, err = b.getRangeFromLevel(ctx, prm, p, !ok)
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !client.IsErrObjectNotFound(err) {
@ -78,6 +82,8 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
}
}
activeCache[dirPath] = struct{}{}
objectFound = err == nil
// abort iterator if found, otherwise process all Blobovniczas
@ -100,14 +106,68 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
// tries to read range of object payload data from particular blobovnicza.
//
// returns error if object could not be read from any blobovnicza of the same level.
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) {
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string, tryActive bool) (common.GetRangeRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to read from blobovnicza if it is opened
b.lruMtx.Lock()
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
res, err := b.getObjectRange(ctx, v, prm)
switch {
case err == nil,
isErrOutOfRange(err):
return res, err
default:
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
}
// therefore the object is possibly placed in a lighter blobovnicza
// next we check in the active level blobovnicza:
// * the freshest objects are probably the most demanded;
// * the active blobovnicza is always opened.
b.activeMtx.RLock()
active, ok := b.active[lvlPath]
b.activeMtx.RUnlock()
if ok && tryActive {
res, err := b.getObjectRange(ctx, active.blz, prm)
switch {
case err == nil,
isErrOutOfRange(err):
return res, err
default:
if !client.IsErrObjectNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
}
}
}
// then object is possibly placed in closed blobovnicza
// check if it makes sense to try to open the blob
// (Blobovniczas "after" the active one are empty anyway,
// and it's pointless to open them).
if u64FromHexString(filepath.Base(blzPath)) > active.ind {
return common.GetRangeRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
}
// open blobovnicza (cached inside)
shBlz := b.getBlobovnicza(blzPath)
blz, err := shBlz.Open()
blz, err := b.openBlobovnicza(blzPath)
if err != nil {
return common.GetRangeRes{}, err
}
defer shBlz.Close()
return b.getObjectRange(ctx, blz, prm)
}

View file

@ -68,15 +68,13 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
// iterator over all Blobovniczas in unsorted order. Break on f's error return.
func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error {
return b.iterateLeaves(ctx, func(p string) (bool, error) {
shBlz := b.getBlobovnicza(p)
blz, err := shBlz.Open()
blz, err := b.openBlobovnicza(p)
if err != nil {
if ignoreErrors {
return false, nil
}
return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
}
defer shBlz.Close()
err = f(p, blz)
@ -117,12 +115,7 @@ func (b *Blobovniczas) iterateDeepest(ctx context.Context, addr oid.Address, f f
// iterator over particular level of directories.
func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, curPath []string, execDepth uint64, f func([]string) (bool, error)) (bool, error) {
isLeafLevel := uint64(len(curPath)) == b.blzShallowDepth
levelWidth := b.blzShallowWidth
if isLeafLevel {
levelWidth = b.blzLeafWidth
}
indices := indexSlice(levelWidth)
indices := indexSlice(b.blzShallowWidth)
hrw.SortSliceByValue(indices, addressHash(addr, filepath.Join(curPath...)))

View file

@ -1,242 +0,0 @@
package blobovniczatree
import (
"fmt"
"path/filepath"
"sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
// sharedDB is responsible for opening and closing the file of a single blobovnicza.
type sharedDB struct {
guard *sync.RWMutex
blcza *blobovnicza.Blobovnicza
refCount uint32
openDBCounter *openDBCounter
closedFlag *atomic.Bool
options []blobovnicza.Option
path string
readOnly bool
metrics blobovnicza.Metrics
log *logger.Logger
}
func newSharedDB(options []blobovnicza.Option, path string, readOnly bool,
metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger) *sharedDB {
return &sharedDB{
guard: &sync.RWMutex{},
options: options,
path: path,
readOnly: readOnly,
metrics: metrics,
closedFlag: closedFlag,
log: log,
openDBCounter: openDBCounter,
}
}
func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) {
if b.closedFlag.Load() {
return nil, errClosed
}
b.guard.Lock()
defer b.guard.Unlock()
if b.refCount > 0 {
b.refCount++
return b.blcza, nil
}
blz := blobovnicza.New(append(b.options,
blobovnicza.WithReadOnly(b.readOnly),
blobovnicza.WithPath(b.path),
blobovnicza.WithMetrics(b.metrics),
)...)
if err := blz.Open(); err != nil {
return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
}
if err := blz.Init(); err != nil {
return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
}
b.refCount++
b.blcza = blz
b.openDBCounter.Inc()
return blz, nil
}
func (b *sharedDB) Close() {
b.guard.Lock()
defer b.guard.Unlock()
if b.refCount == 0 {
b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path))
return
}
if b.refCount == 1 {
b.refCount = 0
if err := b.blcza.Close(); err != nil {
b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
)
}
b.blcza = nil
b.openDBCounter.Dec()
return
}
b.refCount--
}
func (b *sharedDB) Path() string {
return b.path
}
// levelDbManager stores pointers to the sharedDBs for a leaf directory of the blobovnicza tree.
type levelDbManager struct {
databases []*sharedDB
}
func newLevelDBManager(width uint64, options []blobovnicza.Option, rootPath string, lvlPath string,
readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlog *atomic.Bool, log *logger.Logger) *levelDbManager {
result := &levelDbManager{
databases: make([]*sharedDB, width),
}
for idx := uint64(0); idx < width; idx++ {
result.databases[idx] = newSharedDB(options, filepath.Join(rootPath, lvlPath, u64ToHexString(idx)), readOnly, metrics, openDBCounter, closedFlog, log)
}
return result
}
func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB {
return m.databases[idx]
}
// dbManager manages the opening and closing of blobovnicza instances.
//
// The blobovnicza opens at the first request, closes after the last request.
type dbManager struct {
levelToManager map[string]*levelDbManager
levelToManagerGuard *sync.RWMutex
closedFlag *atomic.Bool
dbCounter *openDBCounter
rootPath string
options []blobovnicza.Option
readOnly bool
metrics blobovnicza.Metrics
leafWidth uint64
log *logger.Logger
}
func newDBManager(rootPath string, options []blobovnicza.Option, leafWidth uint64, readOnly bool, metrics blobovnicza.Metrics, log *logger.Logger) *dbManager {
return &dbManager{
rootPath: rootPath,
options: options,
readOnly: readOnly,
metrics: metrics,
leafWidth: leafWidth,
levelToManager: make(map[string]*levelDbManager),
levelToManagerGuard: &sync.RWMutex{},
log: log,
closedFlag: &atomic.Bool{},
dbCounter: newOpenDBCounter(),
}
}
func (m *dbManager) GetByPath(path string) *sharedDB {
lvlPath := filepath.Dir(path)
curIndex := u64FromHexString(filepath.Base(path))
levelManager := m.getLevelManager(lvlPath)
return levelManager.GetByIndex(curIndex)
}
func (m *dbManager) Open() {
m.closedFlag.Store(false)
}
func (m *dbManager) Close() {
m.closedFlag.Store(true)
m.dbCounter.WaitUntilAllClosed()
}
func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager {
result := m.getLevelManagerIfExists(lvlPath)
if result != nil {
return result
}
return m.getOrCreateLevelManager(lvlPath)
}
func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager {
m.levelToManagerGuard.RLock()
defer m.levelToManagerGuard.RUnlock()
return m.levelToManager[lvlPath]
}
func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager {
m.levelToManagerGuard.Lock()
defer m.levelToManagerGuard.Unlock()
if result, ok := m.levelToManager[lvlPath]; ok {
return result
}
result := newLevelDBManager(m.leafWidth, m.options, m.rootPath, lvlPath, m.readOnly, m.metrics, m.dbCounter, m.closedFlag, m.log)
m.levelToManager[lvlPath] = result
return result
}
type openDBCounter struct {
cond *sync.Cond
count uint64
}
func newOpenDBCounter() *openDBCounter {
return &openDBCounter{
cond: &sync.Cond{
L: &sync.Mutex{},
},
}
}
func (c *openDBCounter) Inc() {
c.cond.L.Lock()
defer c.cond.L.Unlock()
c.count++
}
func (c *openDBCounter) Dec() {
c.cond.L.Lock()
defer c.cond.L.Unlock()
if c.count > 0 {
c.count--
}
if c.count == 0 {
c.cond.Broadcast()
}
}
func (c *openDBCounter) WaitUntilAllClosed() {
c.cond.L.Lock()
for c.count > 0 {
c.cond.Wait()
}
c.cond.L.Unlock()
}
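A short usage sketch for the ref-counted handles removed in this file: each reader opens the shared handle and defers Close, and dbManager.Close blocks (via openDBCounter) until the last reference is released. It assumes the dbManager and sharedDB types defined above; readLeaf itself is an invented helper, not real code.

// readLeaf shows the intended call pattern for sharedDB.
func readLeaf(m *dbManager, leafPath string) error {
	shBlz := m.GetByPath(leafPath)
	blz, err := shBlz.Open() // the first Open opens the file; later calls only bump the ref count
	if err != nil {
		return err
	}
	defer shBlz.Close() // the last Close really closes the file and decrements openDBCounter

	_ = blz // read from blz here
	return nil
}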

View file

@ -7,7 +7,7 @@ import (
)
type Metrics interface {
Blobovnicza() blobovnicza.Metrics
Blobovnizca() blobovnicza.Metrics
SetParentID(parentID string)
@ -33,6 +33,6 @@ func (m *noopMetrics) GetRange(time.Duration, int, bool, bool) {}
func (m *noopMetrics) Get(time.Duration, int, bool, bool) {}
func (m *noopMetrics) Iterate(time.Duration, bool) {}
func (m *noopMetrics) Put(time.Duration, int, bool) {}
func (m *noopMetrics) Blobovnicza() blobovnicza.Metrics {
func (m *noopMetrics) Blobovnizca() blobovnicza.Metrics {
return &blobovnicza.NoopMetrics{}
}

View file

@ -17,7 +17,6 @@ type cfg struct {
openedCacheSize int
blzShallowDepth uint64
blzShallowWidth uint64
blzLeafWidth uint64
compression *compression.Config
blzOpts []blobovnicza.Option
// reportError is the function called when encountering disk errors.
@ -65,12 +64,6 @@ func WithBlobovniczaShallowWidth(width uint64) Option {
}
}
func WithBlobovniczaLeafWidth(w uint64) Option {
return func(c *cfg) {
c.blzLeafWidth = w
}
}
func WithBlobovniczaShallowDepth(depth uint64) Option {
return func(c *cfg) {
c.blzShallowDepth = depth

View file

@ -2,6 +2,7 @@ package blobovniczatree
import (
"context"
"errors"
"path/filepath"
"time"
@ -9,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
@ -74,8 +76,8 @@ type putIterator struct {
PutPrm blobovnicza.PutPrm
}
func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) {
active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath)
func (i *putIterator) iterate(ctx context.Context, path string) (bool, error) {
active, err := i.B.getActivated(path)
if err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
@ -87,29 +89,46 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
return false, nil
}
if active == nil {
i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath))
return false, nil
if _, err := active.blz.Put(ctx, i.PutPrm); err != nil {
// Check if blobovnicza is full. We could either receive `blobovnicza.ErrFull` error
// or update the active blobovnicza in another thread. In the latter case the database will be closed
// and `updateActive` takes care of not updating the active blobovnicza twice.
if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) {
if isFull {
i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed,
zap.String("path", filepath.Join(path, u64ToHexString(active.ind))))
}
defer active.Close()
i.AllFull = false
_, err = active.Blobovnicza().Put(ctx, i.PutPrm)
if err != nil {
if err := i.B.updateActive(path, &active.ind); err != nil {
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
i.B.reportError(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza, err)
} else {
i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", active.Path()),
i.B.log.Debug(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza,
zap.String("level", path),
zap.String("error", err.Error()))
}
return false, nil
}
idx := u64FromHexString(filepath.Base(active.Path()))
i.ID = blobovnicza.NewIDFromBytes([]byte(filepath.Join(lvlPath, u64ToHexString(idx))))
return i.iterate(ctx, path)
}
i.AllFull = false
if !isLogical(err) {
i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", filepath.Join(path, u64ToHexString(active.ind))),
zap.String("error", err.Error()))
}
return false, nil
}
path = filepath.Join(path, u64ToHexString(active.ind))
i.ID = blobovnicza.NewIDFromBytes([]byte(path))
return true, nil
}
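A rough, self-contained sketch of the retry-on-full flow visible in one side of this hunk: try the active DB, and when it reports "full" (or was rotated by a concurrent writer), advance the active index for that level and retry. Every name below is invented for illustration; the real logic lives in putIterator.iterate.

package main

import (
	"errors"
	"fmt"
)

var errFull = errors.New("database is full")

type level struct {
	active   int
	capacity []int // remaining capacity per leaf DB
}

func (l *level) put() (dbIndex int, err error) {
	for {
		if l.capacity[l.active] == 0 {
			if l.active+1 == len(l.capacity) {
				return 0, errFull // every leaf on this level is full
			}
			l.active++ // "updateActive": rotate to the next leaf
			continue
		}
		l.capacity[l.active]--
		return l.active, nil
	}
}

func main() {
	lvl := &level{capacity: []int{1, 2}}
	for i := 0; i < 4; i++ {
		idx, err := lvl.put()
		fmt.Println("put", i, "-> db", idx, "err:", err)
	}
}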

View file

@ -7,15 +7,16 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func TestSimpleLifecycle(t *testing.T) {
s := New(
WithRootPath("memstore"),
WithLogger(test.NewLogger(t, true)),
WithLogger(&logger.Logger{Logger: zap.L()}),
)
t.Cleanup(func() { _ = s.Close() })
require.NoError(t, s.Open(false))

View file

@ -22,10 +22,11 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
// TestInitializationFailure checks that shard is initialized and closed even if media
@ -53,7 +54,7 @@ func TestInitializationFailure(t *testing.T) {
return []shard.Option{
shard.WithID(sid),
shard.WithLogger(test.NewLogger(t, true)),
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(
blobstor.WithStorages(storages)),
shard.WithMetaBaseOptions(
@ -294,7 +295,7 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
te := testNewEngine(t).
setShardsNumOpts(t, num, func(id int) []shard.Option {
return []shard.Option{
shard.WithLogger(test.NewLogger(t, true)),
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(
blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
shard.WithMetaBaseOptions(

View file

@ -6,13 +6,14 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func TestDeleteBigObject(t *testing.T) {
@ -53,7 +54,7 @@ func TestDeleteBigObject(t *testing.T) {
s3 := testNewShard(t, 3)
e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
e.log = test.NewLogger(t, true)
e.log = &logger.Logger{Logger: zap.L()}
defer e.Close()
for i := range children {

View file

@ -15,13 +15,14 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"git.frostfs.info/TrueCloudLab/hrw"
"github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
type epochState struct{}
@ -79,7 +80,7 @@ type testEngineWrapper struct {
}
func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
engine := New(WithLogger(test.NewLogger(t, true)))
engine := New(WithLogger(&logger.Logger{Logger: zap.L()}))
for _, opt := range opts {
opt(engine.cfg)
}
@ -198,7 +199,7 @@ func testNewShard(t testing.TB, id int) *shard.Shard {
func testDefaultShardOptions(t testing.TB, id int) []shard.Option {
return []shard.Option{
shard.WithLogger(test.NewLogger(t, true)),
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(
blobstor.WithStorages(
newStorages(t.TempDir(), 1<<20))),

View file

@ -17,10 +17,11 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
const errSmallSize = 256
@ -55,7 +56,7 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
largeFileStorage: largeFileStorage,
}
return []shard.Option{
shard.WithLogger(test.NewLogger(t, true)),
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(blobstor.WithStorages(storages)),
shard.WithMetaBaseOptions(
meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),

View file

@ -16,11 +16,12 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
)
@ -30,7 +31,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
te := testNewEngine(t).
setShardsNumOpts(t, shardNum, func(id int) []shard.Option {
return []shard.Option{
shard.WithLogger(test.NewLogger(t, true)),
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{{
Storage: fstree.New(

View file

@ -12,10 +12,11 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func sortAddresses(addrWithType []object.AddressWithType) []object.AddressWithType {
@ -65,7 +66,7 @@ func TestListWithCursor(t *testing.T) {
t.Parallel()
e := testNewEngine(t).setShardsNumOpts(t, tt.shardNum, func(id int) []shard.Option {
return []shard.Option{
shard.WithLogger(test.NewLogger(t, true)),
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(
blobstor.WithStorages(
newStorages(t.TempDir(), 1<<20))),

View file

@ -3,7 +3,7 @@ package engine
import (
"context"
"errors"
"strconv"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
@ -145,7 +145,7 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
@ -177,7 +177,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
@ -208,7 +208,7 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", strconv.FormatUint(height, 10)),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()
@ -333,7 +333,7 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", strconv.FormatUint(height, 10)),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()

View file

@ -80,7 +80,7 @@ func BenchmarkPut(b *testing.B) {
})
}
func TestDB_PutBlobovniczaUpdate(t *testing.T) {
func TestDB_PutBlobovnicaUpdate(t *testing.T) {
db := newDB(t)
raw1 := testutil.GenerateObject()

View file

@ -22,7 +22,7 @@ type blobovniczaTreeMetrics struct {
m metrics_impl.BlobobvnizcaMetrics
}
func (m *blobovniczaTreeMetrics) Blobovnicza() blobovnicza.Metrics {
func (m *blobovniczaTreeMetrics) Blobovnizca() blobovnicza.Metrics {
return &blobovniczaMetrics{
shardID: func() string { return m.shardID },
path: m.path,
@ -81,26 +81,18 @@ type blobovniczaMetrics struct {
path string
}
func (m *blobovniczaMetrics) AddOpenBlobovniczaSize(size uint64) {
m.m.AddOpenBlobovniczaSize(m.shardID(), m.path, size)
func (m *blobovniczaMetrics) AddOpenBlobovnizcaSize(size uint64) {
m.m.AddOpenBlobovnizcaSize(m.shardID(), m.path, size)
}
func (m *blobovniczaMetrics) SubOpenBlobovniczaSize(size uint64) {
m.m.SubOpenBlobovniczaSize(m.shardID(), m.path, size)
func (m *blobovniczaMetrics) SubOpenBlobovnizcaSize(size uint64) {
m.m.SubOpenBlobovnizcaSize(m.shardID(), m.path, size)
}
func (m *blobovniczaMetrics) IncOpenBlobovniczaCount() {
m.m.IncOpenBlobovniczaCount(m.shardID(), m.path)
func (m *blobovniczaMetrics) IncOpenBlobovnizcaCount() {
m.m.IncOpenBlobovnizcaCount(m.shardID(), m.path)
}
func (m *blobovniczaMetrics) DecOpenBlobovniczaCount() {
m.m.DecOpenBlobovniczaCount(m.shardID(), m.path)
}
func (m *blobovniczaMetrics) AddOpenBlobovniczaItems(items uint64) {
m.m.AddOpenBlobovniczaItems(m.shardID(), m.path, items)
}
func (m *blobovniczaMetrics) SubOpenBlobovniczaItems(items uint64) {
m.m.SubOpenBlobovniczaItems(m.shardID(), m.path, items)
func (m *blobovniczaMetrics) DecOpenBlobovnizcaCount() {
m.m.DecOpenBlobovnizcaCount(m.shardID(), m.path)
}

View file

@ -9,7 +9,6 @@ import (
"math/rand"
"os"
"path/filepath"
"strconv"
"sync"
"time"
@ -292,7 +291,7 @@ func (t *boltForest) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", strconv.FormatUint(height, 10)),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()
@ -890,7 +889,7 @@ func (t *boltForest) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID stri
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
@ -938,7 +937,7 @@ func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
@ -1047,7 +1046,7 @@ func (t *boltForest) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID str
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", strconv.FormatUint(height, 10)),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()

View file

@ -19,7 +19,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -30,6 +30,7 @@ import (
objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
type epochState struct{}
@ -74,7 +75,7 @@ func TestShardOpen(t *testing.T) {
newShard := func() *Shard {
return New(
WithID(NewIDFromBytes([]byte{})),
WithLogger(test.NewLogger(t, true)),
WithLogger(&logger.Logger{Logger: zap.L()}),
WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
{Storage: st},

View file

@ -3,7 +3,6 @@ package shard_test
import (
"context"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
@ -57,10 +56,8 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
_, err = sh.Delete(context.TODO(), delPrm)
require.NoError(t, err)
require.Eventually(t, func() bool {
_, err = sh.Get(context.Background(), getPrm)
return client.IsErrObjectNotFound(err)
}, time.Second, 50*time.Millisecond)
require.True(t, client.IsErrObjectNotFound(err))
})
t.Run("small object", func(t *testing.T) {
@ -83,9 +80,7 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
_, err = sh.Delete(context.Background(), delPrm)
require.NoError(t, err)
require.Eventually(t, func() bool {
_, err = sh.Get(context.Background(), getPrm)
return client.IsErrObjectNotFound(err)
}, time.Second, 50*time.Millisecond)
require.True(t, client.IsErrObjectNotFound(err))
})
}
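The hunk above replaces an immediate assertion with a polling one because the delete becomes asynchronous (GC-driven). require.Eventually re-runs the probe every tick until it returns true or the timeout expires. A standalone sketch with an invented isGone probe:

package main

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestAsyncDelete(t *testing.T) {
	deletedAt := time.Now().Add(200 * time.Millisecond) // pretend GC removes the object a bit later
	isGone := func() bool { return time.Now().After(deletedAt) }

	// fails only if the probe never succeeds within one second
	require.Eventually(t, isGone, time.Second, 50*time.Millisecond)
}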

View file

@ -15,13 +15,14 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
@ -30,13 +31,14 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
rootPath := t.TempDir()
var sh *Shard
l := test.NewLogger(t, true)
l := &logger.Logger{Logger: zap.L()}
blobOpts := []blobstor.Option{
blobstor.WithLogger(test.NewLogger(t, true)),
blobstor.WithLogger(&logger.Logger{Logger: zap.L()}),
blobstor.WithStorages([]blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
blobovniczatree.WithLogger(test.NewLogger(t, true)),
blobovniczatree.WithLogger(&logger.Logger{Logger: zap.L()}),
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),

View file

@ -3,7 +3,6 @@ package shard_test
import (
"context"
"path/filepath"
"sync"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@ -20,7 +19,6 @@ import (
)
type metricsStore struct {
mtx sync.Mutex
objCounters map[string]uint64
cnrSize map[string]int64
pldSize int64
@ -28,40 +26,13 @@ type metricsStore struct {
errCounter int64
}
func (m *metricsStore) SetShardID(_ string) {}
func (m metricsStore) SetShardID(_ string) {}
func (m *metricsStore) SetObjectCounter(objectType string, v uint64) {
m.mtx.Lock()
defer m.mtx.Unlock()
func (m metricsStore) SetObjectCounter(objectType string, v uint64) {
m.objCounters[objectType] = v
}
func (m *metricsStore) getObjectCounter(objectType string) uint64 {
m.mtx.Lock()
defer m.mtx.Unlock()
return m.objCounters[objectType]
}
func (m *metricsStore) containerSizes() map[string]int64 {
m.mtx.Lock()
defer m.mtx.Unlock()
r := make(map[string]int64, len(m.cnrSize))
for c, s := range m.cnrSize {
r[c] = s
}
return r
}
func (m *metricsStore) payloadSize() int64 {
m.mtx.Lock()
defer m.mtx.Unlock()
return m.pldSize
}
func (m *metricsStore) AddToObjectCounter(objectType string, delta int) {
m.mtx.Lock()
defer m.mtx.Unlock()
func (m metricsStore) AddToObjectCounter(objectType string, delta int) {
switch {
case delta > 0:
m.objCounters[objectType] += uint64(delta)
@ -78,51 +49,35 @@ func (m *metricsStore) AddToObjectCounter(objectType string, delta int) {
}
}
func (m *metricsStore) IncObjectCounter(objectType string) {
m.mtx.Lock()
defer m.mtx.Unlock()
func (m metricsStore) IncObjectCounter(objectType string) {
m.objCounters[objectType] += 1
}
func (m *metricsStore) DecObjectCounter(objectType string) {
m.mtx.Lock()
defer m.mtx.Unlock()
func (m metricsStore) DecObjectCounter(objectType string) {
m.AddToObjectCounter(objectType, -1)
}
func (m *metricsStore) SetMode(mode mode.Mode) {
m.mtx.Lock()
defer m.mtx.Unlock()
m.mode = mode
}
func (m *metricsStore) AddToContainerSize(cnr string, size int64) {
m.mtx.Lock()
defer m.mtx.Unlock()
func (m metricsStore) AddToContainerSize(cnr string, size int64) {
m.cnrSize[cnr] += size
}
func (m *metricsStore) AddToPayloadSize(size int64) {
m.mtx.Lock()
defer m.mtx.Unlock()
m.pldSize += size
}
func (m *metricsStore) IncErrorCounter() {
m.mtx.Lock()
defer m.mtx.Unlock()
m.errCounter += 1
}
func (m *metricsStore) ClearErrorCounter() {
m.mtx.Lock()
defer m.mtx.Unlock()
m.errCounter = 0
}
func (m *metricsStore) DeleteShardMetrics() {
m.mtx.Lock()
defer m.mtx.Unlock()
m.errCounter = 0
}
@ -147,10 +102,10 @@ func TestCounters(t *testing.T) {
}
t.Run("defaults", func(t *testing.T) {
require.Zero(t, mm.getObjectCounter(physical))
require.Zero(t, mm.getObjectCounter(logical))
require.Empty(t, mm.containerSizes())
require.Zero(t, mm.payloadSize())
require.Zero(t, mm.objCounters[physical])
require.Zero(t, mm.objCounters[logical])
require.Empty(t, mm.cnrSize)
require.Zero(t, mm.pldSize)
})
var totalPayload int64
@ -173,10 +128,10 @@ func TestCounters(t *testing.T) {
require.NoError(t, err)
}
require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical))
require.Equal(t, uint64(objNumber), mm.getObjectCounter(logical))
require.Equal(t, expectedSizes, mm.containerSizes())
require.Equal(t, totalPayload, mm.payloadSize())
require.Equal(t, uint64(objNumber), mm.objCounters[physical])
require.Equal(t, uint64(objNumber), mm.objCounters[logical])
require.Equal(t, expectedSizes, mm.cnrSize)
require.Equal(t, totalPayload, mm.pldSize)
})
t.Run("inhume_GC", func(t *testing.T) {
@ -190,10 +145,10 @@ func TestCounters(t *testing.T) {
require.NoError(t, err)
}
require.Equal(t, uint64(objNumber), mm.getObjectCounter(physical))
require.Equal(t, uint64(objNumber-inhumedNumber), mm.getObjectCounter(logical))
require.Equal(t, expectedSizes, mm.containerSizes())
require.Equal(t, totalPayload, mm.payloadSize())
require.Equal(t, uint64(objNumber), mm.objCounters[physical])
require.Equal(t, uint64(objNumber-inhumedNumber), mm.objCounters[logical])
require.Equal(t, expectedSizes, mm.cnrSize)
require.Equal(t, totalPayload, mm.pldSize)
oo = oo[inhumedNumber:]
})
@ -202,8 +157,8 @@ func TestCounters(t *testing.T) {
var prm shard.InhumePrm
ts := objectcore.AddressOf(testutil.GenerateObject())
phy := mm.getObjectCounter(physical)
logic := mm.getObjectCounter(logical)
phy := mm.objCounters[physical]
logic := mm.objCounters[logical]
inhumedNumber := int(phy / 4)
prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
@ -211,10 +166,10 @@ func TestCounters(t *testing.T) {
_, err := sh.Inhume(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, phy, mm.getObjectCounter(physical))
require.Equal(t, logic-uint64(inhumedNumber), mm.getObjectCounter(logical))
require.Equal(t, expectedSizes, mm.containerSizes())
require.Equal(t, totalPayload, mm.payloadSize())
require.Equal(t, phy, mm.objCounters[physical])
require.Equal(t, logic-uint64(inhumedNumber), mm.objCounters[logical])
require.Equal(t, expectedSizes, mm.cnrSize)
require.Equal(t, totalPayload, mm.pldSize)
oo = oo[inhumedNumber:]
})
@ -222,8 +177,8 @@ func TestCounters(t *testing.T) {
t.Run("Delete", func(t *testing.T) {
var prm shard.DeletePrm
phy := mm.getObjectCounter(physical)
logic := mm.getObjectCounter(logical)
phy := mm.objCounters[physical]
logic := mm.objCounters[logical]
deletedNumber := int(phy / 4)
prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...)
@ -231,8 +186,8 @@ func TestCounters(t *testing.T) {
_, err := sh.Delete(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, phy-uint64(deletedNumber), mm.getObjectCounter(physical))
require.Equal(t, logic-uint64(deletedNumber), mm.getObjectCounter(logical))
require.Equal(t, phy-uint64(deletedNumber), mm.objCounters[physical])
require.Equal(t, logic-uint64(deletedNumber), mm.objCounters[logical])
var totalRemovedpayload uint64
for i := range oo[:deletedNumber] {
removedPayload := oo[i].PayloadSize()
@ -241,8 +196,8 @@ func TestCounters(t *testing.T) {
cnr, _ := oo[i].ContainerID()
expectedSizes[cnr.EncodeToString()] -= int64(removedPayload)
}
require.Equal(t, expectedSizes, mm.containerSizes())
require.Equal(t, totalPayload-int64(totalRemovedpayload), mm.payloadSize())
require.Equal(t, expectedSizes, mm.cnrSize)
require.Equal(t, totalPayload-int64(totalRemovedpayload), mm.pldSize)
})
}
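The metricsStore changes above swap value receivers for pointer receivers plus a mutex. With a value receiver, updates to map fields still reach the shared backing map, but updates to scalar fields such as pldSize mutate a copy and are silently lost, and none of it is safe under concurrent use. A standalone illustration (not shard code):

package main

import "fmt"

type store struct {
	counters map[string]uint64
	payload  int64
}

func (s store) addByValue(k string, size int64) {
	s.counters[k]++   // visible to the caller: maps share backing storage
	s.payload += size // lost: only the copy's field changes
}

func (s *store) addByPointer(k string, size int64) {
	s.counters[k]++
	s.payload += size
}

func main() {
	s := store{counters: map[string]uint64{}}
	s.addByValue("phy", 10)
	fmt.Println(s.counters["phy"], s.payload) // 1 0
	s.addByPointer("phy", 10)
	fmt.Println(s.counters["phy"], s.payload) // 2 10
}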

View file

@ -14,11 +14,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func TestShard_GetRange(t *testing.T) {
@ -81,7 +82,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
[]blobstor.Option{blobstor.WithStorages([]blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
blobovniczatree.WithLogger(test.NewLogger(t, true)),
blobovniczatree.WithLogger(&logger.Logger{Logger: zap.L()}),
blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),

View file

@ -11,7 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -20,6 +20,7 @@ import (
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func TestShardReload(t *testing.T) {
@ -27,7 +28,8 @@ func TestShardReload(t *testing.T) {
p := t.Name()
defer os.RemoveAll(p)
l := test.NewLogger(t, true)
l := &logger.Logger{Logger: zap.L()}
blobOpts := []blobstor.Option{
blobstor.WithLogger(l),
blobstor.WithStorages([]blobstor.SubStorage{

View file

@ -16,11 +16,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebadger"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachebbolt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
type epochState struct {
@ -58,11 +59,11 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
if bsOpts == nil {
bsOpts = []blobstor.Option{
blobstor.WithLogger(test.NewLogger(t, true)),
blobstor.WithLogger(&logger.Logger{Logger: zap.L()}),
blobstor.WithStorages([]blobstor.SubStorage{
{
Storage: blobovniczatree.NewBlobovniczaTree(
blobovniczatree.WithLogger(test.NewLogger(t, true)),
blobovniczatree.WithLogger(&logger.Logger{Logger: zap.L()}),
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(1),
blobovniczatree.WithBlobovniczaShallowWidth(1)),
@ -80,7 +81,7 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
opts := []shard.Option{
shard.WithID(shard.NewIDFromBytes([]byte{})),
shard.WithLogger(test.NewLogger(t, true)),
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(bsOpts...),
shard.WithMetaBaseOptions(
append([]meta.Option{
@ -101,7 +102,7 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
require.NoError(t, err)
return pool
}),
shard.WithGCRemoverSleepInterval(100 * time.Millisecond),
shard.WithGCRemoverSleepInterval(1 * time.Millisecond),
}
sh = shard.New(opts...)

View file

@ -2,7 +2,7 @@ package shard
import (
"context"
"strconv"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@ -140,7 +140,7 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n
attribute.String("shard_id", s.ID().String()),
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
@ -165,7 +165,7 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
attribute.String("shard_id", s.ID().String()),
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("node_id", strconv.FormatUint(nodeID, 10)),
attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
),
)
defer span.End()
@ -190,7 +190,7 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string,
attribute.String("shard_id", s.ID().String()),
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", strconv.FormatUint(height, 10)),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()
@ -309,7 +309,7 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre
attribute.String("shard_id", s.ID().String()),
attribute.String("container_id", cid.EncodeToString()),
attribute.String("tree_id", treeID),
attribute.String("height", strconv.FormatUint(height, 10)),
attribute.String("height", fmt.Sprintf("%d", height)),
),
)
defer span.End()

View file

@ -21,10 +21,6 @@ type cache struct {
// flushCh is a channel with objects to flush.
flushCh chan *objectSDK.Object
// scheduled4Flush contains objects scheduled for flush via flushCh
// helps to avoid multiple flushing of one object
scheduled4Flush map[oid.Address]struct{}
scheduled4FlushMtx sync.RWMutex
// closeCh is close channel, protected by modeMtx.
closeCh chan struct{}
// wg is a wait group for flush workers.
@ -53,7 +49,6 @@ func New(opts ...Option) writecache.Cache {
c := &cache{
flushCh: make(chan *objectSDK.Object),
mode: mode.ReadWrite,
scheduled4Flush: map[oid.Address]struct{}{},
options: options{
log: &logger.Logger{Logger: zap.NewNop()},

View file

@ -39,10 +39,10 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
return writecache.ErrReadOnly
}
key := addr2key(addr)
saddr := addr.EncodeToString()
err := c.db.Update(func(tx *badger.Txn) error {
it, err := tx.Get(key[:])
it, err := tx.Get([]byte(saddr))
if err != nil {
if err == badger.ErrKeyNotFound {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
@ -51,10 +51,10 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
}
if it.ValueSize() > 0 {
storageType = writecache.StorageTypeDB
err := tx.Delete(key[:])
err := tx.Delete([]byte(saddr))
if err == nil {
storagelog.Write(c.log,
storagelog.AddressField(addr.EncodeToString()),
storagelog.AddressField(saddr),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("db DELETE"),
)

View file

@ -18,7 +18,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/dgraph-io/badger/v4"
"github.com/dgraph-io/ristretto/z"
"github.com/mr-tron/base58"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
@ -35,65 +35,6 @@ const (
defaultFlushInterval = time.Second
)
type collector struct {
cache *cache
scheduled int
processed int
cancel func()
}
func (c *collector) Send(buf *z.Buffer) error {
list, err := badger.BufferToKVList(buf)
if err != nil {
return err
}
for _, kv := range list.Kv {
select {
case <-c.cache.closeCh:
c.cancel()
return nil
default:
}
if kv.StreamDone {
return nil
}
if c.scheduled >= flushBatchSize {
c.cancel()
return nil
}
if got, want := len(kv.Key), len(internalKey{}); got != want {
c.cache.log.Debug(
fmt.Sprintf("not expected db key len: got %d, want %d", got, want))
continue
}
c.processed++
obj := objectSDK.New()
val := bytes.Clone(kv.Value)
if err = obj.Unmarshal(val); err != nil {
continue
}
addr := objectCore.AddressOf(obj)
c.cache.scheduled4FlushMtx.RLock()
_, ok := c.cache.scheduled4Flush[addr]
c.cache.scheduled4FlushMtx.RUnlock()
if ok {
c.cache.log.Debug(logs.WritecacheBadgerObjAlreadyScheduled, zap.Stringer("obj", addr))
continue
}
c.cache.scheduled4FlushMtx.Lock()
c.cache.scheduled4Flush[addr] = struct{}{}
c.cache.scheduled4FlushMtx.Unlock()
c.scheduled++
select {
case c.cache.flushCh <- obj:
case <-c.cache.closeCh:
c.cancel()
return nil
}
}
return nil
}
// runFlushLoop starts background workers which periodically flush objects to the blobstor.
func (c *cache) runFlushLoop() {
for i := 0; i < c.workersCount; i++ {
@ -121,12 +62,17 @@ func (c *cache) runFlushLoop() {
}
func (c *cache) flushSmallObjects() {
var lastKey internalKey
var m []objectInfo
for {
select {
case <-c.closeCh:
return
default:
}
m = m[:0]
c.modeMtx.RLock()
if c.readOnly() {
c.modeMtx.RUnlock()
@ -140,24 +86,61 @@ func (c *cache) flushSmallObjects() {
c.modeMtx.RUnlock()
return
}
ctx, cancel := context.WithCancel(context.TODO())
coll := collector{
cache: c,
cancel: cancel,
_ = c.db.View(func(tx *badger.Txn) error {
it := tx.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
if len(lastKey) == 0 {
it.Rewind()
} else {
it.Seek(lastKey[:])
if it.Valid() && bytes.Equal(it.Item().Key(), lastKey[:]) {
it.Next()
}
stream := c.db.NewStream()
// All calls to Send are done by a single goroutine
stream.Send = coll.Send
if err := stream.Orchestrate(ctx); err != nil {
c.log.Debug(fmt.Sprintf(
"error during flushing object from wc: %s", err))
}
for ; it.Valid() && len(m) < flushBatchSize; it.Next() {
if got, want := int(it.Item().KeySize()), len(lastKey); got != want {
return fmt.Errorf("invalid db key len: got %d, want %d", got, want)
}
it.Item().KeyCopy(lastKey[:])
value, err := it.Item().ValueCopy(nil)
if err != nil {
return err
}
m = append(m, objectInfo{
addr: lastKey.address(),
data: value,
})
}
return nil
})
var count int
for i := range m {
obj := objectSDK.New()
if err := obj.Unmarshal(m[i].data); err != nil {
continue
}
count++
select {
case c.flushCh <- obj:
case <-c.closeCh:
c.modeMtx.RUnlock()
return
}
}
if count == 0 {
c.modeMtx.RUnlock()
if coll.scheduled == 0 {
break
}
c.modeMtx.RUnlock()
c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
zap.Int("scheduled", coll.scheduled), zap.Int("processed", coll.processed))
zap.Int("count", count),
zap.String("start", base58.Encode(lastKey[:])))
}
}
@ -184,14 +167,13 @@ func (c *cache) workerFlushSmall() {
return
}
addr := objectCore.AddressOf(obj)
err := c.flushObject(context.TODO(), obj, nil, writecache.StorageTypeDB)
if err == nil {
c.deleteFromDB([]internalKey{addr2key(addr)})
if err != nil {
// Error is handled in flushObject.
continue
}
c.scheduled4FlushMtx.Lock()
delete(c.scheduled4Flush, addr)
c.scheduled4FlushMtx.Unlock()
c.deleteFromDB([]string{objectCore.AddressOf(obj).EncodeToString()})
}
}
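The rewritten flushSmallObjects above scans the badger DB in resumable batches instead of using a Stream: remember the last key seen, Seek to it on the next pass, skip that exact key, and stop after flushBatchSize items. A minimal standalone sketch of that pattern with badger v4 (the /tmp path and batch size are placeholders):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/dgraph-io/badger/v4"
)

// scanBatch returns up to batchSize keys strictly after lastKey.
func scanBatch(db *badger.DB, lastKey []byte, batchSize int) (keys [][]byte, err error) {
	err = db.View(func(tx *badger.Txn) error {
		it := tx.NewIterator(badger.DefaultIteratorOptions)
		defer it.Close()

		if len(lastKey) == 0 {
			it.Rewind()
		} else {
			it.Seek(lastKey)
			if it.Valid() && bytes.Equal(it.Item().Key(), lastKey) {
				it.Next() // do not re-process the key the previous batch ended on
			}
		}
		for ; it.Valid() && len(keys) < batchSize; it.Next() {
			keys = append(keys, it.Item().KeyCopy(nil))
		}
		return nil
	})
	return keys, err
}

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger-example"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var last []byte
	for {
		keys, err := scanBatch(db, last, 128)
		if err != nil || len(keys) == 0 {
			break
		}
		last = keys[len(keys)-1]
		fmt.Println("batch of", len(keys), "keys")
	}
}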

View file

@ -10,17 +10,18 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachetest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/dgraph-io/badger/v4"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func TestFlush(t *testing.T) {
createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs writecache.MainStorage, opts ...Option) writecache.Cache {
return New(
append([]Option{
WithLogger(test.NewLogger(t, true)),
WithLogger(&logger.Logger{Logger: zap.L()}),
WithPath(filepath.Join(t.TempDir(), "writecache")),
WithMetabase(mb),
WithBlobstor(bs),
@ -52,9 +53,8 @@ func TestFlush(t *testing.T) {
Desc: "db, invalid object",
InjectFn: func(t *testing.T, wc writecache.Cache) {
c := wc.(*cache)
key := addr2key(oidtest.Address())
require.NoError(t, c.db.Update(func(tx *badger.Txn) error {
return tx.Set(key[:], []byte{1, 2, 3})
return tx.Set([]byte(oidtest.Address().EncodeToString()), []byte{1, 2, 3})
}))
},
},

View file

@ -5,13 +5,14 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
func TestGeneric(t *testing.T) {
storagetest.TestAll(t, func(t *testing.T) storagetest.Component {
return New(
WithLogger(test.NewLogger(t, true)),
WithLogger(&logger.Logger{Logger: zap.L()}),
WithFlushWorkersCount(2),
WithPath(t.TempDir()),
WithGCInterval(1*time.Second))

View file

@ -58,7 +58,7 @@ func (c *cache) openStore(readOnly bool) error {
return nil
}
func (c *cache) deleteFromDB(keys []internalKey) []internalKey {
func (c *cache) deleteFromDB(keys []string) []string {
if len(keys) == 0 {
return keys
}
@ -67,7 +67,7 @@ func (c *cache) deleteFromDB(keys []internalKey) []internalKey {
var errorIndex int
for errorIndex = range keys {
if err := wb.Delete(keys[errorIndex][:]); err != nil {
if err := wb.Delete([]byte(keys[errorIndex])); err != nil {
break
}
}

View file

@ -13,17 +13,18 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache/writecachetest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
func TestFlush(t *testing.T) {
createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs writecache.MainStorage, opts ...Option) writecache.Cache {
return New(
append([]Option{
WithLogger(test.NewLogger(t, true)),
WithLogger(&logger.Logger{Logger: zap.L()}),
WithPath(filepath.Join(t.TempDir(), "writecache")),
WithSmallObjectSize(smallSize),
WithMetabase(mb),

View file

@ -4,13 +4,14 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/storagetest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
func TestGeneric(t *testing.T) {
storagetest.TestAll(t, func(t *testing.T) storagetest.Component {
return New(
WithLogger(test.NewLogger(t, true)),
WithLogger(&logger.Logger{Logger: zap.L()}),
WithFlushWorkersCount(2),
WithPath(t.TempDir()))
})

View file

@ -15,74 +15,64 @@ type BlobobvnizcaMetrics interface {
AddBlobobvnizcaTreePut(shardID, path string, size int)
AddBlobobvnizcaTreeGet(shardID, path string, size int)
AddOpenBlobovniczaSize(shardID, path string, size uint64)
SubOpenBlobovniczaSize(shardID, path string, size uint64)
AddOpenBlobovnizcaSize(shardID, path string, size uint64)
SubOpenBlobovnizcaSize(shardID, path string, size uint64)
AddOpenBlobovniczaItems(shardID, path string, items uint64)
SubOpenBlobovniczaItems(shardID, path string, items uint64)
IncOpenBlobovniczaCount(shardID, path string)
DecOpenBlobovniczaCount(shardID, path string)
IncOpenBlobovnizcaCount(shardID, path string)
DecOpenBlobovnizcaCount(shardID, path string)
}
type blobovnicza struct {
type blobovnizca struct {
treeMode *shardIDPathModeValue
treeReqDuration *prometheus.HistogramVec
treePut *prometheus.CounterVec
treeGet *prometheus.CounterVec
treeOpenSize *prometheus.GaugeVec
treeOpenItems *prometheus.GaugeVec
treeOpenCounter *prometheus.GaugeVec
}
func newBlobovnicza() *blobovnicza {
return &blobovnicza{
treeMode: newShardIDPathMode(blobovniczaTreeSubSystem, "mode", "Blobovnicza tree mode"),
func newBlobovnizca() *blobovnizca {
return &blobovnizca{
treeMode: newShardIDPathMode(blobovniczaTreeSubSystem, "mode", "Blobovnizca tree mode"),
treeReqDuration: metrics.NewHistogramVec(prometheus.HistogramOpts{
Namespace: namespace,
Subsystem: blobovniczaTreeSubSystem,
Name: "request_duration_seconds",
Help: "Accumulated Blobovnicza tree request process duration",
Help: "Accumulated Blobovnizca tree request process duration",
}, []string{shardIDLabel, pathLabel, successLabel, methodLabel, withStorageIDLabel}),
treePut: metrics.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: blobovniczaTreeSubSystem,
Name: "put_bytes",
Help: "Accumulated payload size written to Blobovnicza tree",
Help: "Accumulated payload size written to Blobovnizca tree",
}, []string{shardIDLabel, pathLabel}),
treeGet: metrics.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: blobovniczaTreeSubSystem,
Name: "get_bytes",
Help: "Accumulated payload size read from Blobovnicza tree",
Help: "Accumulated payload size read from Blobovnizca tree",
}, []string{shardIDLabel, pathLabel}),
treeOpenSize: metrics.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: blobovniczaTreeSubSystem,
Name: "open_blobovnicza_size_bytes",
Help: "Size of opened blobovniczas of Blobovnicza tree",
}, []string{shardIDLabel, pathLabel}),
treeOpenItems: metrics.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: blobovniczaTreeSubSystem,
Name: "open_blobovnicza_items_total",
Help: "Count of items in opened blobovniczas of Blobovnicza tree",
Name: "open_blobovnizca_size_bytes",
Help: "Size of opened blobovnizcas of Blobovnizca tree",
}, []string{shardIDLabel, pathLabel}),
treeOpenCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: blobovniczaTreeSubSystem,
Name: "open_blobovnicza_count",
Help: "Count of opened blobovniczas of Blobovnicza tree",
Name: "open_blobovnizca_count",
Help: "Count of opened blobovnizcas of Blobovnizca tree",
}, []string{shardIDLabel, pathLabel}),
}
}
func (b *blobovnicza) SetBlobobvnizcaTreeMode(shardID, path string, readOnly bool) {
func (b *blobovnizca) SetBlobobvnizcaTreeMode(shardID, path string, readOnly bool) {
b.treeMode.SetMode(shardID, path, modeFromBool(readOnly))
}
func (b *blobovnicza) CloseBlobobvnizcaTree(shardID, path string) {
func (b *blobovnizca) CloseBlobobvnizcaTree(shardID, path string) {
b.treeMode.SetMode(shardID, path, closedMode)
b.treeReqDuration.DeletePartialMatch(prometheus.Labels{
shardIDLabel: shardID,
@ -98,7 +88,7 @@ func (b *blobovnicza) CloseBlobobvnizcaTree(shardID, path string) {
})
}
func (b *blobovnicza) BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) {
func (b *blobovnizca) BlobobvnizcaTreeMethodDuration(shardID, path string, method string, d time.Duration, success bool, withStorageID NullBool) {
b.treeReqDuration.With(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
@ -108,58 +98,44 @@ func (b *blobovnicza) BlobobvnizcaTreeMethodDuration(shardID, path string, metho
}).Observe(d.Seconds())
}
func (b *blobovnicza) AddBlobobvnizcaTreePut(shardID, path string, size int) {
func (b *blobovnizca) AddBlobobvnizcaTreePut(shardID, path string, size int) {
b.treePut.With(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
}).Add(float64(size))
}
func (b *blobovnicza) AddBlobobvnizcaTreeGet(shardID, path string, size int) {
func (b *blobovnizca) AddBlobobvnizcaTreeGet(shardID, path string, size int) {
b.treeGet.With(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
}).Add(float64(size))
}
func (b *blobovnicza) AddOpenBlobovniczaSize(shardID, path string, size uint64) {
func (b *blobovnizca) AddOpenBlobovnizcaSize(shardID, path string, size uint64) {
b.treeOpenSize.With(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
}).Add(float64(size))
}
func (b *blobovnicza) SubOpenBlobovniczaSize(shardID, path string, size uint64) {
func (b *blobovnizca) SubOpenBlobovnizcaSize(shardID, path string, size uint64) {
b.treeOpenSize.With(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
}).Sub(float64(size))
}
func (b *blobovnicza) IncOpenBlobovniczaCount(shardID, path string) {
func (b *blobovnizca) IncOpenBlobovnizcaCount(shardID, path string) {
b.treeOpenCounter.With(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
}).Inc()
}
func (b *blobovnicza) DecOpenBlobovniczaCount(shardID, path string) {
func (b *blobovnizca) DecOpenBlobovnizcaCount(shardID, path string) {
b.treeOpenCounter.With(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
}).Dec()
}
func (b *blobovnicza) AddOpenBlobovniczaItems(shardID, path string, items uint64) {
b.treeOpenItems.With(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
}).Add(float64(items))
}
func (b *blobovnicza) SubOpenBlobovniczaItems(shardID, path string, items uint64) {
b.treeOpenItems.With(prometheus.Labels{
shardIDLabel: shardID,
pathLabel: path,
}).Sub(float64(items))
}
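
For context on the gauge changes above: these metrics follow the standard client_golang pattern of a GaugeVec keyed by shard and path labels, with Inc/Dec/Add/Sub applied to the labelled child. Below is a minimal standalone sketch of that pattern; the metric and label names here are illustrative, not the node's real ones.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// GaugeVec keyed by shard and path, mirroring the labelling scheme above.
	openCount := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "example",
		Subsystem: "blobovnizca",
		Name:      "open_count",
		Help:      "Count of opened sub-storages (illustrative).",
	}, []string{"shard_id", "path"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(openCount)

	labels := prometheus.Labels{"shard_id": "shard-1", "path": "/srv/blob/0"}
	openCount.With(labels).Inc() // one sub-storage opened
	openCount.With(labels).Dec() // ...and closed again

	mfs, _ := reg.Gather()
	fmt.Println("collected metric families:", len(mfs))
}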

View file

@ -14,7 +14,7 @@ type NodeMetrics struct {
epoch prometheus.Gauge
fstree *fstreeMetrics
blobstore *blobstoreMetrics
blobobvnizca *blobovnicza
blobobvnizca *blobovnizca
metabase *metabaseMetrics
pilorama *piloramaMetrics
grpc *grpcServerMetrics
@ -35,7 +35,7 @@ func NewNodeMetrics() *NodeMetrics {
}),
fstree: newFSTreeMetrics(),
blobstore: newBlobstoreMetrics(),
blobobvnizca: newBlobovnicza(),
blobobvnizca: newBlobovnizca(),
metabase: newMetabaseMetrics(),
pilorama: newPiloramaMetrics(),
grpc: newGrpcServerMetrics(),

View file

@ -29,7 +29,6 @@ const (
containersOfMethod = "containersOf"
eaclMethod = "eACL"
setEACLMethod = "setEACL"
deletionInfoMethod = "deletionInfo"
startEstimationMethod = "startContainerEstimation"
stopEstimationMethod = "stopContainerEstimation"

View file

@ -1,68 +0,0 @@
package container
import (
"crypto/sha256"
"fmt"
"strings"
containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) {
return DeletionInfo((*Client)(x), cnr)
}
type deletionInfo interface {
DeletionInfo(cid []byte) (*containercore.DelInfo, error)
}
func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) {
binCnr := make([]byte, sha256.Size)
cnr.Encode(binCnr)
return c.DeletionInfo(binCnr)
}
func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) {
prm := client.TestInvokePrm{}
prm.SetMethod(deletionInfoMethod)
prm.SetArgs(cid)
res, err := c.client.TestInvoke(prm)
if err != nil {
if strings.Contains(err.Error(), containerContract.NotFoundError) {
return nil, new(apistatus.ContainerNotFound)
}
return nil, fmt.Errorf("could not perform test invocation (%s): %w", deletionInfoMethod, err)
} else if ln := len(res); ln != 1 {
return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln)
}
arr, err := client.ArrayFromStackItem(res[0])
if err != nil {
return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err)
}
if len(arr) != 2 {
return nil, fmt.Errorf("unexpected container stack item count (%s): %d", deletionInfoMethod, len(arr))
}
owner, err := client.BytesFromStackItem(arr[0])
if err != nil {
return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err)
}
epoch, err := client.IntFromStackItem(arr[1])
if err != nil {
return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err)
}
return &containercore.DelInfo{
Owner: owner,
Epoch: int(epoch),
}, nil
}

View file

@ -1,4 +1,4 @@
package object
package v2
import (
"bytes"
@ -11,64 +11,50 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)
type InnerRing interface {
InnerRingKeys() ([][]byte, error)
}
type SenderClassifier struct {
type senderClassifier struct {
log *logger.Logger
innerRing InnerRing
innerRing InnerRingFetcher
netmap core.Source
}
func NewSenderClassifier(innerRing InnerRing, netmap core.Source, log *logger.Logger) SenderClassifier {
return SenderClassifier{
log: log,
innerRing: innerRing,
netmap: netmap,
}
type classifyResult struct {
role acl.Role
key []byte
}
type ClassifyResult struct {
Role acl.Role
Key []byte
}
func (c SenderClassifier) Classify(
ownerID *user.ID,
ownerKey *keys.PublicKey,
func (c senderClassifier) classify(
req MetaWithToken,
idCnr cid.ID,
cnr container.Container) (res *ClassifyResult, err error) {
cnr container.Container) (res *classifyResult, err error) {
ownerID, ownerKey, err := req.RequestOwner()
if err != nil {
return nil, err
}
ownerKeyInBytes := ownerKey.Bytes()
// TODO: #767 get owner from frostfs.id if present
// if request owner is the same as container owner, return RoleUser
if ownerID.Equals(cnr.Owner()) {
return &ClassifyResult{
Role: acl.RoleOwner,
Key: ownerKeyInBytes,
return &classifyResult{
role: acl.RoleOwner,
key: ownerKeyInBytes,
}, nil
}
return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr)
}
func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
if err != nil {
// do not throw error, try best case matching
c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
zap.String("error", err.Error()))
} else if isInnerRingNode {
return &ClassifyResult{
Role: acl.RoleInnerRing,
Key: ownerKeyInBytes,
return &classifyResult{
role: acl.RoleInnerRing,
key: ownerKeyInBytes,
}, nil
}
@ -83,20 +69,20 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC
c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
zap.String("error", err.Error()))
} else if isContainerNode {
return &ClassifyResult{
Role: acl.RoleContainer,
Key: ownerKeyInBytes,
return &classifyResult{
role: acl.RoleContainer,
key: ownerKeyInBytes,
}, nil
}
// if none of above, return RoleOthers
return &ClassifyResult{
Role: acl.RoleOthers,
Key: ownerKeyInBytes,
return &classifyResult{
role: acl.RoleOthers,
key: ownerKeyInBytes,
}, nil
}
func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
func (c senderClassifier) isInnerRingKey(owner []byte) (bool, error) {
innerRingKeys, err := c.innerRing.InnerRingKeys()
if err != nil {
return false, err
@ -112,7 +98,7 @@ func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
return false, nil
}
func (c SenderClassifier) isContainerKey(
func (c senderClassifier) isContainerKey(
owner, idCnr []byte,
cnr container.Container) (bool, error) {
nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap
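
To summarize the classification flow in this file: the request owner is mapped to a role by checking, in order, container ownership, inner-ring membership, and container-node membership, falling back to the "others" role. The sketch below shows only that decision order, using local placeholder types instead of the SDK's acl.Role and the node's netmap sources.

package main

import "fmt"

type role string

const (
	roleOwner     role = "owner"
	roleInnerRing role = "inner-ring"
	roleContainer role = "container"
	roleOthers    role = "others"
)

// classify mirrors the order of checks above: owner first, then inner ring,
// then container node, otherwise "others".
func classify(isOwner, isInnerRing, isContainerNode bool) role {
	switch {
	case isOwner:
		return roleOwner
	case isInnerRing:
		return roleInnerRing
	case isContainerNode:
		return roleContainer
	default:
		return roleOthers
	}
}

func main() {
	fmt.Println(classify(false, true, false)) // prints "inner-ring"
}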

View file

@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@ -25,7 +24,7 @@ import (
type Service struct {
*cfg
c objectCore.SenderClassifier
c senderClassifier
}
type putStreamBasicChecker struct {
@ -96,7 +95,11 @@ func New(next object.ServiceServer,
return Service{
cfg: cfg,
c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log),
c: senderClassifier{
log: cfg.log,
innerRing: cfg.irFetcher,
netmap: cfg.nm,
},
}
}
@ -649,24 +652,20 @@ func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (in
}
// find request role and key
ownerID, ownerKey, err := req.RequestOwner()
if err != nil {
return info, err
}
res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value)
res, err := b.c.classify(req, idCnr, cnr.Value)
if err != nil {
return info, err
}
info.basicACL = cnr.Value.BasicACL()
info.requestRole = res.Role
info.requestRole = res.role
info.operation = op
info.cnrOwner = cnr.Value.Owner()
info.idCnr = idCnr
// it is assumed that at the moment the key will be valid,
// otherwise the request would not pass validation
info.senderKey = res.Key
info.senderKey = res.key
// add bearer token if it is present in request
info.bearer = req.bearer

View file

@ -0,0 +1,7 @@
package deletesvc
import "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
func (exec *execCtx) executeOnContainer() {
exec.log.Debug(logs.DeleteRequestIsNotRolledOverToTheContainer)
}

View file

@ -29,17 +29,33 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
exec.setLogger(s.log)
return exec.execute(ctx)
exec.execute(ctx)
return exec.statusError.err
}
func (exec *execCtx) execute(ctx context.Context) error {
func (exec *execCtx) execute(ctx context.Context) {
exec.log.Debug(logs.ServingRequest)
if err := exec.executeLocal(ctx); err != nil {
exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error()))
return err
}
// perform local operation
exec.executeLocal(ctx)
exec.log.Debug(logs.OperationFinishedSuccessfully)
return nil
exec.analyzeStatus(true)
}
func (exec *execCtx) analyzeStatus(execCnr bool) {
// analyze local result
switch exec.status {
case statusOK:
exec.log.Debug(logs.OperationFinishedSuccessfully)
default:
exec.log.Debug(logs.OperationFinishedWithError,
zap.String("error", exec.err.Error()),
)
if execCnr {
exec.executeOnContainer()
exec.analyzeStatus(false)
}
}
}
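
For readers following the control-flow change above: instead of returning errors up the call chain, the execution context records a status/err pair at each step, and the caller inspects it once at the end. A minimal, self-contained sketch of that pattern with illustrative names (not the service's actual types):

package main

import (
	"errors"
	"fmt"
)

const (
	statusUndefined int = iota
	statusOK
)

// execCtx carries the outcome of each step instead of returning an error.
type execCtx struct {
	status int
	err    error
}

func (e *execCtx) step(fail bool) bool {
	if fail {
		e.status = statusUndefined
		e.err = errors.New("step failed")
		return false
	}
	e.status = statusOK
	e.err = nil
	return true
}

func run(fail bool) error {
	e := &execCtx{}
	if !e.step(fail) {
		return e.err // the caller reads the recorded error once, at the end
	}
	return nil
}

func main() {
	fmt.Println(run(false), run(true))
}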

View file

@ -2,7 +2,6 @@ package deletesvc
import (
"context"
"fmt"
"strconv"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
@ -16,11 +15,18 @@ import (
"go.uber.org/zap"
)
type statusError struct {
status int
err error
}
type execCtx struct {
svc *Service
prm Prm
statusError
log *logger.Logger
tombstone *objectSDK.Tombstone
@ -30,6 +36,11 @@ type execCtx struct {
tombstoneObj *objectSDK.Object
}
const (
statusUndefined int = iota
statusOK
)
func (exec *execCtx) setLogger(l *logger.Logger) {
exec.log = &logger.Logger{Logger: l.With(
zap.String("request", "DELETE"),
@ -64,34 +75,48 @@ func (exec *execCtx) newAddress(id oid.ID) oid.Address {
return a
}
func (exec *execCtx) formSplitInfo(ctx context.Context) error {
func (exec *execCtx) formSplitInfo(ctx context.Context) bool {
success := false
var err error
exec.splitInfo, err = exec.svc.header.splitInfo(ctx, exec)
if err != nil && !apiclient.IsErrObjectAlreadyRemoved(err) {
switch {
default:
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.DeleteCouldNotComposeSplitInfo,
zap.String("error", err.Error()),
)
case err == nil, apiclient.IsErrObjectAlreadyRemoved(err):
// IsErrObjectAlreadyRemoved check is required because splitInfo
// implicitly performs Head request that may return ObjectAlreadyRemoved
// status that is not specified for Delete.
return err
// status that is not specified for Delete
exec.status = statusOK
exec.err = nil
success = true
}
return nil
return success
}
func (exec *execCtx) collectMembers(ctx context.Context) error {
func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) {
if exec.splitInfo == nil {
exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY)
return nil
return true
}
var err error
if _, withLink := exec.splitInfo.Link(); withLink {
err = exec.collectChildren(ctx)
ok = exec.collectChildren(ctx)
}
if err != nil {
if !ok {
if _, withLast := exec.splitInfo.LastPart(); withLast {
if err := exec.collectChain(ctx); err != nil {
return err
ok = exec.collectChain(ctx)
if !ok {
return
}
}
} // may fail if neither right nor linking ID is set?
@ -99,7 +124,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) error {
return exec.supplementBySplitID(ctx)
}
func (exec *execCtx) collectChain(ctx context.Context) error {
func (exec *execCtx) collectChain(ctx context.Context) bool {
var chain []oid.ID
exec.log.Debug(logs.DeleteAssemblingChain)
@ -108,43 +133,84 @@ func (exec *execCtx) collectChain(ctx context.Context) error {
chain = append(chain, prev)
p, err := exec.svc.header.previous(ctx, exec, prev)
if err != nil {
return fmt.Errorf("get previous split element for %s: %w", prev, err)
}
switch {
default:
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.DeleteCouldNotGetPreviousSplitElement,
zap.Stringer("id", prev),
zap.String("error", err.Error()),
)
return false
case err == nil:
exec.status = statusOK
exec.err = nil
withPrev = p != nil
if withPrev {
prev = *p
}
}
}
exec.addMembers(chain)
return nil
return true
}
func (exec *execCtx) collectChildren(ctx context.Context) error {
func (exec *execCtx) collectChildren(ctx context.Context) bool {
exec.log.Debug(logs.DeleteCollectingChildren)
children, err := exec.svc.header.children(ctx, exec)
if err != nil {
return fmt.Errorf("collect children: %w", err)
}
switch {
default:
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.DeleteCouldNotCollectObjectChildren,
zap.String("error", err.Error()),
)
return false
case err == nil:
exec.status = statusOK
exec.err = nil
link, _ := exec.splitInfo.Link()
exec.addMembers(append(children, link))
return nil
return true
}
}
func (exec *execCtx) supplementBySplitID(ctx context.Context) error {
func (exec *execCtx) supplementBySplitID(ctx context.Context) bool {
exec.log.Debug(logs.DeleteSupplementBySplitID)
chain, err := exec.svc.searcher.splitMembers(ctx, exec)
if err != nil {
return fmt.Errorf("search split chain members: %w", err)
}
switch {
default:
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.DeleteCouldNotSearchForSplitChainMembers,
zap.String("error", err.Error()),
)
return false
case err == nil:
exec.status = statusOK
exec.err = nil
exec.addMembers(chain)
return nil
return true
}
}
func (exec *execCtx) addMembers(incoming []oid.ID) {
@ -162,10 +228,17 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
exec.tombstone.SetMembers(append(members, incoming...))
}
func (exec *execCtx) initTombstoneObject() error {
func (exec *execCtx) initTombstoneObject() bool {
payload, err := exec.tombstone.Marshal()
if err != nil {
return fmt.Errorf("marshal tombstone: %w", err)
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.DeleteCouldNotMarshalTombstoneStructure,
zap.String("error", err.Error()),
)
return false
}
exec.tombstoneObj = objectSDK.New()
@ -189,15 +262,29 @@ func (exec *execCtx) initTombstoneObject() error {
exec.tombstoneObj.SetAttributes(a)
return nil
return true
}
func (exec *execCtx) saveTombstone(ctx context.Context) error {
func (exec *execCtx) saveTombstone(ctx context.Context) bool {
id, err := exec.svc.placer.put(ctx, exec)
if err != nil {
return fmt.Errorf("save tombstone: %w", err)
switch {
default:
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.DeleteCouldNotSaveTheTombstone,
zap.String("error", err.Error()),
)
return false
case err == nil:
exec.status = statusOK
exec.err = nil
exec.prm.tombAddrWriter.
SetAddress(exec.newAddress(*id))
}
exec.prm.tombAddrWriter.SetAddress(exec.newAddress(*id))
return nil
return true
}

View file

@ -2,29 +2,37 @@ package deletesvc
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
func (exec *execCtx) executeLocal(ctx context.Context) error {
func (exec *execCtx) executeLocal(ctx context.Context) {
exec.log.Debug(logs.DeleteFormingTombstoneStructure)
if err := exec.formTombstone(ctx); err != nil {
return err
ok := exec.formTombstone(ctx)
if !ok {
return
}
exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
return exec.saveTombstone(ctx)
exec.saveTombstone(ctx)
}
func (exec *execCtx) formTombstone(ctx context.Context) error {
func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) {
tsLifetime, err := exec.svc.netInfo.TombstoneLifetime()
if err != nil {
return fmt.Errorf("fetch tombstone lifetime: %w", err)
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.DeleteCouldNotReadTombstoneLifetimeConfig,
zap.String("error", err.Error()),
)
return false
}
exec.tombstone = objectSDK.NewTombstone()
@ -35,19 +43,26 @@ func (exec *execCtx) formTombstone(ctx context.Context) error {
exec.log.Debug(logs.DeleteFormingSplitInfo)
if err := exec.formSplitInfo(ctx); err != nil {
return fmt.Errorf("form split info: %w", err)
ok = exec.formSplitInfo(ctx)
if !ok {
return
}
exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
if err := exec.collectMembers(ctx); err != nil {
return err
ok = exec.collectMembers(ctx)
if !ok {
return
}
exec.log.Debug(logs.DeleteMembersSuccessfullyCollected)
return exec.initTombstoneObject()
ok = exec.initTombstoneObject()
if !ok {
return
}
return true
}

View file

@ -273,7 +273,7 @@ func TestGetLocalOnly(t *testing.T) {
newSvc := func(storage *testStorage) *Service {
return &Service{
log: test.NewLogger(t, true),
log: test.NewLogger(t, false),
localStorage: storage,
}
}
@ -535,7 +535,7 @@ func TestGetRemoteSmall(t *testing.T) {
const curEpoch = 13
return &Service{
log: test.NewLogger(t, true),
log: test.NewLogger(t, false),
localStorage: newTestStorage(),
traverserGenerator: &testTraverserGenerator{
c: cnr,
@ -1667,7 +1667,7 @@ func TestGetFromPastEpoch(t *testing.T) {
const curEpoch = 13
svc := &Service{
log: test.NewLogger(t, true),
log: test.NewLogger(t, false),
localStorage: newTestStorage(),
epochSource: testEpochReceiver(curEpoch),
traverserGenerator: &testTraverserGenerator{

View file

@ -88,7 +88,7 @@ func (x *readPrmCommon) SetNetmapEpoch(_ uint64) {
type GetObjectPrm struct {
readPrmCommon
ClientParams client.PrmObjectGet
cliPrm client.PrmObjectGet
obj oid.ID
}
@ -97,7 +97,7 @@ type GetObjectPrm struct {
//
// By default request will not be raw.
func (x *GetObjectPrm) SetRawFlag() {
x.ClientParams.Raw = true
x.cliPrm.MarkRaw()
}
// SetAddress sets object address.
@ -105,10 +105,8 @@ func (x *GetObjectPrm) SetRawFlag() {
// Required parameter.
func (x *GetObjectPrm) SetAddress(addr oid.Address) {
x.obj = addr.Object()
cnr := addr.Container()
x.ClientParams.ContainerID = &cnr
x.ClientParams.ObjectID = &x.obj
x.cliPrm.FromContainer(addr.Container())
x.cliPrm.ByID(x.obj)
}
// GetObjectRes groups the resulting values of GetObject operation.
@ -136,15 +134,23 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
// request will almost definitely fail. The case can occur, for example,
// when session is bound to the parent object and child object is requested.
if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
prm.ClientParams.Session = prm.tokenSession
prm.cliPrm.WithinSession(*prm.tokenSession)
}
prm.ClientParams.XHeaders = prm.xHeaders
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Key = prm.key
if prm.tokenBearer != nil {
prm.cliPrm.WithBearerToken(*prm.tokenBearer)
}
rdr, err := prm.cli.ObjectGetInit(ctx, prm.ClientParams)
if prm.local {
prm.cliPrm.MarkLocal()
}
prm.cliPrm.WithXHeaders(prm.xHeaders...)
if prm.key != nil {
prm.cliPrm.UseKey(*prm.key)
}
rdr, err := prm.cli.ObjectGetInit(ctx, prm.cliPrm)
if err != nil {
return nil, fmt.Errorf("init object reading: %w", err)
}
@ -181,7 +187,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
type HeadObjectPrm struct {
readPrmCommon
ClientParams client.PrmObjectHead
cliPrm client.PrmObjectHead
obj oid.ID
}
@ -190,7 +196,7 @@ type HeadObjectPrm struct {
//
// By default request will not be raw.
func (x *HeadObjectPrm) SetRawFlag() {
x.ClientParams.Raw = true
x.cliPrm.MarkRaw()
}
// SetAddress sets object address.
@ -198,10 +204,8 @@ func (x *HeadObjectPrm) SetRawFlag() {
// Required parameter.
func (x *HeadObjectPrm) SetAddress(addr oid.Address) {
x.obj = addr.Object()
cnr := addr.Container()
x.ClientParams.ContainerID = &cnr
x.ClientParams.ObjectID = &x.obj
x.cliPrm.FromContainer(addr.Container())
x.cliPrm.ByID(x.obj)
}
// HeadObjectRes groups the resulting values of GetObject operation.
@ -226,16 +230,22 @@ func (x HeadObjectRes) Header() *objectSDK.Object {
//
// HeadObject ignores the provided session if it is not related to the requested objectSDK.
func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) {
// see details in same statement of GetObject
if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
prm.ClientParams.Session = prm.tokenSession
if prm.local {
prm.cliPrm.MarkLocal()
}
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.XHeaders = prm.xHeaders
// see details in same statement of GetObject
if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
prm.cliPrm.WithinSession(*prm.tokenSession)
}
cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams)
if prm.tokenBearer != nil {
prm.cliPrm.WithBearerToken(*prm.tokenBearer)
}
prm.cliPrm.WithXHeaders(prm.xHeaders...)
cliRes, err := prm.cli.ObjectHead(ctx, prm.cliPrm)
if err == nil {
// pull out an error from status
err = apistatus.ErrFromStatus(cliRes.Status())
@ -262,7 +272,7 @@ type PayloadRangePrm struct {
ln uint64
ClientParams client.PrmObjectRange
cliPrm client.PrmObjectRange
obj oid.ID
}
@ -271,7 +281,7 @@ type PayloadRangePrm struct {
//
// By default request will not be raw.
func (x *PayloadRangePrm) SetRawFlag() {
x.ClientParams.Raw = true
x.cliPrm.MarkRaw()
}
// SetAddress sets object address.
@ -279,17 +289,15 @@ func (x *PayloadRangePrm) SetRawFlag() {
// Required parameter.
func (x *PayloadRangePrm) SetAddress(addr oid.Address) {
x.obj = addr.Object()
cnr := addr.Container()
x.ClientParams.ContainerID = &cnr
x.ClientParams.ObjectID = &x.obj
x.cliPrm.FromContainer(addr.Container())
x.cliPrm.ByID(x.obj)
}
// SetRange range of the object payload to be read.
//
// Required parameter.
func (x *PayloadRangePrm) SetRange(rng *objectSDK.Range) {
x.ClientParams.Offset = rng.GetOffset()
x.cliPrm.SetOffset(rng.GetOffset())
x.ln = rng.GetLength()
}
@ -321,17 +329,23 @@ const maxInitialBufferSize = 1024 * 1024 // 1 MiB
//
// PayloadRange ignores the provided session if it is not related to the requested objectSDK.
func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, error) {
// see details in same statement of GetObject
if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
prm.ClientParams.Session = prm.tokenSession
if prm.local {
prm.cliPrm.MarkLocal()
}
prm.ClientParams.XHeaders = prm.xHeaders
prm.ClientParams.BearerToken = prm.tokenBearer
prm.ClientParams.Local = prm.local
prm.ClientParams.Length = prm.ln
// see details in same statement of GetObject
if prm.tokenSession != nil && prm.tokenSession.AssertObject(prm.obj) {
prm.cliPrm.WithinSession(*prm.tokenSession)
}
rdr, err := prm.cli.ObjectRangeInit(ctx, prm.ClientParams)
if prm.tokenBearer != nil {
prm.cliPrm.WithBearerToken(*prm.tokenBearer)
}
prm.cliPrm.SetLength(prm.ln)
prm.cliPrm.WithXHeaders(prm.xHeaders...)
rdr, err := prm.cli.ObjectRangeInit(ctx, prm.cliPrm)
if err != nil {
return nil, fmt.Errorf("init payload reading: %w", err)
}

View file

@ -1,119 +0,0 @@
package putsvc
import (
"context"
"fmt"
"sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
type nodeIterator struct {
traversal
cfg *cfg
}
func (c *cfg) newNodeIterator(opts []placement.Option) *nodeIterator {
return &nodeIterator{
traversal: traversal{
opts: opts,
mExclude: make(map[string]*bool),
},
cfg: c,
}
}
func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context, nodeDesc) error) error {
traverser, err := placement.NewTraverser(n.traversal.opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
resErr := &atomic.Value{}
// Must iterate over all replicas, regardless of whether there are identical nodes there.
// At the same time need to exclude identical nodes from processing.
for {
addrs := traverser.Next()
if len(addrs) == 0 {
break
}
if n.forEachAddress(ctx, traverser, addrs, f, resErr) {
break
}
}
if !traverser.Success() {
var err errIncompletePut
err.singleErr, _ = resErr.Load().(error)
return err
}
// perform additional container broadcast if needed
if n.traversal.submitPrimaryPlacementFinish() {
err := n.forEachNode(ctx, f)
if err != nil {
n.cfg.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
return nil
}
func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, nodeDesc) error, resErr *atomic.Value) bool {
var wg sync.WaitGroup
for _, addr := range addrs {
addr := addr
if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
if *ok {
traverser.SubmitSuccess()
}
// This can happen only during additional container broadcast.
continue
}
workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey())
item := new(bool)
wg.Add(1)
if err := workerPool.Submit(func() {
defer wg.Done()
err := f(ctx, nodeDesc{local: isLocal, info: addr})
if err != nil {
resErr.Store(err)
svcutil.LogServiceError(n.cfg.log, "PUT", addr.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
}); err != nil {
wg.Done()
svcutil.LogWorkerPoolError(n.cfg.log, "PUT", err)
return true
}
// Mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
n.traversal.submitProcessed(addr, item)
}
wg.Wait()
return false
}
func needAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
return len(obj.Children()) > 0 || (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock))
}

View file

@ -3,11 +3,18 @@ package putsvc
import (
"context"
"fmt"
"sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
"go.uber.org/zap"
)
type preparedObjectTarget interface {
@ -15,19 +22,22 @@ type preparedObjectTarget interface {
}
type distributedTarget struct {
placementOpts []placement.Option
extraBroadcastEnabled bool
traversal traversal
obj *objectSDK.Object
objMeta object.ContentMeta
*cfg
payload *payload
nodeTargetInitializer func(nodeDesc) preparedObjectTarget
getWorkerPool func([]byte) (util.WorkerPool, bool)
relay func(context.Context, nodeDesc) error
fmt *object.FormatValidator
log *logger.Logger
}
// parameters and state of container traversal.
@ -127,9 +137,15 @@ func (t *distributedTarget) WriteObject(ctx context.Context, obj *objectSDK.Obje
var err error
if t.objMeta, err = t.fmtValidator.ValidateContent(t.obj); err != nil {
if t.objMeta, err = t.fmt.ValidateContent(t.obj); err != nil {
return fmt.Errorf("(%T) could not validate payload content: %w", t, err)
}
if len(t.obj.Children()) > 0 {
// enabling extra broadcast for linking objects
t.traversal.extraBroadcastEnabled = true
}
return t.iteratePlacement(ctx)
}
@ -150,7 +166,90 @@ func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error
func (t *distributedTarget) iteratePlacement(ctx context.Context) error {
id, _ := t.obj.ID()
iter := t.cfg.newNodeIterator(append(t.placementOpts, placement.ForObject(id)))
iter.extraBroadcastEnabled = needAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
return iter.forEachNode(ctx, t.sendObject)
traverser, err := placement.NewTraverser(
append(t.traversal.opts, placement.ForObject(id))...,
)
if err != nil {
return fmt.Errorf("(%T) could not create object placement traverser: %w", t, err)
}
resErr := &atomic.Value{}
// Must iterate over all replicas, regardless of whether there are identical nodes there.
// At the same time need to exclude identical nodes from processing.
for {
addrs := traverser.Next()
if len(addrs) == 0 {
break
}
if t.iterateAddresses(ctx, traverser, addrs, resErr) {
break
}
}
if !traverser.Success() {
var err errIncompletePut
err.singleErr, _ = resErr.Load().(error)
return err
}
// perform additional container broadcast if needed
if t.traversal.submitPrimaryPlacementFinish() {
err = t.iteratePlacement(ctx)
if err != nil {
t.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
return nil
}
func (t *distributedTarget) iterateAddresses(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, resErr *atomic.Value) bool {
wg := &sync.WaitGroup{}
for i := range addrs {
addr := addrs[i]
if val := t.traversal.mExclude[string(addr.PublicKey())]; val != nil {
// Check whether the node was processed successfully on the previous iteration.
if *val {
traverser.SubmitSuccess()
}
// it can happen only during additional container broadcast
continue
}
wg.Add(1)
item := new(bool)
workerPool, isLocal := t.getWorkerPool(addr.PublicKey())
if err := workerPool.Submit(func() {
defer wg.Done()
err := t.sendObject(ctx, nodeDesc{local: isLocal, info: addr})
if err != nil {
resErr.Store(err)
svcutil.LogServiceError(t.log, "PUT", addr.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
}); err != nil {
wg.Done()
svcutil.LogWorkerPoolError(t.log, "PUT", err)
return true
}
// mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
t.traversal.submitProcessed(addr, item)
}
wg.Wait()
return false
}
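
The placement loop above fans work out to worker pools, stores the last observed error in an atomic.Value, and waits on a WaitGroup before deciding whether the put succeeded. Below is a stripped-down sketch of that concurrency pattern using plain goroutines in place of the node's worker pools and placement traverser.

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

func processAll(addrs []string, f func(string) error) error {
	var wg sync.WaitGroup
	resErr := &atomic.Value{}

	for _, addr := range addrs {
		addr := addr
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := f(addr); err != nil {
				resErr.Store(err) // keep the last observed error for the caller
			}
		}()
	}
	wg.Wait()

	if err, _ := resErr.Load().(error); err != nil {
		return err
	}
	return nil
}

func main() {
	err := processAll([]string{"node-a", "node-b"}, func(addr string) error {
		if addr == "node-b" {
			return errors.New("put failed on " + addr)
		}
		return nil
	})
	fmt.Println(err)
}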

View file

@ -29,14 +29,6 @@ type ClientConstructor interface {
Get(client.NodeInfo) (client.MultiAddressClient, error)
}
type InnerRing interface {
InnerRingKeys() ([][]byte, error)
}
type FormatValidatorConfig interface {
VerifySessionTokenIssuer() bool
}
type cfg struct {
keyStorage *objutil.KeyStorage
@ -59,8 +51,6 @@ type cfg struct {
clientConstructor ClientConstructor
log *logger.Logger
verifySessionTokenIssuer bool
}
func NewService(ks *objutil.KeyStorage,
@ -71,7 +61,6 @@ func NewService(ks *objutil.KeyStorage,
ns netmap.Source,
nk netmap.AnnouncedKeys,
nst netmap.State,
ir InnerRing,
opts ...Option) *Service {
c := &cfg{
remotePool: util.NewPseudoWorkerPool(),
@ -91,15 +80,7 @@ func NewService(ks *objutil.KeyStorage,
opts[i](c)
}
c.fmtValidator = object.NewFormatValidator(
object.WithLockSource(os),
object.WithNetState(nst),
object.WithInnerRing(ir),
object.WithNetmapSource(ns),
object.WithContainersSource(cs),
object.WithVerifySessionTokenIssuer(c.verifySessionTokenIssuer),
object.WithLogger(c.log),
)
c.fmtValidator = object.NewFormatValidator(object.WithLockSource(os), object.WithNetState(nst))
return &Service{
cfg: c,
@ -123,9 +104,3 @@ func WithLogger(l *logger.Logger) Option {
c.log = l
}
}
func WithVerifySessionTokenIssuer(v bool) Option {
return func(c *cfg) {
c.verifySessionTokenIssuer = v
}
}

View file

@ -8,6 +8,7 @@ import (
"fmt"
"hash"
"sync"
"sync/atomic"
objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
@ -149,19 +150,18 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
if err != nil {
return err
}
iter := s.cfg.newNodeIterator(placementOptions)
iter.extraBroadcastEnabled = needAdditionalBroadcast(obj, localOnly)
traversal := &traversal{
opts: placementOptions,
extraBroadcastEnabled: len(obj.Children()) > 0 ||
(!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock)),
mExclude: make(map[string]*bool),
}
signer := &putSingleRequestSigner{
req: req,
keyStorage: s.keyStorage,
signer: &sync.Once{},
}
return iter.forEachNode(ctx, func(ctx context.Context, nd nodeDesc) error {
return s.saveToPlacementNode(ctx, &nd, obj, signer, meta)
})
return s.saveAccordingToPlacement(ctx, obj, signer, traversal, meta)
}
func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) ([]placement.Option, error) {
@ -199,6 +199,97 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
return result, nil
}
func (s *Service) saveAccordingToPlacement(ctx context.Context, obj *objectSDK.Object, signer *putSingleRequestSigner,
traversal *traversal, meta object.ContentMeta) error {
traverser, err := placement.NewTraverser(traversal.opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
var resultError atomic.Value
for {
addrs := traverser.Next()
if len(addrs) == 0 {
break
}
if stop := s.saveToPlacementNodes(ctx, obj, signer, traversal, traverser, addrs, meta, &resultError); stop {
break
}
}
if !traverser.Success() {
var err errIncompletePut
err.singleErr, _ = resultError.Load().(error)
return err
}
if traversal.submitPrimaryPlacementFinish() {
err = s.saveAccordingToPlacement(ctx, obj, signer, traversal, meta)
if err != nil {
s.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
}
}
return nil
}
func (s *Service) saveToPlacementNodes(ctx context.Context,
obj *objectSDK.Object,
signer *putSingleRequestSigner,
traversal *traversal,
traverser *placement.Traverser,
nodeAddresses []placement.Node,
meta object.ContentMeta,
resultError *atomic.Value,
) bool {
wg := sync.WaitGroup{}
for _, nodeAddress := range nodeAddresses {
nodeAddress := nodeAddress
if ok := traversal.mExclude[string(nodeAddress.PublicKey())]; ok != nil {
if *ok {
traverser.SubmitSuccess()
}
continue
}
local := false
workerPool := s.remotePool
if s.netmapKeys.IsLocalKey(nodeAddress.PublicKey()) {
local = true
workerPool = s.localPool
}
item := new(bool)
wg.Add(1)
if err := workerPool.Submit(func() {
defer wg.Done()
err := s.saveToPlacementNode(ctx, &nodeDesc{local: local, info: nodeAddress}, obj, signer, meta)
if err != nil {
resultError.Store(err)
svcutil.LogServiceError(s.log, "PUT", nodeAddress.Addresses(), err)
return
}
traverser.SubmitSuccess()
*item = true
}); err != nil {
wg.Done()
svcutil.LogWorkerPoolError(s.log, "PUT", err)
return true
}
traversal.submitProcessed(nodeAddress, item)
}
wg.Wait()
return false
}
func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *nodeDesc, obj *objectSDK.Object,
signer *putSingleRequestSigner, meta object.ContentMeta) error {
if nodeDesc.local {

View file

@ -215,10 +215,13 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) *distributedTarget {
withBroadcast := !prm.common.LocalOnly() && (typ == objectSDK.TypeTombstone || typ == objectSDK.TypeLock)
return &distributedTarget{
cfg: p.cfg,
placementOpts: prm.traverseOpts,
traversal: traversal{
opts: prm.traverseOpts,
extraBroadcastEnabled: withBroadcast,
},
payload: getPayload(),
getWorkerPool: p.getWorkerPool,
nodeTargetInitializer: func(node nodeDesc) preparedObjectTarget {
if node.local {
return localTarget{
@ -237,6 +240,8 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) *distributedTarget {
return rt
},
relay: relay,
fmt: p.fmtValidator,
log: p.log,
}
}
@ -274,9 +279,9 @@ func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) {
}, nil
}
func (c *cfg) getWorkerPool(pub []byte) (pkgutil.WorkerPool, bool) {
if c.netmapKeys.IsLocalKey(pub) {
return c.localPool, true
func (p *Streamer) getWorkerPool(pub []byte) (pkgutil.WorkerPool, bool) {
if p.netmapKeys.IsLocalKey(pub) {
return p.localPool, true
}
return c.remotePool, false
return p.remotePool, false
}

View file

@ -3,7 +3,6 @@ package searchsvc
import (
"context"
"encoding/hex"
"fmt"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -11,7 +10,12 @@ import (
"go.uber.org/zap"
)
func (exec *execCtx) executeOnContainer(ctx context.Context) error {
func (exec *execCtx) executeOnContainer(ctx context.Context) {
if exec.isLocal() {
exec.log.Debug(logs.SearchReturnResultDirectly)
return
}
lookupDepth := exec.netmapLookupDepth()
exec.log.Debug(logs.TryingToExecuteInContainer,
@ -19,12 +23,13 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error {
)
// initialize epoch number
if err := exec.initEpoch(); err != nil {
return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err)
ok := exec.initEpoch()
if !ok {
return
}
for {
if err := exec.processCurrentEpoch(ctx); err != nil {
if exec.processCurrentEpoch(ctx) {
break
}
@ -39,17 +44,18 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error {
exec.curProcEpoch--
}
return nil
exec.status = statusOK
exec.err = nil
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
exec.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
traverser, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch)
if err != nil {
return fmt.Errorf("%s: %w", logs.SearchCouldNotGenerateContainerTraverser, err)
traverser, ok := exec.generateTraverser(exec.containerID())
if !ok {
return true
}
ctx, cancel := context.WithCancel(ctx)
@ -85,7 +91,12 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
c, err := exec.svc.clientConstructor.get(info)
if err != nil {
exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
mtx.Lock()
exec.status = statusUndefined
exec.err = err
mtx.Unlock()
exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient)
return
}
@ -98,17 +109,13 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
}
mtx.Lock()
err = exec.writeIDList(ids)
exec.writeIDList(ids)
mtx.Unlock()
if err != nil {
exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
return
}
}(i)
}
wg.Wait()
}
return nil
return false
}

View file

@ -1,6 +1,8 @@
package searchsvc
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -8,16 +10,34 @@ import (
"go.uber.org/zap"
)
type statusError struct {
status int
err error
}
type execCtx struct {
svc *Service
prm Prm
statusError
log *logger.Logger
curProcEpoch uint64
}
const (
statusUndefined int = iota
statusOK
)
func (exec *execCtx) prepare() {
if _, ok := exec.prm.writer.(*uniqueIDWriter); !ok {
exec.prm.writer = newUniqueAddressWriter(exec.prm.writer)
}
}
func (exec *execCtx) setLogger(l *logger.Logger) {
exec.log = &logger.Logger{Logger: l.With(
zap.String("request", "SEARCH"),
@ -48,24 +68,64 @@ func (exec *execCtx) netmapLookupDepth() uint64 {
return exec.prm.common.NetmapLookupDepth()
}
func (exec *execCtx) initEpoch() error {
func (exec *execCtx) initEpoch() bool {
exec.curProcEpoch = exec.netmapEpoch()
if exec.curProcEpoch > 0 {
return nil
return true
}
e, err := exec.svc.currentEpochReceiver.Epoch()
if err != nil {
return err
}
e, err := exec.svc.currentEpochReceiver.currentEpoch()
switch {
default:
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.CouldNotGetCurrentEpochNumber,
zap.String("error", err.Error()),
)
return false
case err == nil:
exec.curProcEpoch = e
return nil
return true
}
}
func (exec *execCtx) writeIDList(ids []oid.ID) error {
func (exec *execCtx) generateTraverser(cnr cid.ID) (*placement.Traverser, bool) {
t, err := exec.svc.traverserGenerator.generateTraverser(cnr, exec.curProcEpoch)
switch {
default:
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.SearchCouldNotGenerateContainerTraverser,
zap.String("error", err.Error()),
)
return nil, false
case err == nil:
return t, true
}
}
func (exec *execCtx) writeIDList(ids []oid.ID) {
ids = exec.filterAllowedObjectIDs(ids)
return exec.prm.writer.WriteIDs(ids)
err := exec.prm.writer.WriteIDs(ids)
switch {
default:
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers,
zap.String("error", err.Error()),
)
case err == nil:
exec.status = statusOK
exec.err = nil
}
}
func (exec *execCtx) filterAllowedObjectIDs(objIDs []oid.ID) []oid.ID {

View file

@ -2,22 +2,24 @@ package searchsvc
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
func (exec *execCtx) executeLocal(ctx context.Context) error {
func (exec *execCtx) executeLocal(ctx context.Context) {
ids, err := exec.svc.localStorage.search(ctx, exec)
if err != nil {
exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
return err
exec.status = statusUndefined
exec.err = err
exec.log.Debug(logs.SearchLocalOperationFailed,
zap.String("error", err.Error()),
)
return
}
if err := exec.writeIDList(ids); err != nil {
return fmt.Errorf("%s: %w", logs.SearchCouldNotWriteObjectIdentifiers, err)
}
return nil
exec.writeIDList(ids)
}

Some files were not shown because too many files have changed in this diff.