From 44d5412e10dd1448c8a41db3768534b8b4b79fd0 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Fri, 31 Mar 2023 12:51:12 +0300
Subject: [PATCH 0001/1943] [#181] ir: Do not deposit notary GAS by
non-alphabet nodes
Signed-off-by: Pavel Karpy
---
pkg/innerring/innerring.go | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 063d0f7cd..38023932f 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -149,14 +149,16 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
return err
}
- err = s.initMainNotary(ctx)
- if err != nil {
- return err
- }
+ if s.IsAlphabet() {
+ err = s.initMainNotary(ctx)
+ if err != nil {
+ return err
+ }
- err = s.initSideNotary(ctx)
- if err != nil {
- return err
+ err = s.initSideNotary(ctx)
+ if err != nil {
+ return err
+ }
}
prm := governance.VoteValidatorPrm{}
From db5321309d073a0869a271897d866357afa30f60 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Fri, 31 Mar 2023 12:52:16 +0300
Subject: [PATCH 0002/1943] [#181] ir: Do not sync the Alphabet by non-alphabet
nodes
Signed-off-by: Pavel Karpy
---
pkg/innerring/initialization.go | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 31a1bcd60..5a8fdcc3d 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -10,7 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/balance"
cont "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/container"
- frostfs "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/frostfs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
nodevalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation"
@@ -90,7 +90,9 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper,
AuditSettlementsHandler: s.onlyAlphabetEventHandler(
settlementProcessor.HandleAuditEvent,
),
- AlphabetSyncHandler: alphaSync,
+ AlphabetSyncHandler: s.onlyAlphabetEventHandler(
+ alphaSync,
+ ),
NodeValidator: nodevalidator.New(
&netMapCandidateStateValidator,
addrvalidator.New(),
From f09ee27af937cb6608e24a95030db79ceb508ffb Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Thu, 30 Mar 2023 18:24:07 +0300
Subject: [PATCH 0003/1943] [#181] ir: Do not process container estimations by
non-alphabet nodes
Signed-off-by: Pavel Karpy
---
pkg/innerring/blocktimer.go | 11 +++++++++++
pkg/innerring/processors/netmap/process_epoch.go | 2 +-
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go
index 747f36fdf..eb74e44d4 100644
--- a/pkg/innerring/blocktimer.go
+++ b/pkg/innerring/blocktimer.go
@@ -20,6 +20,10 @@ type (
EpochDuration() uint64
}
+ alphaState interface {
+ IsAlphabet() bool
+ }
+
subEpochEventHandler struct {
handler event.Handler // handle to execute
durationMul uint32 // X: X/Y of epoch in blocks
@@ -31,6 +35,8 @@ type (
epochTimerArgs struct {
l *logger.Logger
+ alphabetState alphaState
+
newEpochHandlers []newEpochHandler
cnrWrapper *container.Client // to invoke stop container estimation
@@ -91,6 +97,11 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
args.stopEstimationDMul,
args.stopEstimationDDiv,
func() {
+ if !args.alphabetState.IsAlphabet() {
+ args.l.Debug("non-alphabet mode, do not stop container estimations")
+ return
+ }
+
epochN := args.epoch.EpochCounter()
if epochN == 0 { // estimates are invalid in genesis epoch
return
diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go
index 48de528e1..ffcddc497 100644
--- a/pkg/innerring/processors/netmap/process_epoch.go
+++ b/pkg/innerring/processors/netmap/process_epoch.go
@@ -50,7 +50,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
prm.SetEpoch(epoch - 1)
prm.SetHash(ev.TxHash())
- if epoch > 0 { // estimates are invalid in genesis epoch
+ if epoch > 0 && np.alphabetState.IsAlphabet() { // estimates are invalid in genesis epoch
err = np.containerWrp.StartEstimation(prm)
if err != nil {
From aa92f977ef88addc3de6f7814a79592ba79011a9 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Thu, 30 Mar 2023 18:24:37 +0300
Subject: [PATCH 0004/1943] [#181] ir: Do not pay for audit by non-alphabet
nodes
Signed-off-by: Pavel Karpy
---
pkg/innerring/processors/settlement/calls.go | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/pkg/innerring/processors/settlement/calls.go b/pkg/innerring/processors/settlement/calls.go
index b4f44543e..33191662b 100644
--- a/pkg/innerring/processors/settlement/calls.go
+++ b/pkg/innerring/processors/settlement/calls.go
@@ -13,6 +13,12 @@ func (p *Processor) HandleAuditEvent(e event.Event) {
epoch := ev.Epoch()
+ if !p.state.IsAlphabet() {
+ p.log.Info("non alphabet mode, ignore audit payments")
+
+ return
+ }
+
log := &logger.Logger{Logger: p.log.With(
zap.Uint64("epoch", epoch),
)}
From 2bdf7126b89217941712c854dc4818c4851c7e25 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Thu, 30 Mar 2023 18:25:45 +0300
Subject: [PATCH 0005/1943] [#181] Update CHANGELOG
Signed-off-by: Pavel Karpy
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cc2f4adad..ed92f3a53 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -53,6 +53,7 @@ Changelog for FrostFS Node
- Adding of public key for nns group `group.frostfs` at init step (#130)
- Iterating over just removed files by FSTree (#98)
- Parts of a locked object could not be removed anymore (#141)
+- Non-alphabet nodes do not try to handle alphabet events (#181)
### Removed
### Updated
From a69c6d1ec9e43f49e6e588d8770ea05ccbcef050 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Mon, 27 Feb 2023 17:17:55 +0300
Subject: [PATCH 0006/1943] [#2272] morph: Do not subscribe to events without
listening
It led to a neo-go dead-lock in the `subscriber` component. Subscribing to
notifications is an RPC like any other, so it could also be blocked
forever if no async listening routine (one reading the notification channel)
exists. If the number of subscriptions is big enough (or the caller is
lucky enough), the subscribing loop might not have finished before the
first notification is received; then the subscribing RPC is blocked because
the received notification is not being handled, while the notification
listening routine is blocked by the unfinished subscription loop.
This commit starts listening to the notification channel _before_ any
subscription actions are taken.
Signed-off-by: Pavel Karpy
---
CHANGELOG.md | 2 +-
pkg/morph/event/listener.go | 87 ++++++++++++++----------------
pkg/morph/subscriber/subscriber.go | 41 +++++++++-----
3 files changed, 69 insertions(+), 61 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ed92f3a53..6793ed340 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -44,7 +44,7 @@ Changelog for FrostFS Node
- Possible deadlock in write-cache (#2239)
- Fix `*_req_count` and `*_req_count_success` metric values (#2241)
- Storage ID update by write-cache (#2244)
-- `neo-go` client deadlock on subscription restoration (#2244)
+- `neo-go` client deadlock on subscription (#2244, #2272)
- Possible panic during write-cache initialization (#2234)
- Do not fetch an object if `meta` is missing it (#61)
- Create contract wallet only by `init` and `update-config` command (#63)
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index ed2b95026..64fdc3df3 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -158,6 +157,19 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
}
func (l *listener) listen(ctx context.Context, intError chan<- error) error {
+ // mark listener as started
+ l.started = true
+
+ subErrCh := make(chan error)
+
+ go l.subscribe(subErrCh)
+
+ l.listenLoop(ctx, intError, subErrCh)
+
+ return nil
+}
+
+func (l *listener) subscribe(errCh chan error) {
// create the list of listening contract hashes
hashes := make([]util.Uint160, 0)
@@ -175,71 +187,50 @@ func (l *listener) listen(ctx context.Context, intError chan<- error) error {
hashes = append(hashes, hashType.ScriptHash())
}
-
- // mark listener as started
- l.started = true
-
l.mtx.RUnlock()
- chEvent, err := l.subscriber.SubscribeForNotification(hashes...)
+ err := l.subscriber.SubscribeForNotification(hashes...)
if err != nil {
- return err
+ errCh <- fmt.Errorf("could not subscribe for notifications: %w", err)
+ return
}
- l.listenLoop(ctx, chEvent, intError)
-
- return nil
-}
-
-// nolint: funlen, gocognit
-func (l *listener) listenLoop(ctx context.Context, chEvent <-chan *state.ContainedNotificationEvent, intErr chan<- error) {
- var (
- blockChan <-chan *block.Block
-
- notaryChan <-chan *result.NotaryRequestEvent
-
- err error
- )
-
if len(l.blockHandlers) > 0 {
- if blockChan, err = l.subscriber.BlockNotifications(); err != nil {
- if intErr != nil {
- intErr <- fmt.Errorf("could not open block notifications channel: %w", err)
- } else {
- l.log.Debug("could not open block notifications channel",
- zap.String("error", err.Error()),
- )
- }
-
+ if err = l.subscriber.BlockNotifications(); err != nil {
+ errCh <- fmt.Errorf("could not subscribe for blocks: %w", err)
return
}
- } else {
- blockChan = make(chan *block.Block)
}
if l.listenNotary {
- if notaryChan, err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
- if intErr != nil {
- intErr <- fmt.Errorf("could not open notary notifications channel: %w", err)
- } else {
- l.log.Debug("could not open notary notifications channel",
- zap.String("error", err.Error()),
- )
- }
-
+ if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil {
+ errCh <- fmt.Errorf("could not subscribe for notary requests: %w", err)
return
}
}
+}
+
+// nolint: funlen, gocognit
+func (l *listener) listenLoop(ctx context.Context, intErr chan<- error, subErrCh chan error) {
+ chs := l.subscriber.NotificationChannels()
loop:
for {
select {
+ case err := <-subErrCh:
+ if intErr != nil {
+ intErr <- err
+ } else {
+ l.log.Error("stop event listener by error", zap.Error(err))
+ }
+
+ break loop
case <-ctx.Done():
l.log.Info("stop event listener by context",
zap.String("reason", ctx.Err().Error()),
)
break loop
- case notifyEvent, ok := <-chEvent:
+ case notifyEvent, ok := <-chs.NotificationsCh:
if !ok {
l.log.Warn("stop event listener by notification channel")
if intErr != nil {
@@ -252,13 +243,13 @@ loop:
continue loop
}
- if err = l.pool.Submit(func() {
+ if err := l.pool.Submit(func() {
l.parseAndHandleNotification(notifyEvent)
}); err != nil {
l.log.Warn("listener worker pool drained",
zap.Int("capacity", l.pool.Cap()))
}
- case notaryEvent, ok := <-notaryChan:
+ case notaryEvent, ok := <-chs.NotaryRequestsCh:
if !ok {
l.log.Warn("stop event listener by notary channel")
if intErr != nil {
@@ -271,13 +262,13 @@ loop:
continue loop
}
- if err = l.pool.Submit(func() {
+ if err := l.pool.Submit(func() {
l.parseAndHandleNotary(notaryEvent)
}); err != nil {
l.log.Warn("listener worker pool drained",
zap.Int("capacity", l.pool.Cap()))
}
- case b, ok := <-blockChan:
+ case b, ok := <-chs.BlockCh:
if !ok {
l.log.Warn("stop event listener by block channel")
if intErr != nil {
@@ -290,7 +281,7 @@ loop:
continue loop
}
- if err = l.pool.Submit(func() {
+ if err := l.pool.Submit(func() {
for i := range l.blockHandlers {
l.blockHandlers[i](b)
}
diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go
index 6229e6f30..17bed5b2d 100644
--- a/pkg/morph/subscriber/subscriber.go
+++ b/pkg/morph/subscriber/subscriber.go
@@ -17,12 +17,21 @@ import (
)
type (
+ NotificationChannels struct {
+ BlockCh <-chan *block.Block
+ NotificationsCh <-chan *state.ContainedNotificationEvent
+ NotaryRequestsCh <-chan *result.NotaryRequestEvent
+ }
+
// Subscriber is an interface of the NotificationEvent listener.
Subscriber interface {
- SubscribeForNotification(...util.Uint160) (<-chan *state.ContainedNotificationEvent, error)
+ SubscribeForNotification(...util.Uint160) error
UnsubscribeForNotification()
- BlockNotifications() (<-chan *block.Block, error)
- SubscribeForNotaryRequests(mainTXSigner util.Uint160) (<-chan *result.NotaryRequestEvent, error)
+ BlockNotifications() error
+ SubscribeForNotaryRequests(mainTXSigner util.Uint160) error
+
+ NotificationChannels() NotificationChannels
+
Close()
}
@@ -46,6 +55,14 @@ type (
}
)
+func (s *subscriber) NotificationChannels() NotificationChannels {
+ return NotificationChannels{
+ BlockCh: s.blockChan,
+ NotificationsCh: s.notifyChan,
+ NotaryRequestsCh: s.notaryChan,
+ }
+}
+
var (
errNilParams = errors.New("chain/subscriber: config was not provided to the constructor")
@@ -54,7 +71,7 @@ var (
errNilClient = errors.New("chain/subscriber: client was not provided to the constructor")
)
-func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) (<-chan *state.ContainedNotificationEvent, error) {
+func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) error {
s.Lock()
defer s.Unlock()
@@ -69,14 +86,14 @@ func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) (<-chan
_ = s.client.UnsubscribeContract(hash)
}
- return nil, err
+ return err
}
// save notification id
notifyIDs[contracts[i]] = struct{}{}
}
- return s.notifyChan, nil
+ return nil
}
func (s *subscriber) UnsubscribeForNotification() {
@@ -91,20 +108,20 @@ func (s *subscriber) Close() {
s.client.Close()
}
-func (s *subscriber) BlockNotifications() (<-chan *block.Block, error) {
+func (s *subscriber) BlockNotifications() error {
if err := s.client.SubscribeForNewBlocks(); err != nil {
- return nil, fmt.Errorf("could not subscribe for new block events: %w", err)
+ return fmt.Errorf("could not subscribe for new block events: %w", err)
}
- return s.blockChan, nil
+ return nil
}
-func (s *subscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) (<-chan *result.NotaryRequestEvent, error) {
+func (s *subscriber) SubscribeForNotaryRequests(mainTXSigner util.Uint160) error {
if err := s.client.SubscribeForNotaryRequests(mainTXSigner); err != nil {
- return nil, fmt.Errorf("could not subscribe for notary request events: %w", err)
+ return fmt.Errorf("could not subscribe for notary request events: %w", err)
}
- return s.notaryChan, nil
+ return nil
}
func (s *subscriber) routeNotifications(ctx context.Context) {
From 00377dca8343e326eb4d497ed733e6acd51fd793 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 12:24:01 +0300
Subject: [PATCH 0007/1943] [#199] putsvc: Refactor placement iterator
Resolve funlen linter for iteratePlacement method
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/distributed.go | 111 ++++++++++++-------------
1 file changed, 53 insertions(+), 58 deletions(-)
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index 47104b323..3b5f4ec53 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -164,7 +164,6 @@ func (t *distributedTarget) sendObject(node nodeDesc) error {
return nil
}
-// nolint: funlen
func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transformer.AccessIdentifiers, error) {
id, _ := t.obj.ID()
@@ -175,72 +174,22 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
return nil, fmt.Errorf("(%T) could not create object placement traverser: %w", t, err)
}
- var resErr atomic.Value
+ resErr := &atomic.Value{}
-loop:
for {
addrs := traverser.Next()
if len(addrs) == 0 {
break
}
- wg := new(sync.WaitGroup)
-
- for i := range addrs {
- if t.traversal.processed(addrs[i]) {
- // it can happen only during additional container broadcast
- continue
- }
-
- wg.Add(1)
-
- addr := addrs[i]
-
- isLocal := t.isLocalKey(addr.PublicKey())
-
- var workerPool util.WorkerPool
-
- if isLocal {
- workerPool = t.localPool
- } else {
- workerPool = t.remotePool
- }
-
- if err := workerPool.Submit(func() {
- defer wg.Done()
-
- err := f(nodeDesc{local: isLocal, info: addr})
-
- // mark the container node as processed in order to exclude it
- // in subsequent container broadcast. Note that we don't
- // process this node during broadcast if primary placement
- // on it failed.
- t.traversal.submitProcessed(addr)
-
- if err != nil {
- resErr.Store(err)
- svcutil.LogServiceError(t.log, "PUT", addr.Addresses(), err)
- return
- }
-
- traverser.SubmitSuccess()
- }); err != nil {
- wg.Done()
-
- svcutil.LogWorkerPoolError(t.log, "PUT", err)
-
- break loop
- }
+ if t.iterateAddresses(traverser, addrs, f, resErr) {
+ break
}
-
- wg.Wait()
}
if !traverser.Success() {
var err errIncompletePut
-
err.singleErr, _ = resErr.Load().(error)
-
return nil, err
}
@@ -248,10 +197,7 @@ loop:
if t.traversal.submitPrimaryPlacementFinish() {
_, err = t.iteratePlacement(f)
if err != nil {
- t.log.Error("additional container broadcast failure",
- zap.Error(err),
- )
-
+ t.log.Error("additional container broadcast failure", zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
@@ -261,3 +207,52 @@ loop:
return new(transformer.AccessIdentifiers).
WithSelfID(id), nil
}
+
+func (t *distributedTarget) iterateAddresses(traverser *placement.Traverser, addrs []placement.Node, f func(nodeDesc) error, resErr *atomic.Value) bool {
+ wg := &sync.WaitGroup{}
+
+ for i := range addrs {
+ if t.traversal.processed(addrs[i]) {
+ // it can happen only during additional container broadcast
+ continue
+ }
+
+ wg.Add(1)
+
+ addr := addrs[i]
+ isLocal := t.isLocalKey(addr.PublicKey())
+
+ workerPool := t.remotePool
+ if isLocal {
+ workerPool = t.localPool
+ }
+
+ if err := workerPool.Submit(func() {
+ defer wg.Done()
+
+ err := f(nodeDesc{local: isLocal, info: addr})
+
+ // mark the container node as processed in order to exclude it
+ // in subsequent container broadcast. Note that we don't
+ // process this node during broadcast if primary placement
+ // on it failed.
+ t.traversal.submitProcessed(addr)
+
+ if err != nil {
+ resErr.Store(err)
+ svcutil.LogServiceError(t.log, "PUT", addr.Addresses(), err)
+ return
+ }
+
+ traverser.SubmitSuccess()
+ }); err != nil {
+ wg.Done()
+ svcutil.LogWorkerPoolError(t.log, "PUT", err)
+ return true
+ }
+ }
+
+ wg.Wait()
+
+ return false
+}
From c635164b4e36f7c15b30a1c7a742c9c26d4bce16 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 13:09:44 +0300
Subject: [PATCH 0008/1943] [#199] putsvc: Refactor streamer pool
Resolve staticcheck linter for putBytesPool
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/distributed.go | 6 +++---
pkg/services/object/put/pool.go | 16 ++++++++++------
2 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index 3b5f4ec53..e4566157e 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -28,7 +28,7 @@ type distributedTarget struct {
obj *objectSDK.Object
objMeta object.ContentMeta
- payload []byte
+ payload *payload
nodeTargetInitializer func(nodeDesc) preparedObjectTarget
@@ -122,7 +122,7 @@ func (t *distributedTarget) WriteHeader(obj *objectSDK.Object) error {
}
func (t *distributedTarget) Write(p []byte) (n int, err error) {
- t.payload = append(t.payload, p...)
+ t.payload.Data = append(t.payload.Data, p...)
return len(p), nil
}
@@ -133,7 +133,7 @@ func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
t.payload = nil
}()
- t.obj.SetPayload(t.payload)
+ t.obj.SetPayload(t.payload.Data)
var err error
diff --git a/pkg/services/object/put/pool.go b/pkg/services/object/put/pool.go
index 705273227..5726856e5 100644
--- a/pkg/services/object/put/pool.go
+++ b/pkg/services/object/put/pool.go
@@ -6,15 +6,19 @@ import (
const defaultAllocSize = 1024
+type payload struct {
+ Data []byte
+}
+
var putBytesPool = &sync.Pool{
- New: func() any { return make([]byte, 0, defaultAllocSize) },
+ New: func() any { return &payload{Data: make([]byte, 0, defaultAllocSize)} },
}
-func getPayload() []byte {
- return putBytesPool.Get().([]byte)
+func getPayload() *payload {
+ return putBytesPool.Get().(*payload)
}
-func putPayload(p []byte) {
- //nolint:staticcheck
- putBytesPool.Put(p[:0])
+func putPayload(p *payload) {
+ p.Data = p.Data[:0]
+ putBytesPool.Put(p)
}
From 5b59966b6a64e7765bd7eb46b2b5e1f576f2ff92 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 14:23:53 +0300
Subject: [PATCH 0009/1943] [#199] putsvc: Refactor put object
Resolve containedctx linter for streamer and remote target
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/object.go | 4 +-
pkg/network/transport/object/grpc/service.go | 8 +--
pkg/services/object/acl/v2/service.go | 12 ++---
pkg/services/object/common.go | 4 +-
pkg/services/object/delete/util.go | 6 +--
pkg/services/object/metrics.go | 14 ++---
pkg/services/object/put/distributed.go | 23 ++++----
pkg/services/object/put/local.go | 3 +-
pkg/services/object/put/remote.go | 10 ++--
pkg/services/object/put/service.go | 5 +-
pkg/services/object/put/streamer.go | 12 ++---
pkg/services/object/put/v2/service.go | 5 +-
pkg/services/object/put/v2/streamer.go | 9 ++--
pkg/services/object/put/validation.go | 9 ++--
pkg/services/object/response.go | 20 +++----
pkg/services/object/server.go | 6 +--
pkg/services/object/sign.go | 20 +++----
pkg/services/object/transport_splitter.go | 4 +-
.../object_manager/transformer/fmt.go | 9 ++--
.../object_manager/transformer/transformer.go | 34 ++++++------
.../object_manager/transformer/types.go | 6 +--
.../object_manager/transformer/writer.go | 52 +++++++++++++++++++
pkg/services/util/response/client_stream.go | 9 ++--
pkg/services/util/sign.go | 12 ++---
24 files changed, 171 insertions(+), 125 deletions(-)
create mode 100644 pkg/services/object_manager/transformer/writer.go
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 8680aac28..3b6bdcc7d 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -70,8 +70,8 @@ func (c *cfg) MaxObjectSize() uint64 {
return sz
}
-func (s *objectSvc) Put(ctx context.Context) (objectService.PutObjectStream, error) {
- return s.put.Put(ctx)
+func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
+ return s.put.Put()
}
func (s *objectSvc) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index 82e323a3c..7fa60f99c 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -26,7 +26,7 @@ func New(c objectSvc.ServiceServer) *Server {
// Put opens internal Object service Put stream and overtakes data from gRPC stream to it.
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
- stream, err := s.srv.Put(gStream.Context())
+ stream, err := s.srv.Put()
if err != nil {
return err
}
@@ -35,7 +35,7 @@ func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
req, err := gStream.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
- resp, err := stream.CloseAndRecv()
+ resp, err := stream.CloseAndRecv(gStream.Context())
if err != nil {
return err
}
@@ -51,9 +51,9 @@ func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
return err
}
- if err := stream.Send(putReq); err != nil {
+ if err := stream.Send(gStream.Context(), putReq); err != nil {
if errors.Is(err, util.ErrAbortStream) {
- resp, err := stream.CloseAndRecv()
+ resp, err := stream.CloseAndRecv(gStream.Context())
if err != nil {
return err
}
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index 6bf8c4405..1e451a99f 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -165,8 +165,8 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream
})
}
-func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) {
- streamer, err := b.next.Put(ctx)
+func (b Service) Put() (object.PutObjectStream, error) {
+ streamer, err := b.next.Put()
return putStreamBasicChecker{
source: &b,
@@ -444,7 +444,7 @@ func (b Service) GetRangeHash(
}
// nolint: funlen
-func (p putStreamBasicChecker) Send(request *objectV2.PutRequest) error {
+func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
body := request.GetBody()
if body == nil {
return errEmptyBody
@@ -531,11 +531,11 @@ func (p putStreamBasicChecker) Send(request *objectV2.PutRequest) error {
}
}
- return p.next.Send(request)
+ return p.next.Send(ctx, request)
}
-func (p putStreamBasicChecker) CloseAndRecv() (*objectV2.PutResponse, error) {
- return p.next.CloseAndRecv()
+func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
+ return p.next.CloseAndRecv(ctx)
}
func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index e797f1a64..5b139d8eb 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -42,12 +42,12 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
return x.nextHandler.Get(req, stream)
}
-func (x *Common) Put(ctx context.Context) (PutObjectStream, error) {
+func (x *Common) Put() (PutObjectStream, error) {
if x.state.IsMaintenance() {
return nil, errMaintenance
}
- return x.nextHandler.Put(ctx)
+ return x.nextHandler.Put()
}
func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go
index a8ebb3065..cc5433740 100644
--- a/pkg/services/object/delete/util.go
+++ b/pkg/services/object/delete/util.go
@@ -108,7 +108,7 @@ func (s *simpleIDWriter) WriteIDs(ids []oid.ID) error {
}
func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) {
- streamer, err := (*putsvc.Service)(w).Put(exec.context())
+ streamer, err := (*putsvc.Service)(w).Put()
if err != nil {
return nil, err
}
@@ -124,12 +124,12 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) {
return nil, err
}
- err = streamer.SendChunk(new(putsvc.PutChunkPrm).WithChunk(payload))
+ err = streamer.SendChunk(exec.context(), new(putsvc.PutChunkPrm).WithChunk(payload))
if err != nil {
return nil, err
}
- r, err := streamer.Close()
+ r, err := streamer.Close(exec.context())
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 9f15e834a..3ea16dafd 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -75,11 +75,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
return
}
-func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
+func (m MetricCollector) Put() (PutObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Put(ctx)
+ stream, err := m.next.Put()
if err != nil {
return nil, err
}
@@ -90,7 +90,7 @@ func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
start: t,
}, nil
}
- return m.next.Put(ctx)
+ return m.next.Put()
}
func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
@@ -179,17 +179,17 @@ func (s getStreamMetric) Send(resp *object.GetResponse) error {
return s.stream.Send(resp)
}
-func (s putStreamMetric) Send(req *object.PutRequest) error {
+func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error {
chunk, ok := req.GetBody().GetObjectPart().(*object.PutObjectPartChunk)
if ok {
s.metrics.AddPutPayload(len(chunk.GetChunk()))
}
- return s.stream.Send(req)
+ return s.stream.Send(ctx, req)
}
-func (s putStreamMetric) CloseAndRecv() (*object.PutResponse, error) {
- res, err := s.stream.CloseAndRecv()
+func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ res, err := s.stream.CloseAndRecv(ctx)
s.metrics.IncPutReqCounter(err == nil)
s.metrics.AddPutReqDuration(time.Since(s.start))
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index e4566157e..d8b59487e 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -1,6 +1,7 @@
package putsvc
import (
+ "context"
"fmt"
"sync"
"sync/atomic"
@@ -17,7 +18,7 @@ import (
type preparedObjectTarget interface {
WriteObject(*objectSDK.Object, object.ContentMeta) error
- Close() (*transformer.AccessIdentifiers, error)
+ Close(ctx context.Context) (*transformer.AccessIdentifiers, error)
}
type distributedTarget struct {
@@ -121,13 +122,13 @@ func (t *distributedTarget) WriteHeader(obj *objectSDK.Object) error {
return nil
}
-func (t *distributedTarget) Write(p []byte) (n int, err error) {
+func (t *distributedTarget) Write(_ context.Context, p []byte) (n int, err error) {
t.payload.Data = append(t.payload.Data, p...)
return len(p), nil
}
-func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *distributedTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
defer func() {
putPayload(t.payload)
t.payload = nil
@@ -146,10 +147,10 @@ func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
t.traversal.extraBroadcastEnabled = true
}
- return t.iteratePlacement(t.sendObject)
+ return t.iteratePlacement(ctx)
}
-func (t *distributedTarget) sendObject(node nodeDesc) error {
+func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error {
if !node.local && t.relay != nil {
return t.relay(node)
}
@@ -158,13 +159,13 @@ func (t *distributedTarget) sendObject(node nodeDesc) error {
if err := target.WriteObject(t.obj, t.objMeta); err != nil {
return fmt.Errorf("could not write header: %w", err)
- } else if _, err := target.Close(); err != nil {
+ } else if _, err := target.Close(ctx); err != nil {
return fmt.Errorf("could not close object stream: %w", err)
}
return nil
}
-func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transformer.AccessIdentifiers, error) {
+func (t *distributedTarget) iteratePlacement(ctx context.Context) (*transformer.AccessIdentifiers, error) {
id, _ := t.obj.ID()
traverser, err := placement.NewTraverser(
@@ -182,7 +183,7 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
break
}
- if t.iterateAddresses(traverser, addrs, f, resErr) {
+ if t.iterateAddresses(ctx, traverser, addrs, resErr) {
break
}
}
@@ -195,7 +196,7 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
// perform additional container broadcast if needed
if t.traversal.submitPrimaryPlacementFinish() {
- _, err = t.iteratePlacement(f)
+ _, err = t.iteratePlacement(ctx)
if err != nil {
t.log.Error("additional container broadcast failure", zap.Error(err))
// we don't fail primary operation because of broadcast failure
@@ -208,7 +209,7 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
WithSelfID(id), nil
}
-func (t *distributedTarget) iterateAddresses(traverser *placement.Traverser, addrs []placement.Node, f func(nodeDesc) error, resErr *atomic.Value) bool {
+func (t *distributedTarget) iterateAddresses(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, resErr *atomic.Value) bool {
wg := &sync.WaitGroup{}
for i := range addrs {
@@ -230,7 +231,7 @@ func (t *distributedTarget) iterateAddresses(traverser *placement.Traverser, add
if err := workerPool.Submit(func() {
defer wg.Done()
- err := f(nodeDesc{local: isLocal, info: addr})
+ err := t.sendObject(ctx, nodeDesc{local: isLocal, info: addr})
// mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
diff --git a/pkg/services/object/put/local.go b/pkg/services/object/put/local.go
index f344f77e9..12e3a2eee 100644
--- a/pkg/services/object/put/local.go
+++ b/pkg/services/object/put/local.go
@@ -1,6 +1,7 @@
package putsvc
import (
+ "context"
"fmt"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -38,7 +39,7 @@ func (t *localTarget) WriteObject(obj *object.Object, meta objectCore.ContentMet
return nil
}
-func (t *localTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *localTarget) Close(_ context.Context) (*transformer.AccessIdentifiers, error) {
switch t.meta.Type() {
case object.TypeTombstone:
err := t.storage.Delete(objectCore.AddressOf(t.obj), t.meta.Objects())
diff --git a/pkg/services/object/put/remote.go b/pkg/services/object/put/remote.go
index 760de7508..6933abca6 100644
--- a/pkg/services/object/put/remote.go
+++ b/pkg/services/object/put/remote.go
@@ -15,10 +15,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
-// nolint: containedctx
type remoteTarget struct {
- ctx context.Context
-
privateKey *ecdsa.PrivateKey
commonPrm *util.CommonPrm
@@ -51,7 +48,7 @@ func (t *remoteTarget) WriteObject(obj *object.Object, _ objectcore.ContentMeta)
return nil
}
-func (t *remoteTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *remoteTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
c, err := t.clientConstructor.Get(t.nodeInfo)
if err != nil {
return nil, fmt.Errorf("(%T) could not create SDK client %s: %w", t, t.nodeInfo, err)
@@ -59,7 +56,7 @@ func (t *remoteTarget) Close() (*transformer.AccessIdentifiers, error) {
var prm internalclient.PutObjectPrm
- prm.SetContext(t.ctx)
+ prm.SetContext(ctx)
prm.SetClient(c)
prm.SetPrivateKey(t.privateKey)
prm.SetSessionToken(t.commonPrm.SessionToken())
@@ -110,7 +107,6 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
}
t := &remoteTarget{
- ctx: ctx,
privateKey: key,
clientConstructor: s.clientConstructor,
}
@@ -122,7 +118,7 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
if err := t.WriteObject(p.obj, objectcore.ContentMeta{}); err != nil {
return fmt.Errorf("(%T) could not send object header: %w", s, err)
- } else if _, err := t.Close(); err != nil {
+ } else if _, err := t.Close(ctx); err != nil {
return fmt.Errorf("(%T) could not send object: %w", s, err)
}
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index b74c97d49..567a3fea1 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -1,8 +1,6 @@
package putsvc
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -79,10 +77,9 @@ func NewService(opts ...Option) *Service {
}
}
-func (p *Service) Put(ctx context.Context) (*Streamer, error) {
+func (p *Service) Put() (*Streamer, error) {
return &Streamer{
cfg: p.cfg,
- ctx: ctx,
}, nil
}
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 915b718a3..678cff572 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -16,12 +16,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
-// nolint: containedctx
type Streamer struct {
*cfg
- ctx context.Context
-
sessionKey *ecdsa.PrivateKey
target transformer.ObjectTarget
@@ -232,7 +229,6 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) transformer.ObjectTarget {
}
rt := &remoteTarget{
- ctx: p.ctx,
privateKey: p.sessionKey,
commonPrm: prm.common,
clientConstructor: p.clientConstructor,
@@ -250,24 +246,24 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) transformer.ObjectTarget {
}
}
-func (p *Streamer) SendChunk(prm *PutChunkPrm) error {
+func (p *Streamer) SendChunk(ctx context.Context, prm *PutChunkPrm) error {
if p.target == nil {
return errNotInit
}
- if _, err := p.target.Write(prm.chunk); err != nil {
+ if _, err := p.target.Write(ctx, prm.chunk); err != nil {
return fmt.Errorf("(%T) could not write payload chunk to target: %w", p, err)
}
return nil
}
-func (p *Streamer) Close() (*PutResponse, error) {
+func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) {
if p.target == nil {
return nil, errNotInit
}
- ids, err := p.target.Close()
+ ids, err := p.target.Close(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not close object target: %w", p, err)
}
diff --git a/pkg/services/object/put/v2/service.go b/pkg/services/object/put/v2/service.go
index 7d0dfc613..656f8df9c 100644
--- a/pkg/services/object/put/v2/service.go
+++ b/pkg/services/object/put/v2/service.go
@@ -1,7 +1,6 @@
package putsvc
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@@ -36,8 +35,8 @@ func NewService(opts ...Option) *Service {
}
// Put calls internal service and returns v2 object streamer.
-func (s *Service) Put(ctx context.Context) (object.PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *Service) Put() (object.PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("(%T) could not open object put stream: %w", s, err)
}
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index 85827cd4c..65846ea9f 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -1,6 +1,7 @@
package putsvc
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
@@ -32,7 +33,7 @@ type sizes struct {
writtenPayload uint64 // sum size of already cached chunks
}
-func (s *streamer) Send(req *object.PutRequest) (err error) {
+func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) {
switch v := req.GetBody().GetObjectPart().(type) {
case *object.PutObjectPartInit:
var initPrm *putsvc.PutInitPrm
@@ -71,7 +72,7 @@ func (s *streamer) Send(req *object.PutRequest) (err error) {
}
}
- if err = s.stream.SendChunk(toChunkPrm(v)); err != nil {
+ if err = s.stream.SendChunk(ctx, toChunkPrm(v)); err != nil {
err = fmt.Errorf("(%T) could not send payload chunk: %w", s, err)
}
@@ -103,7 +104,7 @@ func (s *streamer) Send(req *object.PutRequest) (err error) {
return signature.SignServiceMessage(key, req)
}
-func (s *streamer) CloseAndRecv() (*object.PutResponse, error) {
+func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
if s.saveChunks {
// check payload size correctness
if s.writtenPayload != s.payloadSz {
@@ -111,7 +112,7 @@ func (s *streamer) CloseAndRecv() (*object.PutResponse, error) {
}
}
- resp, err := s.stream.Close()
+ resp, err := s.stream.Close(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not object put stream: %w", s, err)
}
diff --git a/pkg/services/object/put/validation.go b/pkg/services/object/put/validation.go
index 2d6ada5a1..70c6974d3 100644
--- a/pkg/services/object/put/validation.go
+++ b/pkg/services/object/put/validation.go
@@ -2,6 +2,7 @@ package putsvc
import (
"bytes"
+ "context"
"crypto/sha256"
"errors"
"fmt"
@@ -92,7 +93,7 @@ func (t *validatingTarget) WriteHeader(obj *objectSDK.Object) error {
return nil
}
-func (t *validatingTarget) Write(p []byte) (n int, err error) {
+func (t *validatingTarget) Write(ctx context.Context, p []byte) (n int, err error) {
chunkLn := uint64(len(p))
if !t.unpreparedObject {
@@ -107,7 +108,7 @@ func (t *validatingTarget) Write(p []byte) (n int, err error) {
}
}
- n, err = t.nextTarget.Write(p)
+ n, err = t.nextTarget.Write(ctx, p)
if err == nil {
t.writtenPayload += uint64(n)
}
@@ -115,7 +116,7 @@ func (t *validatingTarget) Write(p []byte) (n int, err error) {
return
}
-func (t *validatingTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *validatingTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
if !t.unpreparedObject {
// check payload size correctness
if t.payloadSz != t.writtenPayload {
@@ -127,5 +128,5 @@ func (t *validatingTarget) Close() (*transformer.AccessIdentifiers, error) {
}
}
- return t.nextTarget.Close()
+ return t.nextTarget.Close(ctx)
}
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index 4da2b23a7..def934ea6 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -59,12 +59,12 @@ func (s *ResponseService) Get(req *object.GetRequest, stream GetObjectStream) er
})
}
-func (s *putStreamResponser) Send(req *object.PutRequest) error {
- return s.stream.Send(req)
+func (s *putStreamResponser) Send(ctx context.Context, req *object.PutRequest) error {
+ return s.stream.Send(ctx, req)
}
-func (s *putStreamResponser) CloseAndRecv() (*object.PutResponse, error) {
- r, err := s.stream.CloseAndRecv()
+func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ r, err := s.stream.CloseAndRecv(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not receive response: %w", s, err)
}
@@ -72,19 +72,19 @@ func (s *putStreamResponser) CloseAndRecv() (*object.PutResponse, error) {
return r.(*object.PutResponse), nil
}
-func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *ResponseService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
return &putStreamResponser{
stream: s.respSvc.CreateRequestStreamer(
- func(req any) error {
- return stream.Send(req.(*object.PutRequest))
+ func(ctx context.Context, req any) error {
+ return stream.Send(ctx, req.(*object.PutRequest))
},
- func() (util.ResponseMessage, error) {
- return stream.CloseAndRecv()
+ func(ctx context.Context) (util.ResponseMessage, error) {
+ return stream.CloseAndRecv(ctx)
},
),
}, nil
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index d95c6c906..ccce9c4f4 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -27,15 +27,15 @@ type SearchStream interface {
// PutObjectStream is an interface of FrostFS API v2 compatible client's object streamer.
type PutObjectStream interface {
- Send(*object.PutRequest) error
- CloseAndRecv() (*object.PutResponse, error)
+ Send(context.Context, *object.PutRequest) error
+ CloseAndRecv(context.Context) (*object.PutResponse, error)
}
// ServiceServer is an interface of utility
// serving v2 Object service.
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
- Put(context.Context) (PutObjectStream, error)
+ Put() (PutObjectStream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 585fc659a..9d66c76ba 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -70,12 +70,12 @@ func (s *SignService) Get(req *object.GetRequest, stream GetObjectStream) error
)
}
-func (s *putStreamSigner) Send(req *object.PutRequest) error {
- return s.stream.Send(req)
+func (s *putStreamSigner) Send(ctx context.Context, req *object.PutRequest) error {
+ return s.stream.Send(ctx, req)
}
-func (s *putStreamSigner) CloseAndRecv() (*object.PutResponse, error) {
- r, err := s.stream.CloseAndRecv()
+func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ r, err := s.stream.CloseAndRecv(ctx)
if err != nil {
return nil, fmt.Errorf("could not receive response: %w", err)
}
@@ -83,19 +83,19 @@ func (s *putStreamSigner) CloseAndRecv() (*object.PutResponse, error) {
return r.(*object.PutResponse), nil
}
-func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *SignService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
return &putStreamSigner{
stream: s.sigSvc.CreateRequestStreamer(
- func(req any) error {
- return stream.Send(req.(*object.PutRequest))
+ func(ctx context.Context, req any) error {
+ return stream.Send(ctx, req.(*object.PutRequest))
},
- func() (util.ResponseMessage, error) {
- return stream.CloseAndRecv()
+ func(ctx context.Context) (util.ResponseMessage, error) {
+ return stream.CloseAndRecv(ctx)
},
func() util.ResponseMessage {
return new(object.PutResponse)
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index 3836103de..a7d1c486a 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -87,8 +87,8 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream)
})
}
-func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) {
- return c.next.Put(ctx)
+func (c TransportSplitter) Put() (PutObjectStream, error) {
+ return c.next.Put()
}
func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
diff --git a/pkg/services/object_manager/transformer/fmt.go b/pkg/services/object_manager/transformer/fmt.go
index c9b5dc967..462cc7474 100644
--- a/pkg/services/object_manager/transformer/fmt.go
+++ b/pkg/services/object_manager/transformer/fmt.go
@@ -1,6 +1,7 @@
package transformer
import (
+ "context"
"crypto/ecdsa"
"fmt"
@@ -53,15 +54,15 @@ func (f *formatter) WriteHeader(obj *object.Object) error {
return nil
}
-func (f *formatter) Write(p []byte) (n int, err error) {
- n, err = f.prm.NextTarget.Write(p)
+func (f *formatter) Write(ctx context.Context, p []byte) (n int, err error) {
+ n, err = f.prm.NextTarget.Write(ctx, p)
f.sz += uint64(n)
return
}
-func (f *formatter) Close() (*AccessIdentifiers, error) {
+func (f *formatter) Close(ctx context.Context) (*AccessIdentifiers, error) {
curEpoch := f.prm.NetworkState.CurrentEpoch()
ver := version.Current()
@@ -100,7 +101,7 @@ func (f *formatter) Close() (*AccessIdentifiers, error) {
return nil, fmt.Errorf("could not write header to next target: %w", err)
}
- if _, err := f.prm.NextTarget.Close(); err != nil {
+ if _, err := f.prm.NextTarget.Close(ctx); err != nil {
return nil, fmt.Errorf("could not close next target: %w", err)
}
diff --git a/pkg/services/object_manager/transformer/transformer.go b/pkg/services/object_manager/transformer/transformer.go
index 7b717d3df..199f5d0c1 100644
--- a/pkg/services/object_manager/transformer/transformer.go
+++ b/pkg/services/object_manager/transformer/transformer.go
@@ -1,10 +1,10 @@
package transformer
import (
+ "context"
"crypto/sha256"
"fmt"
"hash"
- "io"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -27,7 +27,7 @@ type payloadSizeLimiter struct {
previous []oid.ID
- chunkWriter io.Writer
+ chunkWriter writer
splitID *object.SplitID
@@ -64,16 +64,16 @@ func (s *payloadSizeLimiter) WriteHeader(hdr *object.Object) error {
return nil
}
-func (s *payloadSizeLimiter) Write(p []byte) (int, error) {
- if err := s.writeChunk(p); err != nil {
+func (s *payloadSizeLimiter) Write(ctx context.Context, p []byte) (int, error) {
+ if err := s.writeChunk(ctx, p); err != nil {
return 0, err
}
return len(p), nil
}
-func (s *payloadSizeLimiter) Close() (*AccessIdentifiers, error) {
- return s.release(true)
+func (s *payloadSizeLimiter) Close(ctx context.Context) (*AccessIdentifiers, error) {
+ return s.release(ctx, true)
}
func (s *payloadSizeLimiter) initialize() {
@@ -117,19 +117,19 @@ func (s *payloadSizeLimiter) initializeCurrent() {
s.currentHashers = payloadHashersForObject(s.current, s.withoutHomomorphicHash)
// compose multi-writer from target and all payload hashers
- ws := make([]io.Writer, 0, 1+len(s.currentHashers)+len(s.parentHashers))
+ ws := make([]writer, 0, 1+len(s.currentHashers)+len(s.parentHashers))
ws = append(ws, s.target)
for i := range s.currentHashers {
- ws = append(ws, s.currentHashers[i].hasher)
+ ws = append(ws, newWriter(s.currentHashers[i].hasher))
}
for i := range s.parentHashers {
- ws = append(ws, s.parentHashers[i].hasher)
+ ws = append(ws, newWriter(s.parentHashers[i].hasher))
}
- s.chunkWriter = io.MultiWriter(ws...)
+ s.chunkWriter = newMultiWriter(ws...)
}
func payloadHashersForObject(obj *object.Object, withoutHomomorphicHash bool) []*payloadChecksumHasher {
@@ -174,7 +174,7 @@ func payloadHashersForObject(obj *object.Object, withoutHomomorphicHash bool) []
return hashers
}
-func (s *payloadSizeLimiter) release(finalize bool) (*AccessIdentifiers, error) {
+func (s *payloadSizeLimiter) release(ctx context.Context, finalize bool) (*AccessIdentifiers, error) {
// Arg finalize is true only when called from Close method.
// We finalize parent and generate linking objects only if it is more
// than 1 object in split-chain.
@@ -194,7 +194,7 @@ func (s *payloadSizeLimiter) release(finalize bool) (*AccessIdentifiers, error)
return nil, fmt.Errorf("could not write header: %w", err)
}
- ids, err := s.target.Close()
+ ids, err := s.target.Close(ctx)
if err != nil {
return nil, fmt.Errorf("could not close target: %w", err)
}
@@ -207,7 +207,7 @@ func (s *payloadSizeLimiter) release(finalize bool) (*AccessIdentifiers, error)
s.initializeLinking(ids.Parent())
s.initializeCurrent()
- if _, err := s.release(false); err != nil {
+ if _, err := s.release(ctx, false); err != nil {
return nil, fmt.Errorf("could not release linking object: %w", err)
}
}
@@ -228,7 +228,7 @@ func (s *payloadSizeLimiter) initializeLinking(parHdr *object.Object) {
s.current.SetSplitID(s.splitID)
}
-func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
+func (s *payloadSizeLimiter) writeChunk(ctx context.Context, chunk []byte) error {
// statement is true if the previous write of bytes reached exactly the boundary.
if s.written > 0 && s.written%s.maxSize == 0 {
if s.written == s.maxSize {
@@ -236,7 +236,7 @@ func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
}
// we need to release current object
- if _, err := s.release(false); err != nil {
+ if _, err := s.release(ctx, false); err != nil {
return fmt.Errorf("could not release object: %w", err)
}
@@ -255,7 +255,7 @@ func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
cut = leftToEdge
}
- if _, err := s.chunkWriter.Write(chunk[:cut]); err != nil {
+ if _, err := s.chunkWriter.Write(ctx, chunk[:cut]); err != nil {
return fmt.Errorf("could not write chunk to target: %w", err)
}
@@ -264,7 +264,7 @@ func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
// if there are more bytes in buffer we call method again to start filling another object
if ln > leftToEdge {
- return s.writeChunk(chunk[cut:])
+ return s.writeChunk(ctx, chunk[cut:])
}
return nil
diff --git a/pkg/services/object_manager/transformer/types.go b/pkg/services/object_manager/transformer/types.go
index 0fa3b6436..3e6e2feff 100644
--- a/pkg/services/object_manager/transformer/types.go
+++ b/pkg/services/object_manager/transformer/types.go
@@ -1,7 +1,7 @@
package transformer
import (
- "io"
+ "context"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -35,7 +35,7 @@ type ObjectTarget interface {
// Can be called multiple times.
//
// Must not be called after Close call.
- io.Writer
+ Write(ctx context.Context, p []byte) (n int, err error)
// Close is used to finish object writing.
//
@@ -45,7 +45,7 @@ type ObjectTarget interface {
// Must be called no more than once. Control remains with the caller.
// Re-calling can lead to undefined behavior
// that depends on the implementation.
- Close() (*AccessIdentifiers, error)
+ Close(ctx context.Context) (*AccessIdentifiers, error)
}
// TargetInitializer represents ObjectTarget constructor.
diff --git a/pkg/services/object_manager/transformer/writer.go b/pkg/services/object_manager/transformer/writer.go
new file mode 100644
index 000000000..27aed16ff
--- /dev/null
+++ b/pkg/services/object_manager/transformer/writer.go
@@ -0,0 +1,52 @@
+package transformer
+
+import (
+ "context"
+ "io"
+)
+
+type writer interface {
+ Write(ctx context.Context, p []byte) (n int, err error)
+}
+
+type multiWriter struct {
+ writers []writer
+}
+
+func (t *multiWriter) Write(ctx context.Context, p []byte) (n int, err error) {
+ for _, w := range t.writers {
+ n, err = w.Write(ctx, p)
+ if err != nil {
+ return
+ }
+ if n != len(p) {
+ err = io.ErrShortWrite
+ return
+ }
+ }
+ return len(p), nil
+}
+
+func newMultiWriter(writers ...writer) writer {
+ allWriters := make([]writer, 0, len(writers))
+ for _, w := range writers {
+ if mw, ok := w.(*multiWriter); ok {
+ allWriters = append(allWriters, mw.writers...)
+ } else {
+ allWriters = append(allWriters, w)
+ }
+ }
+ return &multiWriter{allWriters}
+}
+
+type writerWrapper struct {
+ Writer io.Writer
+}
+
+func (w *writerWrapper) Write(_ context.Context, p []byte) (n int, err error) {
+ return w.Writer.Write(p)
+}
+
+func newWriter(w io.Writer) writer {
+ return &writerWrapper{Writer: w}
+}
diff --git a/pkg/services/util/response/client_stream.go b/pkg/services/util/response/client_stream.go
index f167f005a..b541c73db 100644
--- a/pkg/services/util/response/client_stream.go
+++ b/pkg/services/util/response/client_stream.go
@@ -1,6 +1,7 @@
package response
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
@@ -17,8 +18,8 @@ type ClientMessageStreamer struct {
}
// Send calls send method of internal streamer.
-func (s *ClientMessageStreamer) Send(req any) error {
- if err := s.send(req); err != nil {
+func (s *ClientMessageStreamer) Send(ctx context.Context, req any) error {
+ if err := s.send(ctx, req); err != nil {
return fmt.Errorf("(%T) could not send the request: %w", s, err)
}
return nil
@@ -26,8 +27,8 @@ func (s *ClientMessageStreamer) Send(req any) error {
// CloseAndRecv closes internal stream, receivers the response,
// sets meta values and returns the result.
-func (s *ClientMessageStreamer) CloseAndRecv() (util.ResponseMessage, error) {
- resp, err := s.close()
+func (s *ClientMessageStreamer) CloseAndRecv(ctx context.Context) (util.ResponseMessage, error) {
+ resp, err := s.close(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not close stream and receive response: %w", s, err)
}
diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go
index 2478e6256..cb4be3084 100644
--- a/pkg/services/util/sign.go
+++ b/pkg/services/util/sign.go
@@ -37,9 +37,9 @@ var ErrAbortStream = errors.New("abort message stream")
type ResponseConstructor func() ResponseMessage
-type RequestMessageWriter func(any) error
+type RequestMessageWriter func(context.Context, any) error
-type ClientStreamCloser func() (ResponseMessage, error)
+type ClientStreamCloser func(context.Context) (ResponseMessage, error)
type RequestMessageStreamer struct {
key *ecdsa.PrivateKey
@@ -61,7 +61,7 @@ func NewUnarySignService(key *ecdsa.PrivateKey) *SignService {
}
}
-func (s *RequestMessageStreamer) Send(req any) error {
+func (s *RequestMessageStreamer) Send(ctx context.Context, req any) error {
// req argument should be strengthen with type RequestMessage
s.statusSupported = isStatusSupported(req.(RequestMessage)) // panic is OK here for now
@@ -71,7 +71,7 @@ func (s *RequestMessageStreamer) Send(req any) error {
if err = signature.VerifyServiceMessage(req); err != nil {
err = fmt.Errorf("could not verify request: %w", err)
} else {
- err = s.send(req)
+ err = s.send(ctx, req)
}
if err != nil {
@@ -87,7 +87,7 @@ func (s *RequestMessageStreamer) Send(req any) error {
return nil
}
-func (s *RequestMessageStreamer) CloseAndRecv() (ResponseMessage, error) {
+func (s *RequestMessageStreamer) CloseAndRecv(ctx context.Context) (ResponseMessage, error) {
var (
resp ResponseMessage
err error
@@ -96,7 +96,7 @@ func (s *RequestMessageStreamer) CloseAndRecv() (ResponseMessage, error) {
if s.sendErr != nil {
err = s.sendErr
} else {
- resp, err = s.close()
+ resp, err = s.close(ctx)
if err != nil {
err = fmt.Errorf("could not close stream and receive response: %w", err)
}
From f6614e2a0b3a6663bbf27adc4d30deed9b3b5e3f Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 15:40:52 +0300
Subject: [PATCH 0010/1943] [#199] putsvc: Refactor streamer initialization
Resolve funlen linter for initTarget method
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/streamer.go | 24 +++++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 678cff572..fed161e03 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -51,7 +51,6 @@ func (p *Streamer) MaxObjectSize() uint64 {
return p.maxPayloadSz
}
-// nolint: funlen
func (p *Streamer) initTarget(prm *PutInitPrm) error {
// prevent re-calling
if p.target != nil {
@@ -69,19 +68,26 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error {
}
if prm.hdr.Signature() != nil {
- p.relay = prm.relay
+ return p.initUntrustedTarget(prm)
+ }
+ return p.initTrustedTarget(prm)
+}
- // prepare untrusted-Put object target
- p.target = &validatingTarget{
- nextTarget: p.newCommonTarget(prm),
- fmt: p.fmtValidator,
+func (p *Streamer) initUntrustedTarget(prm *PutInitPrm) error {
+ p.relay = prm.relay
- maxPayloadSz: p.maxPayloadSz,
- }
+ // prepare untrusted-Put object target
+ p.target = &validatingTarget{
+ nextTarget: p.newCommonTarget(prm),
+ fmt: p.fmtValidator,
- return nil
+ maxPayloadSz: p.maxPayloadSz,
}
+ return nil
+}
+
+func (p *Streamer) initTrustedTarget(prm *PutInitPrm) error {
sToken := prm.common.SessionToken()
// prepare trusted-Put object target
From 14d894178e29be1e97de160905f3fc7504bd7693 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 12:24:01 +0300
Subject: [PATCH 0011/1943] [#199] putsvc: Refactor placement iterator
Resolve funlen linter for iteratePlacement method
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/distributed.go | 111 ++++++++++++-------------
1 file changed, 53 insertions(+), 58 deletions(-)
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index 47104b323..3b5f4ec53 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -164,7 +164,6 @@ func (t *distributedTarget) sendObject(node nodeDesc) error {
return nil
}
-// nolint: funlen
func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transformer.AccessIdentifiers, error) {
id, _ := t.obj.ID()
@@ -175,72 +174,22 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
return nil, fmt.Errorf("(%T) could not create object placement traverser: %w", t, err)
}
- var resErr atomic.Value
+ resErr := &atomic.Value{}
-loop:
for {
addrs := traverser.Next()
if len(addrs) == 0 {
break
}
- wg := new(sync.WaitGroup)
-
- for i := range addrs {
- if t.traversal.processed(addrs[i]) {
- // it can happen only during additional container broadcast
- continue
- }
-
- wg.Add(1)
-
- addr := addrs[i]
-
- isLocal := t.isLocalKey(addr.PublicKey())
-
- var workerPool util.WorkerPool
-
- if isLocal {
- workerPool = t.localPool
- } else {
- workerPool = t.remotePool
- }
-
- if err := workerPool.Submit(func() {
- defer wg.Done()
-
- err := f(nodeDesc{local: isLocal, info: addr})
-
- // mark the container node as processed in order to exclude it
- // in subsequent container broadcast. Note that we don't
- // process this node during broadcast if primary placement
- // on it failed.
- t.traversal.submitProcessed(addr)
-
- if err != nil {
- resErr.Store(err)
- svcutil.LogServiceError(t.log, "PUT", addr.Addresses(), err)
- return
- }
-
- traverser.SubmitSuccess()
- }); err != nil {
- wg.Done()
-
- svcutil.LogWorkerPoolError(t.log, "PUT", err)
-
- break loop
- }
+ if t.iterateAddresses(traverser, addrs, f, resErr) {
+ break
}
-
- wg.Wait()
}
if !traverser.Success() {
var err errIncompletePut
-
err.singleErr, _ = resErr.Load().(error)
-
return nil, err
}
@@ -248,10 +197,7 @@ loop:
if t.traversal.submitPrimaryPlacementFinish() {
_, err = t.iteratePlacement(f)
if err != nil {
- t.log.Error("additional container broadcast failure",
- zap.Error(err),
- )
-
+ t.log.Error("additional container broadcast failure", zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
@@ -261,3 +207,52 @@ loop:
return new(transformer.AccessIdentifiers).
WithSelfID(id), nil
}
+
+func (t *distributedTarget) iterateAddresses(traverser *placement.Traverser, addrs []placement.Node, f func(nodeDesc) error, resErr *atomic.Value) bool {
+ wg := &sync.WaitGroup{}
+
+ for i := range addrs {
+ if t.traversal.processed(addrs[i]) {
+ // it can happen only during additional container broadcast
+ continue
+ }
+
+ wg.Add(1)
+
+ addr := addrs[i]
+ isLocal := t.isLocalKey(addr.PublicKey())
+
+ workerPool := t.remotePool
+ if isLocal {
+ workerPool = t.localPool
+ }
+
+ if err := workerPool.Submit(func() {
+ defer wg.Done()
+
+ err := f(nodeDesc{local: isLocal, info: addr})
+
+ // mark the container node as processed in order to exclude it
+ // in subsequent container broadcast. Note that we don't
+ // process this node during broadcast if primary placement
+ // on it failed.
+ t.traversal.submitProcessed(addr)
+
+ if err != nil {
+ resErr.Store(err)
+ svcutil.LogServiceError(t.log, "PUT", addr.Addresses(), err)
+ return
+ }
+
+ traverser.SubmitSuccess()
+ }); err != nil {
+ wg.Done()
+ svcutil.LogWorkerPoolError(t.log, "PUT", err)
+ return true
+ }
+ }
+
+ wg.Wait()
+
+ return false
+}
From cecea8053a8e394994a8f85765a7be07b0da394f Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 13:09:44 +0300
Subject: [PATCH 0012/1943] [#199] putsvc: Refactor streamer pool
Resolve staticcheck linter for putBytesPool
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/distributed.go | 6 +++---
pkg/services/object/put/pool.go | 16 ++++++++++------
2 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index 3b5f4ec53..e4566157e 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -28,7 +28,7 @@ type distributedTarget struct {
obj *objectSDK.Object
objMeta object.ContentMeta
- payload []byte
+ payload *payload
nodeTargetInitializer func(nodeDesc) preparedObjectTarget
@@ -122,7 +122,7 @@ func (t *distributedTarget) WriteHeader(obj *objectSDK.Object) error {
}
func (t *distributedTarget) Write(p []byte) (n int, err error) {
- t.payload = append(t.payload, p...)
+ t.payload.Data = append(t.payload.Data, p...)
return len(p), nil
}
@@ -133,7 +133,7 @@ func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
t.payload = nil
}()
- t.obj.SetPayload(t.payload)
+ t.obj.SetPayload(t.payload.Data)
var err error
diff --git a/pkg/services/object/put/pool.go b/pkg/services/object/put/pool.go
index 705273227..5726856e5 100644
--- a/pkg/services/object/put/pool.go
+++ b/pkg/services/object/put/pool.go
@@ -6,15 +6,19 @@ import (
const defaultAllocSize = 1024
+type payload struct {
+ Data []byte
+}
+
var putBytesPool = &sync.Pool{
- New: func() any { return make([]byte, 0, defaultAllocSize) },
+ New: func() any { return &payload{Data: make([]byte, 0, defaultAllocSize)} },
}
-func getPayload() []byte {
- return putBytesPool.Get().([]byte)
+func getPayload() *payload {
+ return putBytesPool.Get().(*payload)
}
-func putPayload(p []byte) {
- //nolint:staticcheck
- putBytesPool.Put(p[:0])
+func putPayload(p *payload) {
+ p.Data = p.Data[:0]
+ putBytesPool.Put(p)
}
From 27bdddc48f1ae68d9eee638b28906fab1ee22938 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 14:23:53 +0300
Subject: [PATCH 0013/1943] [#199] putsvc: Refactor put object
Resolve containedctx linter for streamer and remote target
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/object.go | 4 +-
pkg/network/transport/object/grpc/service.go | 8 +--
pkg/services/object/acl/v2/service.go | 12 ++---
pkg/services/object/common.go | 4 +-
pkg/services/object/delete/util.go | 6 +--
pkg/services/object/metrics.go | 14 ++---
pkg/services/object/put/distributed.go | 23 ++++----
pkg/services/object/put/local.go | 3 +-
pkg/services/object/put/remote.go | 10 ++--
pkg/services/object/put/service.go | 5 +-
pkg/services/object/put/streamer.go | 12 ++---
pkg/services/object/put/v2/service.go | 5 +-
pkg/services/object/put/v2/streamer.go | 9 ++--
pkg/services/object/put/validation.go | 9 ++--
pkg/services/object/response.go | 20 +++----
pkg/services/object/server.go | 6 +--
pkg/services/object/sign.go | 20 +++----
pkg/services/object/transport_splitter.go | 4 +-
.../object_manager/transformer/fmt.go | 9 ++--
.../object_manager/transformer/transformer.go | 34 ++++++------
.../object_manager/transformer/types.go | 6 +--
.../object_manager/transformer/writer.go | 52 +++++++++++++++++++
pkg/services/util/response/client_stream.go | 9 ++--
pkg/services/util/sign.go | 12 ++---
24 files changed, 171 insertions(+), 125 deletions(-)
create mode 100644 pkg/services/object_manager/transformer/writer.go
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 8680aac28..3b6bdcc7d 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -70,8 +70,8 @@ func (c *cfg) MaxObjectSize() uint64 {
return sz
}
-func (s *objectSvc) Put(ctx context.Context) (objectService.PutObjectStream, error) {
- return s.put.Put(ctx)
+func (s *objectSvc) Put() (objectService.PutObjectStream, error) {
+ return s.put.Put()
}
func (s *objectSvc) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) {
diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go
index 82e323a3c..7fa60f99c 100644
--- a/pkg/network/transport/object/grpc/service.go
+++ b/pkg/network/transport/object/grpc/service.go
@@ -26,7 +26,7 @@ func New(c objectSvc.ServiceServer) *Server {
// Put opens internal Object service Put stream and overtakes data from gRPC stream to it.
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
- stream, err := s.srv.Put(gStream.Context())
+ stream, err := s.srv.Put()
if err != nil {
return err
}
@@ -35,7 +35,7 @@ func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
req, err := gStream.Recv()
if err != nil {
if errors.Is(err, io.EOF) {
- resp, err := stream.CloseAndRecv()
+ resp, err := stream.CloseAndRecv(gStream.Context())
if err != nil {
return err
}
@@ -51,9 +51,9 @@ func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
return err
}
- if err := stream.Send(putReq); err != nil {
+ if err := stream.Send(gStream.Context(), putReq); err != nil {
if errors.Is(err, util.ErrAbortStream) {
- resp, err := stream.CloseAndRecv()
+ resp, err := stream.CloseAndRecv(gStream.Context())
if err != nil {
return err
}
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index 6bf8c4405..1e451a99f 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -165,8 +165,8 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream
})
}
-func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) {
- streamer, err := b.next.Put(ctx)
+func (b Service) Put() (object.PutObjectStream, error) {
+ streamer, err := b.next.Put()
return putStreamBasicChecker{
source: &b,
@@ -444,7 +444,7 @@ func (b Service) GetRangeHash(
}
// nolint: funlen
-func (p putStreamBasicChecker) Send(request *objectV2.PutRequest) error {
+func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
body := request.GetBody()
if body == nil {
return errEmptyBody
@@ -531,11 +531,11 @@ func (p putStreamBasicChecker) Send(request *objectV2.PutRequest) error {
}
}
- return p.next.Send(request)
+ return p.next.Send(ctx, request)
}
-func (p putStreamBasicChecker) CloseAndRecv() (*objectV2.PutResponse, error) {
- return p.next.CloseAndRecv()
+func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
+ return p.next.CloseAndRecv(ctx)
}
func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error {
diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go
index e797f1a64..5b139d8eb 100644
--- a/pkg/services/object/common.go
+++ b/pkg/services/object/common.go
@@ -42,12 +42,12 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error {
return x.nextHandler.Get(req, stream)
}
-func (x *Common) Put(ctx context.Context) (PutObjectStream, error) {
+func (x *Common) Put() (PutObjectStream, error) {
if x.state.IsMaintenance() {
return nil, errMaintenance
}
- return x.nextHandler.Put(ctx)
+ return x.nextHandler.Put()
}
func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go
index a8ebb3065..cc5433740 100644
--- a/pkg/services/object/delete/util.go
+++ b/pkg/services/object/delete/util.go
@@ -108,7 +108,7 @@ func (s *simpleIDWriter) WriteIDs(ids []oid.ID) error {
}
func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) {
- streamer, err := (*putsvc.Service)(w).Put(exec.context())
+ streamer, err := (*putsvc.Service)(w).Put()
if err != nil {
return nil, err
}
@@ -124,12 +124,12 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) {
return nil, err
}
- err = streamer.SendChunk(new(putsvc.PutChunkPrm).WithChunk(payload))
+ err = streamer.SendChunk(exec.context(), new(putsvc.PutChunkPrm).WithChunk(payload))
if err != nil {
return nil, err
}
- r, err := streamer.Close()
+ r, err := streamer.Close(exec.context())
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go
index 9f15e834a..3ea16dafd 100644
--- a/pkg/services/object/metrics.go
+++ b/pkg/services/object/metrics.go
@@ -75,11 +75,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er
return
}
-func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
+func (m MetricCollector) Put() (PutObjectStream, error) {
if m.enabled {
t := time.Now()
- stream, err := m.next.Put(ctx)
+ stream, err := m.next.Put()
if err != nil {
return nil, err
}
@@ -90,7 +90,7 @@ func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) {
start: t,
}, nil
}
- return m.next.Put(ctx)
+ return m.next.Put()
}
func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
@@ -179,17 +179,17 @@ func (s getStreamMetric) Send(resp *object.GetResponse) error {
return s.stream.Send(resp)
}
-func (s putStreamMetric) Send(req *object.PutRequest) error {
+func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error {
chunk, ok := req.GetBody().GetObjectPart().(*object.PutObjectPartChunk)
if ok {
s.metrics.AddPutPayload(len(chunk.GetChunk()))
}
- return s.stream.Send(req)
+ return s.stream.Send(ctx, req)
}
-func (s putStreamMetric) CloseAndRecv() (*object.PutResponse, error) {
- res, err := s.stream.CloseAndRecv()
+func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ res, err := s.stream.CloseAndRecv(ctx)
s.metrics.IncPutReqCounter(err == nil)
s.metrics.AddPutReqDuration(time.Since(s.start))
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index e4566157e..d8b59487e 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -1,6 +1,7 @@
package putsvc
import (
+ "context"
"fmt"
"sync"
"sync/atomic"
@@ -17,7 +18,7 @@ import (
type preparedObjectTarget interface {
WriteObject(*objectSDK.Object, object.ContentMeta) error
- Close() (*transformer.AccessIdentifiers, error)
+ Close(ctx context.Context) (*transformer.AccessIdentifiers, error)
}
type distributedTarget struct {
@@ -121,13 +122,13 @@ func (t *distributedTarget) WriteHeader(obj *objectSDK.Object) error {
return nil
}
-func (t *distributedTarget) Write(p []byte) (n int, err error) {
+func (t *distributedTarget) Write(_ context.Context, p []byte) (n int, err error) {
t.payload.Data = append(t.payload.Data, p...)
return len(p), nil
}
-func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *distributedTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
defer func() {
putPayload(t.payload)
t.payload = nil
@@ -146,10 +147,10 @@ func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
t.traversal.extraBroadcastEnabled = true
}
- return t.iteratePlacement(t.sendObject)
+ return t.iteratePlacement(ctx)
}
-func (t *distributedTarget) sendObject(node nodeDesc) error {
+func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error {
if !node.local && t.relay != nil {
return t.relay(node)
}
@@ -158,13 +159,13 @@ func (t *distributedTarget) sendObject(node nodeDesc) error {
if err := target.WriteObject(t.obj, t.objMeta); err != nil {
return fmt.Errorf("could not write header: %w", err)
- } else if _, err := target.Close(); err != nil {
+ } else if _, err := target.Close(ctx); err != nil {
return fmt.Errorf("could not close object stream: %w", err)
}
return nil
}
-func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transformer.AccessIdentifiers, error) {
+func (t *distributedTarget) iteratePlacement(ctx context.Context) (*transformer.AccessIdentifiers, error) {
id, _ := t.obj.ID()
traverser, err := placement.NewTraverser(
@@ -182,7 +183,7 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
break
}
- if t.iterateAddresses(traverser, addrs, f, resErr) {
+ if t.iterateAddresses(ctx, traverser, addrs, resErr) {
break
}
}
@@ -195,7 +196,7 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
// perform additional container broadcast if needed
if t.traversal.submitPrimaryPlacementFinish() {
- _, err = t.iteratePlacement(f)
+ _, err = t.iteratePlacement(ctx)
if err != nil {
t.log.Error("additional container broadcast failure", zap.Error(err))
// we don't fail primary operation because of broadcast failure
@@ -208,7 +209,7 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
WithSelfID(id), nil
}
-func (t *distributedTarget) iterateAddresses(traverser *placement.Traverser, addrs []placement.Node, f func(nodeDesc) error, resErr *atomic.Value) bool {
+func (t *distributedTarget) iterateAddresses(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, resErr *atomic.Value) bool {
wg := &sync.WaitGroup{}
for i := range addrs {
@@ -230,7 +231,7 @@ func (t *distributedTarget) iterateAddresses(traverser *placement.Traverser, add
if err := workerPool.Submit(func() {
defer wg.Done()
- err := f(nodeDesc{local: isLocal, info: addr})
+ err := t.sendObject(ctx, nodeDesc{local: isLocal, info: addr})
// mark the container node as processed in order to exclude it
// in subsequent container broadcast. Note that we don't
diff --git a/pkg/services/object/put/local.go b/pkg/services/object/put/local.go
index f344f77e9..12e3a2eee 100644
--- a/pkg/services/object/put/local.go
+++ b/pkg/services/object/put/local.go
@@ -1,6 +1,7 @@
package putsvc
import (
+ "context"
"fmt"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -38,7 +39,7 @@ func (t *localTarget) WriteObject(obj *object.Object, meta objectCore.ContentMet
return nil
}
-func (t *localTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *localTarget) Close(_ context.Context) (*transformer.AccessIdentifiers, error) {
switch t.meta.Type() {
case object.TypeTombstone:
err := t.storage.Delete(objectCore.AddressOf(t.obj), t.meta.Objects())
diff --git a/pkg/services/object/put/remote.go b/pkg/services/object/put/remote.go
index 760de7508..6933abca6 100644
--- a/pkg/services/object/put/remote.go
+++ b/pkg/services/object/put/remote.go
@@ -15,10 +15,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
-// nolint: containedctx
type remoteTarget struct {
- ctx context.Context
-
privateKey *ecdsa.PrivateKey
commonPrm *util.CommonPrm
@@ -51,7 +48,7 @@ func (t *remoteTarget) WriteObject(obj *object.Object, _ objectcore.ContentMeta)
return nil
}
-func (t *remoteTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *remoteTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
c, err := t.clientConstructor.Get(t.nodeInfo)
if err != nil {
return nil, fmt.Errorf("(%T) could not create SDK client %s: %w", t, t.nodeInfo, err)
@@ -59,7 +56,7 @@ func (t *remoteTarget) Close() (*transformer.AccessIdentifiers, error) {
var prm internalclient.PutObjectPrm
- prm.SetContext(t.ctx)
+ prm.SetContext(ctx)
prm.SetClient(c)
prm.SetPrivateKey(t.privateKey)
prm.SetSessionToken(t.commonPrm.SessionToken())
@@ -110,7 +107,6 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
}
t := &remoteTarget{
- ctx: ctx,
privateKey: key,
clientConstructor: s.clientConstructor,
}
@@ -122,7 +118,7 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
if err := t.WriteObject(p.obj, objectcore.ContentMeta{}); err != nil {
return fmt.Errorf("(%T) could not send object header: %w", s, err)
- } else if _, err := t.Close(); err != nil {
+ } else if _, err := t.Close(ctx); err != nil {
return fmt.Errorf("(%T) could not send object: %w", s, err)
}
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index b74c97d49..567a3fea1 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -1,8 +1,6 @@
package putsvc
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -79,10 +77,9 @@ func NewService(opts ...Option) *Service {
}
}
-func (p *Service) Put(ctx context.Context) (*Streamer, error) {
+func (p *Service) Put() (*Streamer, error) {
return &Streamer{
cfg: p.cfg,
- ctx: ctx,
}, nil
}
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 915b718a3..678cff572 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -16,12 +16,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
-// nolint: containedctx
type Streamer struct {
*cfg
- ctx context.Context
-
sessionKey *ecdsa.PrivateKey
target transformer.ObjectTarget
@@ -232,7 +229,6 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) transformer.ObjectTarget {
}
rt := &remoteTarget{
- ctx: p.ctx,
privateKey: p.sessionKey,
commonPrm: prm.common,
clientConstructor: p.clientConstructor,
@@ -250,24 +246,24 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) transformer.ObjectTarget {
}
}
-func (p *Streamer) SendChunk(prm *PutChunkPrm) error {
+func (p *Streamer) SendChunk(ctx context.Context, prm *PutChunkPrm) error {
if p.target == nil {
return errNotInit
}
- if _, err := p.target.Write(prm.chunk); err != nil {
+ if _, err := p.target.Write(ctx, prm.chunk); err != nil {
return fmt.Errorf("(%T) could not write payload chunk to target: %w", p, err)
}
return nil
}
-func (p *Streamer) Close() (*PutResponse, error) {
+func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) {
if p.target == nil {
return nil, errNotInit
}
- ids, err := p.target.Close()
+ ids, err := p.target.Close(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not close object target: %w", p, err)
}
diff --git a/pkg/services/object/put/v2/service.go b/pkg/services/object/put/v2/service.go
index 7d0dfc613..656f8df9c 100644
--- a/pkg/services/object/put/v2/service.go
+++ b/pkg/services/object/put/v2/service.go
@@ -1,7 +1,6 @@
package putsvc
import (
- "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@@ -36,8 +35,8 @@ func NewService(opts ...Option) *Service {
}
// Put calls internal service and returns v2 object streamer.
-func (s *Service) Put(ctx context.Context) (object.PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *Service) Put() (object.PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("(%T) could not open object put stream: %w", s, err)
}
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index 85827cd4c..65846ea9f 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -1,6 +1,7 @@
package putsvc
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
@@ -32,7 +33,7 @@ type sizes struct {
writtenPayload uint64 // sum size of already cached chunks
}
-func (s *streamer) Send(req *object.PutRequest) (err error) {
+func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) {
switch v := req.GetBody().GetObjectPart().(type) {
case *object.PutObjectPartInit:
var initPrm *putsvc.PutInitPrm
@@ -71,7 +72,7 @@ func (s *streamer) Send(req *object.PutRequest) (err error) {
}
}
- if err = s.stream.SendChunk(toChunkPrm(v)); err != nil {
+ if err = s.stream.SendChunk(ctx, toChunkPrm(v)); err != nil {
err = fmt.Errorf("(%T) could not send payload chunk: %w", s, err)
}
@@ -103,7 +104,7 @@ func (s *streamer) Send(req *object.PutRequest) (err error) {
return signature.SignServiceMessage(key, req)
}
-func (s *streamer) CloseAndRecv() (*object.PutResponse, error) {
+func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
if s.saveChunks {
// check payload size correctness
if s.writtenPayload != s.payloadSz {
@@ -111,7 +112,7 @@ func (s *streamer) CloseAndRecv() (*object.PutResponse, error) {
}
}
- resp, err := s.stream.Close()
+ resp, err := s.stream.Close(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not object put stream: %w", s, err)
}
diff --git a/pkg/services/object/put/validation.go b/pkg/services/object/put/validation.go
index 2d6ada5a1..70c6974d3 100644
--- a/pkg/services/object/put/validation.go
+++ b/pkg/services/object/put/validation.go
@@ -2,6 +2,7 @@ package putsvc
import (
"bytes"
+ "context"
"crypto/sha256"
"errors"
"fmt"
@@ -92,7 +93,7 @@ func (t *validatingTarget) WriteHeader(obj *objectSDK.Object) error {
return nil
}
-func (t *validatingTarget) Write(p []byte) (n int, err error) {
+func (t *validatingTarget) Write(ctx context.Context, p []byte) (n int, err error) {
chunkLn := uint64(len(p))
if !t.unpreparedObject {
@@ -107,7 +108,7 @@ func (t *validatingTarget) Write(p []byte) (n int, err error) {
}
}
- n, err = t.nextTarget.Write(p)
+ n, err = t.nextTarget.Write(ctx, p)
if err == nil {
t.writtenPayload += uint64(n)
}
@@ -115,7 +116,7 @@ func (t *validatingTarget) Write(p []byte) (n int, err error) {
return
}
-func (t *validatingTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *validatingTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
if !t.unpreparedObject {
// check payload size correctness
if t.payloadSz != t.writtenPayload {
@@ -127,5 +128,5 @@ func (t *validatingTarget) Close() (*transformer.AccessIdentifiers, error) {
}
}
- return t.nextTarget.Close()
+ return t.nextTarget.Close(ctx)
}
diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go
index 4da2b23a7..def934ea6 100644
--- a/pkg/services/object/response.go
+++ b/pkg/services/object/response.go
@@ -59,12 +59,12 @@ func (s *ResponseService) Get(req *object.GetRequest, stream GetObjectStream) er
})
}
-func (s *putStreamResponser) Send(req *object.PutRequest) error {
- return s.stream.Send(req)
+func (s *putStreamResponser) Send(ctx context.Context, req *object.PutRequest) error {
+ return s.stream.Send(ctx, req)
}
-func (s *putStreamResponser) CloseAndRecv() (*object.PutResponse, error) {
- r, err := s.stream.CloseAndRecv()
+func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ r, err := s.stream.CloseAndRecv(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not receive response: %w", s, err)
}
@@ -72,19 +72,19 @@ func (s *putStreamResponser) CloseAndRecv() (*object.PutResponse, error) {
return r.(*object.PutResponse), nil
}
-func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *ResponseService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
return &putStreamResponser{
stream: s.respSvc.CreateRequestStreamer(
- func(req any) error {
- return stream.Send(req.(*object.PutRequest))
+ func(ctx context.Context, req any) error {
+ return stream.Send(ctx, req.(*object.PutRequest))
},
- func() (util.ResponseMessage, error) {
- return stream.CloseAndRecv()
+ func(ctx context.Context) (util.ResponseMessage, error) {
+ return stream.CloseAndRecv(ctx)
},
),
}, nil
diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go
index d95c6c906..ccce9c4f4 100644
--- a/pkg/services/object/server.go
+++ b/pkg/services/object/server.go
@@ -27,15 +27,15 @@ type SearchStream interface {
// PutObjectStream is an interface of FrostFS API v2 compatible client's object streamer.
type PutObjectStream interface {
- Send(*object.PutRequest) error
- CloseAndRecv() (*object.PutResponse, error)
+ Send(context.Context, *object.PutRequest) error
+ CloseAndRecv(context.Context) (*object.PutResponse, error)
}
// ServiceServer is an interface of utility
// serving v2 Object service.
type ServiceServer interface {
Get(*object.GetRequest, GetObjectStream) error
- Put(context.Context) (PutObjectStream, error)
+ Put() (PutObjectStream, error)
Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error)
Search(*object.SearchRequest, SearchStream) error
Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 585fc659a..9d66c76ba 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -70,12 +70,12 @@ func (s *SignService) Get(req *object.GetRequest, stream GetObjectStream) error
)
}
-func (s *putStreamSigner) Send(req *object.PutRequest) error {
- return s.stream.Send(req)
+func (s *putStreamSigner) Send(ctx context.Context, req *object.PutRequest) error {
+ return s.stream.Send(ctx, req)
}
-func (s *putStreamSigner) CloseAndRecv() (*object.PutResponse, error) {
- r, err := s.stream.CloseAndRecv()
+func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ r, err := s.stream.CloseAndRecv(ctx)
if err != nil {
return nil, fmt.Errorf("could not receive response: %w", err)
}
@@ -83,19 +83,19 @@ func (s *putStreamSigner) CloseAndRecv() (*object.PutResponse, error) {
return r.(*object.PutResponse), nil
}
-func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) {
- stream, err := s.svc.Put(ctx)
+func (s *SignService) Put() (PutObjectStream, error) {
+ stream, err := s.svc.Put()
if err != nil {
return nil, fmt.Errorf("could not create Put object streamer: %w", err)
}
return &putStreamSigner{
stream: s.sigSvc.CreateRequestStreamer(
- func(req any) error {
- return stream.Send(req.(*object.PutRequest))
+ func(ctx context.Context, req any) error {
+ return stream.Send(ctx, req.(*object.PutRequest))
},
- func() (util.ResponseMessage, error) {
- return stream.CloseAndRecv()
+ func(ctx context.Context) (util.ResponseMessage, error) {
+ return stream.CloseAndRecv(ctx)
},
func() util.ResponseMessage {
return new(object.PutResponse)
diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go
index 3836103de..a7d1c486a 100644
--- a/pkg/services/object/transport_splitter.go
+++ b/pkg/services/object/transport_splitter.go
@@ -87,8 +87,8 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream)
})
}
-func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) {
- return c.next.Put(ctx)
+func (c TransportSplitter) Put() (PutObjectStream, error) {
+ return c.next.Put()
}
func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) {
diff --git a/pkg/services/object_manager/transformer/fmt.go b/pkg/services/object_manager/transformer/fmt.go
index c9b5dc967..462cc7474 100644
--- a/pkg/services/object_manager/transformer/fmt.go
+++ b/pkg/services/object_manager/transformer/fmt.go
@@ -1,6 +1,7 @@
package transformer
import (
+ "context"
"crypto/ecdsa"
"fmt"
@@ -53,15 +54,15 @@ func (f *formatter) WriteHeader(obj *object.Object) error {
return nil
}
-func (f *formatter) Write(p []byte) (n int, err error) {
- n, err = f.prm.NextTarget.Write(p)
+func (f *formatter) Write(ctx context.Context, p []byte) (n int, err error) {
+ n, err = f.prm.NextTarget.Write(ctx, p)
f.sz += uint64(n)
return
}
-func (f *formatter) Close() (*AccessIdentifiers, error) {
+func (f *formatter) Close(ctx context.Context) (*AccessIdentifiers, error) {
curEpoch := f.prm.NetworkState.CurrentEpoch()
ver := version.Current()
@@ -100,7 +101,7 @@ func (f *formatter) Close() (*AccessIdentifiers, error) {
return nil, fmt.Errorf("could not write header to next target: %w", err)
}
- if _, err := f.prm.NextTarget.Close(); err != nil {
+ if _, err := f.prm.NextTarget.Close(ctx); err != nil {
return nil, fmt.Errorf("could not close next target: %w", err)
}
diff --git a/pkg/services/object_manager/transformer/transformer.go b/pkg/services/object_manager/transformer/transformer.go
index 7b717d3df..199f5d0c1 100644
--- a/pkg/services/object_manager/transformer/transformer.go
+++ b/pkg/services/object_manager/transformer/transformer.go
@@ -1,10 +1,10 @@
package transformer
import (
+ "context"
"crypto/sha256"
"fmt"
"hash"
- "io"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -27,7 +27,7 @@ type payloadSizeLimiter struct {
previous []oid.ID
- chunkWriter io.Writer
+ chunkWriter writer
splitID *object.SplitID
@@ -64,16 +64,16 @@ func (s *payloadSizeLimiter) WriteHeader(hdr *object.Object) error {
return nil
}
-func (s *payloadSizeLimiter) Write(p []byte) (int, error) {
- if err := s.writeChunk(p); err != nil {
+func (s *payloadSizeLimiter) Write(ctx context.Context, p []byte) (int, error) {
+ if err := s.writeChunk(ctx, p); err != nil {
return 0, err
}
return len(p), nil
}
-func (s *payloadSizeLimiter) Close() (*AccessIdentifiers, error) {
- return s.release(true)
+func (s *payloadSizeLimiter) Close(ctx context.Context) (*AccessIdentifiers, error) {
+ return s.release(ctx, true)
}
func (s *payloadSizeLimiter) initialize() {
@@ -117,19 +117,19 @@ func (s *payloadSizeLimiter) initializeCurrent() {
s.currentHashers = payloadHashersForObject(s.current, s.withoutHomomorphicHash)
// compose multi-writer from target and all payload hashers
- ws := make([]io.Writer, 0, 1+len(s.currentHashers)+len(s.parentHashers))
+ ws := make([]writer, 0, 1+len(s.currentHashers)+len(s.parentHashers))
ws = append(ws, s.target)
for i := range s.currentHashers {
- ws = append(ws, s.currentHashers[i].hasher)
+ ws = append(ws, newWriter(s.currentHashers[i].hasher))
}
for i := range s.parentHashers {
- ws = append(ws, s.parentHashers[i].hasher)
+ ws = append(ws, newWriter(s.parentHashers[i].hasher))
}
- s.chunkWriter = io.MultiWriter(ws...)
+ s.chunkWriter = newMultiWriter(ws...)
}
func payloadHashersForObject(obj *object.Object, withoutHomomorphicHash bool) []*payloadChecksumHasher {
@@ -174,7 +174,7 @@ func payloadHashersForObject(obj *object.Object, withoutHomomorphicHash bool) []
return hashers
}
-func (s *payloadSizeLimiter) release(finalize bool) (*AccessIdentifiers, error) {
+func (s *payloadSizeLimiter) release(ctx context.Context, finalize bool) (*AccessIdentifiers, error) {
// Arg finalize is true only when called from Close method.
// We finalize parent and generate linking objects only if it is more
// than 1 object in split-chain.
@@ -194,7 +194,7 @@ func (s *payloadSizeLimiter) release(finalize bool) (*AccessIdentifiers, error)
return nil, fmt.Errorf("could not write header: %w", err)
}
- ids, err := s.target.Close()
+ ids, err := s.target.Close(ctx)
if err != nil {
return nil, fmt.Errorf("could not close target: %w", err)
}
@@ -207,7 +207,7 @@ func (s *payloadSizeLimiter) release(finalize bool) (*AccessIdentifiers, error)
s.initializeLinking(ids.Parent())
s.initializeCurrent()
- if _, err := s.release(false); err != nil {
+ if _, err := s.release(ctx, false); err != nil {
return nil, fmt.Errorf("could not release linking object: %w", err)
}
}
@@ -228,7 +228,7 @@ func (s *payloadSizeLimiter) initializeLinking(parHdr *object.Object) {
s.current.SetSplitID(s.splitID)
}
-func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
+func (s *payloadSizeLimiter) writeChunk(ctx context.Context, chunk []byte) error {
// statement is true if the previous write of bytes reached exactly the boundary.
if s.written > 0 && s.written%s.maxSize == 0 {
if s.written == s.maxSize {
@@ -236,7 +236,7 @@ func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
}
// we need to release current object
- if _, err := s.release(false); err != nil {
+ if _, err := s.release(ctx, false); err != nil {
return fmt.Errorf("could not release object: %w", err)
}
@@ -255,7 +255,7 @@ func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
cut = leftToEdge
}
- if _, err := s.chunkWriter.Write(chunk[:cut]); err != nil {
+ if _, err := s.chunkWriter.Write(ctx, chunk[:cut]); err != nil {
return fmt.Errorf("could not write chunk to target: %w", err)
}
@@ -264,7 +264,7 @@ func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
// if there are more bytes in buffer we call method again to start filling another object
if ln > leftToEdge {
- return s.writeChunk(chunk[cut:])
+ return s.writeChunk(ctx, chunk[cut:])
}
return nil
diff --git a/pkg/services/object_manager/transformer/types.go b/pkg/services/object_manager/transformer/types.go
index 0fa3b6436..3e6e2feff 100644
--- a/pkg/services/object_manager/transformer/types.go
+++ b/pkg/services/object_manager/transformer/types.go
@@ -1,7 +1,7 @@
package transformer
import (
- "io"
+ "context"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -35,7 +35,7 @@ type ObjectTarget interface {
// Can be called multiple times.
//
// Must not be called after Close call.
- io.Writer
+ Write(ctx context.Context, p []byte) (n int, err error)
// Close is used to finish object writing.
//
@@ -45,7 +45,7 @@ type ObjectTarget interface {
// Must be called no more than once. Control remains with the caller.
// Re-calling can lead to undefined behavior
// that depends on the implementation.
- Close() (*AccessIdentifiers, error)
+ Close(ctx context.Context) (*AccessIdentifiers, error)
}
// TargetInitializer represents ObjectTarget constructor.
diff --git a/pkg/services/object_manager/transformer/writer.go b/pkg/services/object_manager/transformer/writer.go
new file mode 100644
index 000000000..27aed16ff
--- /dev/null
+++ b/pkg/services/object_manager/transformer/writer.go
@@ -0,0 +1,52 @@
+package transformer
+
+import (
+ "context"
+ "io"
+)
+
+type writer interface {
+ Write(ctx context.Context, p []byte) (n int, err error)
+}
+
+type multiWriter struct {
+ writers []writer
+}
+
+func (t *multiWriter) Write(ctx context.Context, p []byte) (n int, err error) {
+ for _, w := range t.writers {
+ n, err = w.Write(ctx, p)
+ if err != nil {
+ return
+ }
+ if n != len(p) {
+ err = io.ErrShortWrite
+ return
+ }
+ }
+ return len(p), nil
+}
+
+func newMultiWriter(writers ...writer) writer {
+ allWriters := make([]writer, 0, len(writers))
+ for _, w := range writers {
+ if mw, ok := w.(*multiWriter); ok {
+ allWriters = append(allWriters, mw.writers...)
+ } else {
+ allWriters = append(allWriters, w)
+ }
+ }
+ return &multiWriter{allWriters}
+}
+
+type writerWrapper struct {
+ Writer io.Writer
+}
+
+func (w *writerWrapper) Write(_ context.Context, p []byte) (n int, err error) {
+ return w.Writer.Write(p)
+}
+
+func newWriter(w io.Writer) writer {
+ return &writerWrapper{Writer: w}
+}
diff --git a/pkg/services/util/response/client_stream.go b/pkg/services/util/response/client_stream.go
index f167f005a..b541c73db 100644
--- a/pkg/services/util/response/client_stream.go
+++ b/pkg/services/util/response/client_stream.go
@@ -1,6 +1,7 @@
package response
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
@@ -17,8 +18,8 @@ type ClientMessageStreamer struct {
}
// Send calls send method of internal streamer.
-func (s *ClientMessageStreamer) Send(req any) error {
- if err := s.send(req); err != nil {
+func (s *ClientMessageStreamer) Send(ctx context.Context, req any) error {
+ if err := s.send(ctx, req); err != nil {
return fmt.Errorf("(%T) could not send the request: %w", s, err)
}
return nil
@@ -26,8 +27,8 @@ func (s *ClientMessageStreamer) Send(req any) error {
// CloseAndRecv closes internal stream, receivers the response,
// sets meta values and returns the result.
-func (s *ClientMessageStreamer) CloseAndRecv() (util.ResponseMessage, error) {
- resp, err := s.close()
+func (s *ClientMessageStreamer) CloseAndRecv(ctx context.Context) (util.ResponseMessage, error) {
+ resp, err := s.close(ctx)
if err != nil {
return nil, fmt.Errorf("(%T) could not close stream and receive response: %w", s, err)
}
diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go
index 2478e6256..cb4be3084 100644
--- a/pkg/services/util/sign.go
+++ b/pkg/services/util/sign.go
@@ -37,9 +37,9 @@ var ErrAbortStream = errors.New("abort message stream")
type ResponseConstructor func() ResponseMessage
-type RequestMessageWriter func(any) error
+type RequestMessageWriter func(context.Context, any) error
-type ClientStreamCloser func() (ResponseMessage, error)
+type ClientStreamCloser func(context.Context) (ResponseMessage, error)
type RequestMessageStreamer struct {
key *ecdsa.PrivateKey
@@ -61,7 +61,7 @@ func NewUnarySignService(key *ecdsa.PrivateKey) *SignService {
}
}
-func (s *RequestMessageStreamer) Send(req any) error {
+func (s *RequestMessageStreamer) Send(ctx context.Context, req any) error {
// req argument should be strengthen with type RequestMessage
s.statusSupported = isStatusSupported(req.(RequestMessage)) // panic is OK here for now
@@ -71,7 +71,7 @@ func (s *RequestMessageStreamer) Send(req any) error {
if err = signature.VerifyServiceMessage(req); err != nil {
err = fmt.Errorf("could not verify request: %w", err)
} else {
- err = s.send(req)
+ err = s.send(ctx, req)
}
if err != nil {
@@ -87,7 +87,7 @@ func (s *RequestMessageStreamer) Send(req any) error {
return nil
}
-func (s *RequestMessageStreamer) CloseAndRecv() (ResponseMessage, error) {
+func (s *RequestMessageStreamer) CloseAndRecv(ctx context.Context) (ResponseMessage, error) {
var (
resp ResponseMessage
err error
@@ -96,7 +96,7 @@ func (s *RequestMessageStreamer) CloseAndRecv() (ResponseMessage, error) {
if s.sendErr != nil {
err = s.sendErr
} else {
- resp, err = s.close()
+ resp, err = s.close(ctx)
if err != nil {
err = fmt.Errorf("could not close stream and receive response: %w", err)
}
From ece6c820e70d8a58ab0a20f72c45708c8d411958 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 15:40:52 +0300
Subject: [PATCH 0014/1943] [#199] putsvc: Refactor streamer initialization
Resolve funlen linter for initTarget method
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/streamer.go | 24 +++++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 678cff572..fed161e03 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -51,7 +51,6 @@ func (p *Streamer) MaxObjectSize() uint64 {
return p.maxPayloadSz
}
-// nolint: funlen
func (p *Streamer) initTarget(prm *PutInitPrm) error {
// prevent re-calling
if p.target != nil {
@@ -69,19 +68,26 @@ func (p *Streamer) initTarget(prm *PutInitPrm) error {
}
if prm.hdr.Signature() != nil {
- p.relay = prm.relay
+ return p.initUntrustedTarget(prm)
+ }
+ return p.initTrustedTarget(prm)
+}
- // prepare untrusted-Put object target
- p.target = &validatingTarget{
- nextTarget: p.newCommonTarget(prm),
- fmt: p.fmtValidator,
+func (p *Streamer) initUntrustedTarget(prm *PutInitPrm) error {
+ p.relay = prm.relay
- maxPayloadSz: p.maxPayloadSz,
- }
+ // prepare untrusted-Put object target
+ p.target = &validatingTarget{
+ nextTarget: p.newCommonTarget(prm),
+ fmt: p.fmtValidator,
- return nil
+ maxPayloadSz: p.maxPayloadSz,
}
+ return nil
+}
+
+func (p *Streamer) initTrustedTarget(prm *PutInitPrm) error {
sToken := prm.common.SessionToken()
// prepare trusted-Put object target
From 0948a280fa0dd5341c25885e8790e6af16c40f88 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 3 Apr 2023 10:44:05 +0300
Subject: [PATCH 0015/1943] [#195] morph: use blocking unlimited pool for
notifications
With a non-blocking pool restricted to a capacity of 10, the probability of
dropping events is unexpectedly high. Notifications are an essential part of FrostFS;
we should not drop anything, especially new epochs.
```
Mar 31 07:07:03 vedi neofs-ir[19164]: 2023-03-31T07:07:03.901Z debug subscriber/subscriber.go:154 new notification event from sidechain {"name": "NewEpoch"}
Mar 31 07:07:03 vedi neofs-ir[19164]: 2023-03-31T07:07:03.901Z warn event/listener.go:248 listener worker pool drained {"chain": "morph", "capacity": 10}
```
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-node/morph.go | 13 ++-----------
pkg/innerring/innerring.go | 13 ++-----------
pkg/morph/event/listener.go | 14 +++-----------
3 files changed, 7 insertions(+), 33 deletions(-)
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 439de3a9e..2dfbe5c18 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -181,14 +181,6 @@ func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
}
func listenMorphNotifications(ctx context.Context, c *cfg) {
- // listenerPoolCap is a capacity of a
- // worker pool inside the listener. It
- // is used to prevent blocking in neo-go:
- // the client cannot make RPC requests if
- // the notification channel is not being
- // read by another goroutine.
- const listenerPoolCap = 10
-
var (
err error
subs subscriber.Subscriber
@@ -208,9 +200,8 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fatalOnErr(err)
lis, err := event.NewListener(event.ListenerParams{
- Logger: c.log,
- Subscriber: subs,
- WorkerPoolCapacity: listenerPoolCap,
+ Logger: c.log,
+ Subscriber: subs,
})
fatalOnErr(err)
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 38023932f..3b42a5853 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -409,14 +409,6 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan
}
func createListener(ctx context.Context, cli *client.Client, p *chainParams) (event.Listener, error) {
- // listenerPoolCap is a capacity of a
- // worker pool inside the listener. It
- // is used to prevent blocking in neo-go:
- // the client cannot make RPC requests if
- // the notification channel is not being
- // read by another goroutine.
- const listenerPoolCap = 10
-
var (
sub subscriber.Subscriber
err error
@@ -432,9 +424,8 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev
}
listener, err := event.NewListener(event.ListenerParams{
- Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
- Subscriber: sub,
- WorkerPoolCapacity: listenerPoolCap,
+ Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))},
+ Subscriber: sub,
})
if err != nil {
return nil, err
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index 64fdc3df3..0bc7e89f8 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -600,11 +600,6 @@ func (l *listener) RegisterBlockHandler(handler BlockHandler) {
// NewListener create the notification event listener instance and returns Listener interface.
func NewListener(p ListenerParams) (Listener, error) {
- // defaultPoolCap is a default worker
- // pool capacity if it was not specified
- // via params
- const defaultPoolCap = 10
-
switch {
case p.Logger == nil:
return nil, fmt.Errorf("%s: %w", newListenerFailMsg, errNilLogger)
@@ -612,12 +607,9 @@ func NewListener(p ListenerParams) (Listener, error) {
return nil, fmt.Errorf("%s: %w", newListenerFailMsg, errNilSubscriber)
}
- poolCap := p.WorkerPoolCapacity
- if poolCap == 0 {
- poolCap = defaultPoolCap
- }
-
- pool, err := ants.NewPool(poolCap, ants.WithNonblocking(true))
+ // The pool here must be blocking, otherwise notifications could be dropped.
+ // The default capacity is 0, which means "infinite".
+ pool, err := ants.NewPool(p.WorkerPoolCapacity)
if err != nil {
return nil, fmt.Errorf("could not init worker pool: %w", err)
}
From 8e5a0dcf272dda711449a31746f602d5a72c81f1 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 19:15:13 +0300
Subject: [PATCH 0016/1943] [#204] gc: Fix GC handlers start
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/shard/gc.go | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index c3bb841d2..6f18e6c3a 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -136,7 +136,8 @@ func (gc *gc) listenEvents(ctx context.Context) {
v.cancelFunc()
v.prevGroup.Wait()
- ctx, v.cancelFunc = context.WithCancel(ctx)
+ var runCtx context.Context
+ runCtx, v.cancelFunc = context.WithCancel(ctx)
v.prevGroup.Add(len(v.handlers))
@@ -144,7 +145,7 @@ func (gc *gc) listenEvents(ctx context.Context) {
h := v.handlers[i]
err := gc.workerPool.Submit(func() {
- h(ctx, event)
+ h(runCtx, event)
v.prevGroup.Done()
})
if err != nil {
From e85e5382e4bebc574aa7569635137be1b96f6ed2 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 3 Apr 2023 14:06:57 +0300
Subject: [PATCH 0017/1943] [#175] adm: list containers using `containersOf`
Signed-off-by: Evgenii Stratonikov
---
.../internal/modules/morph/container.go | 22 ++++++++++++++-----
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/container.go b/cmd/frostfs-adm/internal/modules/morph/container.go
index 9bc6cae41..b5447fcfe 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container.go
@@ -42,15 +42,27 @@ func getContainerContractHash(cmd *cobra.Command, inv *invoker.Invoker, c Client
}
func getContainersList(inv *invoker.Invoker, ch util.Uint160) ([][]byte, error) {
- res, err := inv.Call(ch, "list", "")
+ sid, r, err := unwrap.SessionIterator(inv.Call(ch, "containersOf", ""))
if err != nil {
return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
}
- itm, err := unwrap.Item(res, err)
- if _, ok := itm.(stackitem.Null); !ok {
- return unwrap.ArrayOfBytes(res, err)
+ // Nothing bad, except live session on the server, do not report to the user.
+ defer func() { _ = inv.TerminateSession(sid) }()
+
+ var lst [][]byte
+
+ items, err := inv.TraverseIterator(sid, &r, 0)
+ for err == nil && len(items) != 0 {
+ for j := range items {
+ b, err := items[j].TryBytes()
+ if err != nil {
+ return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
+ }
+ lst = append(lst, b)
+ }
+ items, err = inv.TraverseIterator(sid, &r, 0)
}
- return nil, nil
+ return lst, err
}
func dumpContainers(cmd *cobra.Command, _ []string) error {
From 49cc23e03c8da451fe67f67509183441d3ee740d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 3 Apr 2023 14:37:05 +0300
Subject: [PATCH 0018/1943] [#175] adm: pipeline container iteration
Do not accumulate everything in memory.
Also, the CLI should stay responsive.
Signed-off-by: Evgenii Stratonikov
---
.../internal/modules/morph/container.go | 134 ++++++++++--------
1 file changed, 77 insertions(+), 57 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/container.go b/cmd/frostfs-adm/internal/modules/morph/container.go
index b5447fcfe..687d7e84e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container.go
@@ -41,28 +41,28 @@ func getContainerContractHash(cmd *cobra.Command, inv *invoker.Invoker, c Client
return ch, nil
}
-func getContainersList(inv *invoker.Invoker, ch util.Uint160) ([][]byte, error) {
+func iterateContainerList(inv *invoker.Invoker, ch util.Uint160, f func([]byte) error) error {
sid, r, err := unwrap.SessionIterator(inv.Call(ch, "containersOf", ""))
if err != nil {
- return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
+ return fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
}
// Nothing bad, except live session on the server, do not report to the user.
defer func() { _ = inv.TerminateSession(sid) }()
- var lst [][]byte
-
items, err := inv.TraverseIterator(sid, &r, 0)
for err == nil && len(items) != 0 {
for j := range items {
b, err := items[j].TryBytes()
if err != nil {
- return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
+ return fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
+ }
+ if err := f(b); err != nil {
+ return err
}
- lst = append(lst, b)
}
items, err = inv.TraverseIterator(sid, &r, 0)
}
- return lst, err
+ return err
}
func dumpContainers(cmd *cobra.Command, _ []string) error {
@@ -83,56 +83,81 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("unable to get contaract hash: %w", err)
}
- cids, err := getContainersList(inv, ch)
- if err != nil {
- return fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
- }
-
isOK, err := getCIDFilterFunc(cmd)
if err != nil {
return err
}
- var containers []*Container
- bw := io.NewBufBinWriter()
- for _, id := range cids {
- if !isOK(id) {
- continue
- }
- bw.Reset()
- emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id)
- emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id)
- res, err := inv.Run(bw.Bytes())
- if err != nil {
- return fmt.Errorf("can't get container info: %w", err)
- }
- if len(res.Stack) != 2 {
- return fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse)
- }
-
- cnt := new(Container)
- err = cnt.FromStackItem(res.Stack[0])
- if err != nil {
- return fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
- }
-
- ea := new(EACL)
- err = ea.FromStackItem(res.Stack[1])
- if err != nil {
- return fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
- }
- if len(ea.Value) != 0 {
- cnt.EACL = ea
- }
-
- containers = append(containers, cnt)
- }
-
- out, err := json.Marshal(containers)
+ f, err := os.OpenFile(filename, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0o660)
if err != nil {
return err
}
- return os.WriteFile(filename, out, 0o660)
+ defer f.Close()
+
+ _, err = f.Write([]byte{'['})
+ if err != nil {
+ return err
+ }
+
+ written := 0
+ enc := json.NewEncoder(f)
+ bw := io.NewBufBinWriter()
+ iterErr := iterateContainerList(inv, ch, func(id []byte) error {
+ if !isOK(id) {
+ return nil
+ }
+
+ cnt, err := dumpSingleContainer(bw, ch, inv, id)
+ if err != nil {
+ return err
+ }
+
+ // Writing directly to the file is ok, because json.Encoder does no internal buffering.
+ if written != 0 {
+ _, err = f.Write([]byte{','})
+ if err != nil {
+ return err
+ }
+ }
+
+ written++
+ return enc.Encode(cnt)
+ })
+ if iterErr != nil {
+ return iterErr
+ }
+
+ _, err = f.Write([]byte{']'})
+ return err
+}
+
+func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) {
+ bw.Reset()
+ emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id)
+ emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id)
+ res, err := inv.Run(bw.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("can't get container info: %w", err)
+ }
+ if len(res.Stack) != 2 {
+ return nil, fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse)
+ }
+
+ cnt := new(Container)
+ err = cnt.FromStackItem(res.Stack[0])
+ if err != nil {
+ return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
+ }
+
+ ea := new(EACL)
+ err = ea.FromStackItem(res.Stack[1])
+ if err != nil {
+ return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
+ }
+ if len(ea.Value) != 0 {
+ cnt.EACL = ea
+ }
+ return cnt, nil
}
func listContainers(cmd *cobra.Command, _ []string) error {
@@ -148,20 +173,15 @@ func listContainers(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("unable to get contaract hash: %w", err)
}
- cids, err := getContainersList(inv, ch)
- if err != nil {
- return fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
- }
-
- for _, id := range cids {
+ return iterateContainerList(inv, ch, func(id []byte) error {
var idCnr cid.ID
err = idCnr.Decode(id)
if err != nil {
return fmt.Errorf("unable to decode container id: %w", err)
}
cmd.Println(idCnr)
- }
- return nil
+ return nil
+ })
}
func restoreContainers(cmd *cobra.Command, _ []string) error {
From ee7468daa73fe6230a5aa165506710ba2dca1f96 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 4 Apr 2023 09:36:22 +0300
Subject: [PATCH 0019/1943] [#205] innerring: Provide `alphabetState` param to
`epochTimer`
Fix the NPE introduced in f09ee27a.
Signed-off-by: Evgenii Stratonikov
---
pkg/innerring/initialization.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 5a8fdcc3d..30b8d43a0 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -323,6 +323,7 @@ func (s *Server) createIRFetcher() irFetcher {
func (s *Server) initTimers(cfg *viper.Viper, processors *serverProcessors, morphClients *serverMorphClients) {
s.epochTimer = newEpochTimer(&epochTimerArgs{
l: s.log,
+ alphabetState: s,
newEpochHandlers: s.newEpochTickHandlers(),
cnrWrapper: morphClients.CnrClient,
epoch: s,
From 594b5821edff66368876895fe86c6b8c07144eee Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 30 Mar 2023 12:31:29 +0300
Subject: [PATCH 0020/1943] [#188] blobstor: Refactor put data to blobovniczas
Resolve funlen linter for Blobovniczas.Put method
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/blobovniczatree/put.go | 140 +++++++++---------
1 file changed, 73 insertions(+), 67 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
index 438c2e233..db7ca1082 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
@@ -13,8 +13,6 @@ import (
// Put saves object in the maximum weight blobobnicza.
//
// returns error if could not save object in any blobovnicza.
-//
-// nolint: funlen
func (b *Blobovniczas) Put(prm common.PutPrm) (common.PutRes, error) {
if b.readOnly {
return common.PutRes{}, common.ErrReadOnly
@@ -28,77 +26,85 @@ func (b *Blobovniczas) Put(prm common.PutPrm) (common.PutRes, error) {
putPrm.SetAddress(prm.Address)
putPrm.SetMarshaledObject(prm.RawData)
- var (
- fn func(string) (bool, error)
- id *blobovnicza.ID
- allFull = true
- )
-
- fn = func(p string) (bool, error) {
- active, err := b.getActivated(p)
- if err != nil {
- if !isLogical(err) {
- b.reportError("could not get active blobovnicza", err)
- } else {
- b.log.Debug("could not get active blobovnicza",
- zap.String("error", err.Error()))
- }
-
- return false, nil
- }
-
- if _, err := active.blz.Put(putPrm); err != nil {
- // Check if blobovnicza is full. We could either receive `blobovnicza.ErrFull` error
- // or update active blobovnicza in other thread. In the latter case the database will be closed
- // and `updateActive` takes care of not updating the active blobovnicza twice.
- if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) {
- if isFull {
- b.log.Debug("blobovnicza overflowed",
- zap.String("path", filepath.Join(p, u64ToHexString(active.ind))))
- }
-
- if err := b.updateActive(p, &active.ind); err != nil {
- if !isLogical(err) {
- b.reportError("could not update active blobovnicza", err)
- } else {
- b.log.Debug("could not update active blobovnicza",
- zap.String("level", p),
- zap.String("error", err.Error()))
- }
-
- return false, nil
- }
-
- return fn(p)
- }
-
- allFull = false
- if !isLogical(err) {
- b.reportError("could not put object to active blobovnicza", err)
- } else {
- b.log.Debug("could not put object to active blobovnicza",
- zap.String("path", filepath.Join(p, u64ToHexString(active.ind))),
- zap.String("error", err.Error()))
- }
-
- return false, nil
- }
-
- p = filepath.Join(p, u64ToHexString(active.ind))
-
- id = blobovnicza.NewIDFromBytes([]byte(p))
-
- return true, nil
+ it := &putIterator{
+ B: b,
+ ID: nil,
+ AllFull: true,
+ PutPrm: putPrm,
}
- if err := b.iterateDeepest(prm.Address, fn); err != nil {
+ if err := b.iterateDeepest(prm.Address, it.iterate); err != nil {
return common.PutRes{}, err
- } else if id == nil {
- if allFull {
+ } else if it.ID == nil {
+ if it.AllFull {
return common.PutRes{}, common.ErrNoSpace
}
return common.PutRes{}, errPutFailed
}
- return common.PutRes{StorageID: id.Bytes()}, nil
+ return common.PutRes{StorageID: it.ID.Bytes()}, nil
+}
+
+type putIterator struct {
+ B *Blobovniczas
+ ID *blobovnicza.ID
+ AllFull bool
+ PutPrm blobovnicza.PutPrm
+}
+
+func (i *putIterator) iterate(path string) (bool, error) {
+ active, err := i.B.getActivated(path)
+ if err != nil {
+ if !isLogical(err) {
+ i.B.reportError("could not get active blobovnicza", err)
+ } else {
+ i.B.log.Debug("could not get active blobovnicza",
+ zap.String("error", err.Error()))
+ }
+
+ return false, nil
+ }
+
+ if _, err := active.blz.Put(i.PutPrm); err != nil {
+ // Check if blobovnicza is full. We could either receive `blobovnicza.ErrFull` error
+ // or update active blobovnicza in other thread. In the latter case the database will be closed
+ // and `updateActive` takes care of not updating the active blobovnicza twice.
+ if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) {
+ if isFull {
+ i.B.log.Debug("blobovnicza overflowed",
+ zap.String("path", filepath.Join(path, u64ToHexString(active.ind))))
+ }
+
+ if err := i.B.updateActive(path, &active.ind); err != nil {
+ if !isLogical(err) {
+ i.B.reportError("could not update active blobovnicza", err)
+ } else {
+ i.B.log.Debug("could not update active blobovnicza",
+ zap.String("level", path),
+ zap.String("error", err.Error()))
+ }
+
+ return false, nil
+ }
+
+ return i.iterate(path)
+ }
+
+ i.AllFull = false
+ if !isLogical(err) {
+ i.B.reportError("could not put object to active blobovnicza", err)
+ } else {
+ i.B.log.Debug("could not put object to active blobovnicza",
+ zap.String("path", filepath.Join(path, u64ToHexString(active.ind))),
+ zap.String("error", err.Error()))
+ }
+
+ return false, nil
+ }
+
+ path = filepath.Join(path, u64ToHexString(active.ind))
+
+ i.ID = blobovnicza.NewIDFromBytes([]byte(path))
+
+ return true, nil
}
From 8273a3dfb26665bd4f29d0d3aad6c7e6c44f1c8c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 30 Mar 2023 13:15:29 +0300
Subject: [PATCH 0021/1943] [#188] blobstor: Refactor blobstor test
Resolve funlen linter for TestIterate function
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/internal/blobstortest/iterate.go | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
index e2a310efc..f98cca638 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
@@ -9,7 +9,6 @@ import (
"github.com/stretchr/testify/require"
)
-// nolint: funlen
func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
s := cons(t)
require.NoError(t, s.Open(false))
@@ -28,6 +27,14 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
objects = append(objects[:delID], objects[delID+1:]...)
+ runTestNormalHandler(t, s, objects)
+
+ runTestLazyHandler(t, s, objects)
+
+ runTestIgnoreLogicalErrors(t, s, objects)
+}
+
+func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc) {
t.Run("normal handler", func(t *testing.T) {
seen := make(map[string]objectDesc)
@@ -52,7 +59,9 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
require.Equal(t, objects[i].storageID, d.storageID)
}
})
+}
+func runTestLazyHandler(t *testing.T, s common.Storage, objects []objectDesc) {
t.Run("lazy handler", func(t *testing.T) {
seen := make(map[string]func() ([]byte, error))
@@ -74,7 +83,9 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
require.Equal(t, objects[i].raw, data)
}
})
+}
+func runTestIgnoreLogicalErrors(t *testing.T, s common.Storage, objects []objectDesc) {
t.Run("ignore errors doesn't work for logical errors", func(t *testing.T) {
seen := make(map[string]objectDesc)
From 0739c36a3b336e8bdce8de34b611a8009ad57c02 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 30 Mar 2023 13:42:10 +0300
Subject: [PATCH 0022/1943] [#188] metabase: Refactor object put to metabase
Resolve funlen linter for db.put method
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/put.go | 53 ++++++++++++++----------
1 file changed, 32 insertions(+), 21 deletions(-)
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 07bb4c2b5..8e11c5d9c 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -10,6 +10,7 @@ import (
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -76,10 +77,11 @@ func (db *DB) Put(prm PutPrm) (res PutRes, err error) {
return
}
-// nolint: funlen
-func (db *DB) put(
- tx *bbolt.Tx, obj *objectSDK.Object, id []byte,
- si *objectSDK.SplitInfo, currEpoch uint64) error {
+func (db *DB) put(tx *bbolt.Tx,
+ obj *objectSDK.Object,
+ id []byte,
+ si *objectSDK.SplitInfo,
+ currEpoch uint64) error {
cnr, ok := obj.ContainerID()
if !ok {
return errors.New("missing container in object")
@@ -95,25 +97,34 @@ func (db *DB) put(
return err // return any error besides SplitInfoError
}
- // most right child and split header overlap parent so we have to
- // check if object exists to not overwrite it twice
if exists {
- // When storage engine moves objects between different sub-storages,
- // it calls metabase.Put method with new storage ID, thus triggering this code.
- if !isParent && id != nil {
- return updateStorageID(tx, object.AddressOf(obj), id)
- }
-
- // when storage already has last object in split hierarchy and there is
- // a linking object to put (or vice versa), we should update split info
- // with object ids of these objects
- if isParent {
- return updateSplitInfo(tx, object.AddressOf(obj), si)
- }
-
- return nil
+ return db.updateObj(tx, obj, id, si, isParent)
}
+ return db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch)
+}
+
+func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error {
+ // most right child and split header overlap parent so we have to
+ // check if object exists to not overwrite it twice
+
+ // When storage engine moves objects between different sub-storages,
+ // it calls metabase.Put method with new storage ID, thus triggering this code.
+ if !isParent && id != nil {
+ return updateStorageID(tx, object.AddressOf(obj), id)
+ }
+
+ // when storage already has last object in split hierarchy and there is
+ // a linking object to put (or vice versa), we should update split info
+ // with object ids of these objects
+ if isParent {
+ return updateSplitInfo(tx, object.AddressOf(obj), si)
+ }
+
+ return nil
+}
+
+func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64) error {
if par := obj.Parent(); par != nil && !isParent { // limit depth by two
parentSI, err := splitInfoFromObject(obj)
if err != nil {
@@ -126,7 +137,7 @@ func (db *DB) put(
}
}
- err = putUniqueIndexes(tx, obj, si, id)
+ err := putUniqueIndexes(tx, obj, si, id)
if err != nil {
return fmt.Errorf("can't put unique indexes: %w", err)
}
From 3010ca26494d63ca81651dd2a22765fdaf3b6d52 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 30 Mar 2023 17:32:33 +0300
Subject: [PATCH 0023/1943] [#188] engine: Refactor get object from engine
Resolve funlen linter for StorageEngine.get method
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/get.go | 196 +++++++++++++------------
1 file changed, 103 insertions(+), 93 deletions(-)
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 787a7bac8..4d0a30bc8 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -52,124 +52,134 @@ func (e *StorageEngine) Get(prm GetPrm) (res GetRes, err error) {
return
}
-// nolint: funlen
func (e *StorageEngine) get(prm GetPrm) (GetRes, error) {
if e.metrics != nil {
defer elapsed(e.metrics.AddGetDuration)()
}
- var (
- obj *objectSDK.Object
- siErr *objectSDK.SplitInfoError
-
- errNotFound apistatus.ObjectNotFound
-
- outSI *objectSDK.SplitInfo
- outError error = errNotFound
-
- shardWithMeta hashedShard
- metaError error
- )
+ var errNotFound apistatus.ObjectNotFound
var shPrm shard.GetPrm
shPrm.SetAddress(prm.addr)
- var hasDegraded bool
- var objectExpired bool
-
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- noMeta := sh.GetMode().NoMetabase()
- shPrm.SetIgnoreMeta(noMeta)
-
- hasDegraded = hasDegraded || noMeta
-
- res, err := sh.Get(shPrm)
- if err != nil {
- if res.HasMeta() {
- shardWithMeta = sh
- metaError = err
- }
- switch {
- case shard.IsErrNotFound(err):
- return false // ignore, go to next shard
- case errors.As(err, &siErr):
- if outSI == nil {
- outSI = objectSDK.NewSplitInfo()
- }
-
- util.MergeSplitInfo(siErr.SplitInfo(), outSI)
-
- _, withLink := outSI.Link()
- _, withLast := outSI.LastPart()
-
- // stop iterating over shards if SplitInfo structure is complete
- if withLink && withLast {
- return true
- }
-
- return false
- case shard.IsErrRemoved(err):
- outError = err
-
- return true // stop, return it back
- case shard.IsErrObjectExpired(err):
- // object is found but should not
- // be returned
- objectExpired = true
- return true
- default:
- e.reportShardError(sh, "could not get object from shard", err)
- return false
- }
- }
-
- obj = res.Object()
-
- return true
- })
-
- if outSI != nil {
- return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI))
+ it := &getShardIterator{
+ OutError: errNotFound,
+ ShardPrm: shPrm,
+ Address: prm.addr,
+ Engine: e,
}
- if objectExpired {
+ it.tryGetWithMeta()
+
+ if it.SplitInfo != nil {
+ return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
+ }
+
+ if it.ObjectExpired {
return GetRes{}, errNotFound
}
- if obj == nil {
- if !hasDegraded && shardWithMeta.Shard == nil || !shard.IsErrNotFound(outError) {
- return GetRes{}, outError
+ if it.Object == nil {
+ if !it.HasDegraded && it.ShardWithMeta.Shard == nil || !shard.IsErrNotFound(it.OutError) {
+ return GetRes{}, it.OutError
}
- // If the object is not found but is present in metabase,
- // try to fetch it from blobstor directly. If it is found in any
- // blobstor, increase the error counter for the shard which contains the meta.
- shPrm.SetIgnoreMeta(true)
+ it.tryGetFromBlobstore()
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- if sh.GetMode().NoMetabase() {
- // Already visited.
- return false
- }
-
- res, err := sh.Get(shPrm)
- obj = res.Object()
- return err == nil
- })
- if obj == nil {
- return GetRes{}, outError
+ if it.Object == nil {
+ return GetRes{}, it.OutError
}
- if shardWithMeta.Shard != nil {
- e.reportShardError(shardWithMeta, "meta info was present, but object is missing",
- metaError, zap.Stringer("address", prm.addr))
+ if it.ShardWithMeta.Shard != nil {
+ e.reportShardError(it.ShardWithMeta, "meta info was present, but object is missing",
+ it.MetaError, zap.Stringer("address", prm.addr))
}
}
return GetRes{
- obj: obj,
+ obj: it.Object,
}, nil
}
+type getShardIterator struct {
+ Object *objectSDK.Object
+ SplitInfo *objectSDK.SplitInfo
+ OutError error
+ ShardWithMeta hashedShard
+ MetaError error
+ HasDegraded bool
+ ObjectExpired bool
+
+ ShardPrm shard.GetPrm
+ Address oid.Address
+ Engine *StorageEngine
+
+ splitInfoErr *objectSDK.SplitInfoError
+}
+
+func (i *getShardIterator) tryGetWithMeta() {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
+ noMeta := sh.GetMode().NoMetabase()
+ i.ShardPrm.SetIgnoreMeta(noMeta)
+
+ i.HasDegraded = i.HasDegraded || noMeta
+
+ res, err := sh.Get(i.ShardPrm)
+ if err == nil {
+ i.Object = res.Object()
+ return true
+ }
+
+ if res.HasMeta() {
+ i.ShardWithMeta = sh
+ i.MetaError = err
+ }
+ switch {
+ case shard.IsErrNotFound(err):
+ return false // ignore, go to next shard
+ case errors.As(err, &i.splitInfoErr):
+ if i.SplitInfo == nil {
+ i.SplitInfo = objectSDK.NewSplitInfo()
+ }
+
+ util.MergeSplitInfo(i.splitInfoErr.SplitInfo(), i.SplitInfo)
+
+ _, withLink := i.SplitInfo.Link()
+ _, withLast := i.SplitInfo.LastPart()
+
+ // stop iterating over shards if SplitInfo structure is complete
+ return withLink && withLast
+ case shard.IsErrRemoved(err):
+ i.OutError = err
+ return true // stop, return it back
+ case shard.IsErrObjectExpired(err):
+ // object is found but should not be returned
+ i.ObjectExpired = true
+ return true
+ default:
+ i.Engine.reportShardError(sh, "could not get object from shard", err)
+ return false
+ }
+ })
+}
+
+func (i *getShardIterator) tryGetFromBlobstore() {
+ // If the object is not found but is present in metabase,
+ // try to fetch it from blobstor directly. If it is found in any
+ // blobstor, increase the error counter for the shard which contains the meta.
+ i.ShardPrm.SetIgnoreMeta(true)
+
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
+ if sh.GetMode().NoMetabase() {
+ // Already visited.
+ return false
+ }
+
+ res, err := sh.Get(i.ShardPrm)
+ i.Object = res.Object()
+ return err == nil
+ })
+}
+
// Get reads object from local storage by provided address.
func Get(storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
var getPrm GetPrm
From 456bc097f7144c7bde37d7a5e8a5fc8f2b8afb85 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 30 Mar 2023 18:44:43 +0300
Subject: [PATCH 0024/1943] [#188] engine: Refactor get range from engine
Resolve funlen linter for StorageEngine.getRange method
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/range.go | 197 ++++++++++++-----------
1 file changed, 104 insertions(+), 93 deletions(-)
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index 491b226bf..25b533bd4 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -65,122 +65,51 @@ func (e *StorageEngine) GetRange(prm RngPrm) (res RngRes, err error) {
return
}
-// nolint: funlen
func (e *StorageEngine) getRange(prm RngPrm) (RngRes, error) {
if e.metrics != nil {
defer elapsed(e.metrics.AddRangeDuration)()
}
- var (
- obj *objectSDK.Object
- siErr *objectSDK.SplitInfoError
-
- errNotFound apistatus.ObjectNotFound
-
- outSI *objectSDK.SplitInfo
- outError error = errNotFound
-
- shardWithMeta hashedShard
- metaError error
- )
-
- var hasDegraded bool
+ var errNotFound apistatus.ObjectNotFound
var shPrm shard.RngPrm
shPrm.SetAddress(prm.addr)
shPrm.SetRange(prm.off, prm.ln)
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- noMeta := sh.GetMode().NoMetabase()
- hasDegraded = hasDegraded || noMeta
- shPrm.SetIgnoreMeta(noMeta)
-
- res, err := sh.GetRange(shPrm)
- if err != nil {
- if res.HasMeta() {
- shardWithMeta = sh
- metaError = err
- }
- switch {
- case shard.IsErrNotFound(err):
- return false // ignore, go to next shard
- case errors.As(err, &siErr):
- if outSI == nil {
- outSI = objectSDK.NewSplitInfo()
- }
-
- util.MergeSplitInfo(siErr.SplitInfo(), outSI)
-
- _, withLink := outSI.Link()
- _, withLast := outSI.LastPart()
-
- // stop iterating over shards if SplitInfo structure is complete
- if withLink && withLast {
- return true
- }
-
- return false
- case
- shard.IsErrRemoved(err),
- shard.IsErrOutOfRange(err):
- outError = err
-
- return true // stop, return it back
- default:
- e.reportShardError(sh, "could not get object from shard", err)
- return false
- }
- }
-
- obj = res.Object()
-
- return true
- })
-
- if outSI != nil {
- return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(outSI))
+ it := &getRangeShardIterator{
+ OutError: errNotFound,
+ ShardPrm: shPrm,
+ Address: prm.addr,
+ Engine: e,
}
- if obj == nil {
+ it.tryGetWithMeta()
+
+ if it.SplitInfo != nil {
+ return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
+ }
+
+ if it.Object == nil {
// If any shard is in a degraded mode, we should assume that metabase could store
// info about some object.
- if shardWithMeta.Shard == nil && !hasDegraded || !shard.IsErrNotFound(outError) {
- return RngRes{}, outError
+ if it.ShardWithMeta.Shard == nil && !it.HasDegraded || !shard.IsErrNotFound(it.OutError) {
+ return RngRes{}, it.OutError
}
- // If the object is not found but is present in metabase,
- // try to fetch it from blobstor directly. If it is found in any
- // blobstor, increase the error counter for the shard which contains the meta.
- shPrm.SetIgnoreMeta(true)
+ it.tryGetFromBlobstor()
- e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- if sh.GetMode().NoMetabase() {
- // Already processed it without a metabase.
- return false
- }
-
- res, err := sh.GetRange(shPrm)
- if shard.IsErrOutOfRange(err) {
- var errOutOfRange apistatus.ObjectOutOfRange
-
- outError = errOutOfRange
- return true
- }
- obj = res.Object()
- return err == nil
- })
- if obj == nil {
- return RngRes{}, outError
+ if it.Object == nil {
+ return RngRes{}, it.OutError
}
- if shardWithMeta.Shard != nil {
- e.reportShardError(shardWithMeta, "meta info was present, but object is missing",
- metaError,
+ if it.ShardWithMeta.Shard != nil {
+ e.reportShardError(it.ShardWithMeta, "meta info was present, but object is missing",
+ it.MetaError,
zap.Stringer("address", prm.addr))
}
}
return RngRes{
- obj: obj,
+ obj: it.Object,
}, nil
}
@@ -197,3 +126,85 @@ func GetRange(storage *StorageEngine, addr oid.Address, rng *objectSDK.Range) ([
return res.Object().Payload(), nil
}
+
+type getRangeShardIterator struct {
+ Object *objectSDK.Object
+ SplitInfoError *objectSDK.SplitInfoError
+ SplitInfo *objectSDK.SplitInfo
+ OutError error
+ ShardWithMeta hashedShard
+ MetaError error
+ HasDegraded bool
+
+ ShardPrm shard.RngPrm
+ Address oid.Address
+ Engine *StorageEngine
+}
+
+func (i *getRangeShardIterator) tryGetWithMeta() {
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
+ noMeta := sh.GetMode().NoMetabase()
+ i.HasDegraded = i.HasDegraded || noMeta
+ i.ShardPrm.SetIgnoreMeta(noMeta)
+
+ res, err := sh.GetRange(i.ShardPrm)
+ if err == nil {
+ i.Object = res.Object()
+ return true
+ }
+
+ if res.HasMeta() {
+ i.ShardWithMeta = sh
+ i.MetaError = err
+ }
+ switch {
+ case shard.IsErrNotFound(err):
+ return false // ignore, go to next shard
+ case errors.As(err, &i.SplitInfoError):
+ if i.SplitInfo == nil {
+ i.SplitInfo = objectSDK.NewSplitInfo()
+ }
+
+ util.MergeSplitInfo(i.SplitInfoError.SplitInfo(), i.SplitInfo)
+
+ _, withLink := i.SplitInfo.Link()
+ _, withLast := i.SplitInfo.LastPart()
+
+ // stop iterating over shards if SplitInfo structure is complete
+ return withLink && withLast
+ case
+ shard.IsErrRemoved(err),
+ shard.IsErrOutOfRange(err):
+ i.OutError = err
+
+ return true // stop, return it back
+ default:
+ i.Engine.reportShardError(sh, "could not get object from shard", err)
+ return false
+ }
+ })
+}
+
+func (i *getRangeShardIterator) tryGetFromBlobstor() {
+ // If the object is not found but is present in metabase,
+ // try to fetch it from blobstor directly. If it is found in any
+ // blobstor, increase the error counter for the shard which contains the meta.
+ i.ShardPrm.SetIgnoreMeta(true)
+
+ i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
+ if sh.GetMode().NoMetabase() {
+ // Already processed it without a metabase.
+ return false
+ }
+
+ res, err := sh.GetRange(i.ShardPrm)
+ if shard.IsErrOutOfRange(err) {
+ var errOutOfRange apistatus.ObjectOutOfRange
+
+ i.OutError = errOutOfRange
+ return true
+ }
+ i.Object = res.Object()
+ return err == nil
+ })
+}
From 5a66db80c564c1d45bad0e74f952c1884828b26f Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 31 Mar 2023 11:33:08 +0300
Subject: [PATCH 0025/1943] [#188] engine: Refactor shard evacuation
Resolve funlen and gocognit linter for StorageEngine.Evacuate method
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/evacuate.go | 216 +++++++++++---------
1 file changed, 120 insertions(+), 96 deletions(-)
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 4bc7eac1a..457228bb2 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@@ -57,35 +58,89 @@ var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")
// Evacuate moves data from one shard to the others.
// The shard being moved must be in read-only mode.
-//
-// nolint: funlen, gocognit
func (e *StorageEngine) Evacuate(prm EvacuateShardPrm) (EvacuateShardRes, error) {
- sidList := make([]string, len(prm.shardID))
+ shardIDs := make([]string, len(prm.shardID))
for i := range prm.shardID {
- sidList[i] = prm.shardID[i].String()
+ shardIDs[i] = prm.shardID[i].String()
}
+ shards, weights, err := e.getActualShards(shardIDs, prm.handler != nil)
+ if err != nil {
+ return EvacuateShardRes{}, err
+ }
+
+ shardsToEvacuate := make(map[string]*shard.Shard)
+ for i := range shardIDs {
+ for j := range shards {
+ if shards[j].ID().String() == shardIDs[i] {
+ shardsToEvacuate[shardIDs[i]] = shards[j].Shard
+ }
+ }
+ }
+
+ e.log.Info("started shards evacuation", zap.Strings("shard_ids", shardIDs))
+
+ var res EvacuateShardRes
+
+ for _, shardID := range shardIDs {
+ if err = e.evacuateShard(shardID, prm, &res, shards, weights, shardsToEvacuate); err != nil {
+ return res, err
+ }
+ }
+
+ e.log.Info("finished shards evacuation", zap.Strings("shard_ids", shardIDs))
+ return res, nil
+}
+
+func (e *StorageEngine) evacuateShard(shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
+ var listPrm shard.ListWithCursorPrm
+ listPrm.WithCount(defaultEvacuateBatchSize)
+
+ sh := shardsToEvacuate[shardID]
+
+ var c *meta.Cursor
+ for {
+ listPrm.WithCursor(c)
+
+ // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
+ // because ListWithCursor works only with the metabase.
+ listRes, err := sh.ListWithCursor(listPrm)
+ if err != nil {
+ if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) {
+ break
+ }
+ return err
+ }
+
+ if err = e.evacuateObjects(sh, listRes.AddressList(), prm, res, shards, weights, shardsToEvacuate); err != nil {
+ return err
+ }
+
+ c = listRes.Cursor()
+ }
+ return nil
+}
+
+func (e *StorageEngine) getActualShards(shardIDs []string, handlerDefined bool) ([]pooledShard, []float64, error) {
e.mtx.RLock()
- for i := range sidList {
- sh, ok := e.shards[sidList[i]]
+ defer e.mtx.RUnlock()
+
+ for i := range shardIDs {
+ sh, ok := e.shards[shardIDs[i]]
if !ok {
- e.mtx.RUnlock()
- return EvacuateShardRes{}, errShardNotFound
+ return nil, nil, errShardNotFound
}
if !sh.GetMode().ReadOnly() {
- e.mtx.RUnlock()
- return EvacuateShardRes{}, shard.ErrMustBeReadOnly
+ return nil, nil, shard.ErrMustBeReadOnly
}
}
- if len(e.shards)-len(sidList) < 1 && prm.handler == nil {
- e.mtx.RUnlock()
- return EvacuateShardRes{}, errMustHaveTwoShards
+ if len(e.shards)-len(shardIDs) < 1 && !handlerDefined {
+ return nil, nil, errMustHaveTwoShards
}
- e.log.Info("started shards evacuation", zap.Strings("shard_ids", sidList))
-
// We must have all shards, to have correct information about their
// indexes in a sorted slice and set appropriate marks in the metabase.
// Evacuated shard is skipped during put.
@@ -96,100 +151,69 @@ func (e *StorageEngine) Evacuate(prm EvacuateShardPrm) (EvacuateShardRes, error)
pool: e.shardPools[id],
})
}
- e.mtx.RUnlock()
weights := make([]float64, 0, len(shards))
for i := range shards {
weights = append(weights, e.shardWeight(shards[i].Shard))
}
- shardMap := make(map[string]*shard.Shard)
- for i := range sidList {
- for j := range shards {
- if shards[j].ID().String() == sidList[i] {
- shardMap[sidList[i]] = shards[j].Shard
+ return shards, weights, nil
+}
+
+func (e *StorageEngine) evacuateObjects(sh *shard.Shard, toEvacuate []object.AddressWithType, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
+ for i := range toEvacuate {
+ addr := toEvacuate[i].Address
+
+ var getPrm shard.GetPrm
+ getPrm.SetAddress(addr)
+
+ getRes, err := sh.Get(getPrm)
+ if err != nil {
+ if prm.ignoreErrors {
+ continue
}
+ return err
}
+
+ if e.tryEvacuateObject(addr, getRes.Object(), sh, res, shards, weights, shardsToEvacuate) {
+ continue
+ }
+
+ if prm.handler == nil {
+ // Do not check ignoreErrors flag here because
+ // ignoring errors on put make this command kinda useless.
+ return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i])
+ }
+
+ err = prm.handler(addr, getRes.Object())
+ if err != nil {
+ return err
+ }
+ res.count++
}
+ return nil
+}
- var listPrm shard.ListWithCursorPrm
- listPrm.WithCount(defaultEvacuateBatchSize)
-
- var res EvacuateShardRes
-
-mainLoop:
- for n := range sidList {
- sh := shardMap[sidList[n]]
-
- var c *meta.Cursor
- for {
- listPrm.WithCursor(c)
-
- // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
- // because ListWithCursor works only with the metabase.
- listRes, err := sh.ListWithCursor(listPrm)
- if err != nil {
- if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) {
- continue mainLoop
- }
- return res, err
- }
-
- // TODO (@fyrchik): #1731 parallelize the loop
- lst := listRes.AddressList()
-
- loop:
- for i := range lst {
- addr := lst[i].Address
-
- var getPrm shard.GetPrm
- getPrm.SetAddress(addr)
-
- getRes, err := sh.Get(getPrm)
- if err != nil {
- if prm.ignoreErrors {
- continue
- }
- return res, err
- }
-
- hrw.SortHasherSliceByWeightValue(shards, weights, hrw.Hash([]byte(addr.EncodeToString())))
- for j := range shards {
- if _, ok := shardMap[shards[j].ID().String()]; ok {
- continue
- }
- putDone, exists := e.putToShard(shards[j].hashedShard, j, shards[j].pool, addr, getRes.Object())
- if putDone || exists {
- if putDone {
- e.log.Debug("object is moved to another shard",
- zap.String("from", sidList[n]),
- zap.Stringer("to", shards[j].ID()),
- zap.Stringer("addr", addr))
-
- res.count++
- }
- continue loop
- }
- }
-
- if prm.handler == nil {
- // Do not check ignoreErrors flag here because
- // ignoring errors on put make this command kinda useless.
- return res, fmt.Errorf("%w: %s", errPutShard, lst[i])
- }
-
- err = prm.handler(addr, getRes.Object())
- if err != nil {
- return res, err
- }
+func (e *StorageEngine) tryEvacuateObject(addr oid.Address, object *objectSDK.Object, sh *shard.Shard, res *EvacuateShardRes,
+ shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) bool {
+ hrw.SortHasherSliceByWeightValue(shards, weights, hrw.Hash([]byte(addr.EncodeToString())))
+ for j := range shards {
+ if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
+ continue
+ }
+ putDone, exists := e.putToShard(shards[j].hashedShard, j, shards[j].pool, addr, object)
+ if putDone || exists {
+ if putDone {
+ e.log.Debug("object is moved to another shard",
+ zap.Stringer("from", sh.ID()),
+ zap.Stringer("to", shards[j].ID()),
+ zap.Stringer("addr", addr))
res.count++
}
-
- c = listRes.Cursor()
+ return true
}
}
- e.log.Info("finished shards evacuation",
- zap.Strings("shard_ids", sidList))
- return res, nil
+ return false
}
From 1f1aed87be8acbf577fe65bd49cbd5ceb59bf8c5 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 31 Mar 2023 14:12:18 +0300
Subject: [PATCH 0026/1943] [#188] metabase: Refactor object inhume
Resolve funlen linter for db.Inhume method
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/inhume.go | 271 +++++++++++---------
1 file changed, 150 insertions(+), 121 deletions(-)
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 3f33f990c..b6e6cadf1 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -118,8 +118,6 @@ var ErrLockObjectRemoval = logicerr.New("lock object removal")
//
// NOTE: Marks any object with GC mark (despite any prohibitions on operations
// with that object) if WithForceGCMark option has been provided.
-//
-// nolint: funlen, gocognit
func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -131,145 +129,176 @@ func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) {
}
currEpoch := db.epochState.CurrentEpoch()
- var inhumed uint64
-
err = db.boltDB.Update(func(tx *bbolt.Tx) error {
- garbageBKT := tx.Bucket(garbageBucketName)
- graveyardBKT := tx.Bucket(graveyardBucketName)
+ return db.inhumeTx(tx, currEpoch, prm, &res)
+ })
- var (
- // target bucket of the operation, one of the:
- // 1. Graveyard if Inhume was called with a Tombstone
- // 2. Garbage if Inhume was called with a GC mark
- bkt *bbolt.Bucket
- // value that will be put in the bucket, one of the:
- // 1. tombstone address if Inhume was called with
- // a Tombstone
- // 2. zeroValue if Inhume was called with a GC mark
- value []byte
- )
+ return
+}
- if prm.tomb != nil {
- bkt = graveyardBKT
- tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize))
+func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes) error {
+ garbageBKT := tx.Bucket(garbageBucketName)
+ graveyardBKT := tx.Bucket(graveyardBucketName)
- // it is forbidden to have a tomb-on-tomb in FrostFS,
- // so graveyard keys must not be addresses of tombstones
- data := bkt.Get(tombKey)
- if data != nil {
- err := bkt.Delete(tombKey)
- if err != nil {
- return fmt.Errorf("could not remove grave with tombstone key: %w", err)
- }
- }
+ bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, &prm)
+ if err != nil {
+ return err
+ }
- value = tombKey
- } else {
- bkt = garbageBKT
- value = zeroValue
+ buf := make([]byte, addressKeySize)
+ for i := range prm.target {
+ id := prm.target[i].Object()
+ cnr := prm.target[i].Container()
+
+ // prevent locked objects to be inhumed
+ if !prm.forceRemoval && objectLocked(tx, cnr, id) {
+ return apistatus.ObjectLocked{}
}
- buf := make([]byte, addressKeySize)
- for i := range prm.target {
- id := prm.target[i].Object()
- cnr := prm.target[i].Container()
+ var lockWasChecked bool
- // prevent locked objects to be inhumed
- if !prm.forceRemoval && objectLocked(tx, cnr, id) {
- return apistatus.ObjectLocked{}
+ // prevent lock objects to be inhumed
+ // if `Inhume` was called not with the
+ // `WithForceGCMark` option
+ if !prm.forceRemoval {
+ if isLockObject(tx, cnr, id) {
+ return ErrLockObjectRemoval
}
- var lockWasChecked bool
+ lockWasChecked = true
+ }
- // prevent lock objects to be inhumed
- // if `Inhume` was called not with the
- // `WithForceGCMark` option
- if !prm.forceRemoval {
- if isLockObject(tx, cnr, id) {
- return ErrLockObjectRemoval
- }
-
- lockWasChecked = true
+ obj, err := db.get(tx, prm.target[i], buf, false, true, epoch)
+ targetKey := addressKey(prm.target[i], buf)
+ if err == nil {
+ err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res)
+ if err != nil {
+ return err
}
+ }
- obj, err := db.get(tx, prm.target[i], buf, false, true, currEpoch)
- targetKey := addressKey(prm.target[i], buf)
- if err == nil {
- containerID, _ := obj.ContainerID()
- if inGraveyardWithKey(targetKey, graveyardBKT, garbageBKT) == 0 {
- inhumed++
- res.storeDeletionInfo(containerID, obj.PayloadSize())
- }
-
- // if object is stored, and it is regular object then update bucket
- // with container size estimations
- if obj.Type() == object.TypeRegular {
- err := changeContainerSize(tx, cnr, obj.PayloadSize(), false)
- if err != nil {
- return err
- }
- }
- }
-
- if prm.tomb != nil {
- targetIsTomb := false
-
- // iterate over graveyard and check if target address
- // is the address of tombstone in graveyard.
- err = bkt.ForEach(func(k, v []byte) error {
- // check if graveyard has record with key corresponding
- // to tombstone address (at least one)
- targetIsTomb = bytes.Equal(v, targetKey)
-
- if targetIsTomb {
- // break bucket iterator
- return errBreakBucketForEach
- }
-
- return nil
- })
- if err != nil && !errors.Is(err, errBreakBucketForEach) {
- return err
- }
-
- // do not add grave if target is a tombstone
- if targetIsTomb {
- continue
- }
-
- // if tombstone appears object must be
- // additionally marked with GC
- err = garbageBKT.Put(targetKey, zeroValue)
- if err != nil {
- return err
- }
- }
-
- // consider checking if target is already in graveyard?
- err = bkt.Put(targetKey, value)
+ if prm.tomb != nil {
+ var isTomb bool
+ isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey)
if err != nil {
return err
}
- if prm.lockObjectHandling {
- // do not perform lock check if
- // it was already called
- if lockWasChecked {
- // inhumed object is not of
- // the LOCK type
- continue
- }
-
- if isLockObject(tx, cnr, id) {
- res.deletedLockObj = append(res.deletedLockObj, prm.target[i])
- }
+ if isTomb {
+ continue
}
}
- return db.updateCounter(tx, logical, inhumed, false)
- })
+ // consider checking if target is already in graveyard?
+ err = bkt.Put(targetKey, value)
+ if err != nil {
+ return err
+ }
- res.availableImhumed = inhumed
+ if prm.lockObjectHandling {
+ // do not perform lock check if
+ // it was already called
+ if lockWasChecked {
+ // inhumed object is not of
+ // the LOCK type
+ continue
+ }
- return
+ if isLockObject(tx, cnr, id) {
+ res.deletedLockObj = append(res.deletedLockObj, prm.target[i])
+ }
+ }
+ }
+
+ return db.updateCounter(tx, logical, res.availableImhumed, false)
+}
+
+// getInhumeTargetBucketAndValue return target bucket to store inhume result and value that will be put in the bucket.
+//
+// target bucket of the operation, one of the:
+// 1. Graveyard if Inhume was called with a Tombstone
+// 2. Garbage if Inhume was called with a GC mark
+//
+// value that will be put in the bucket, one of the:
+// 1. tombstone address if Inhume was called with
+// a Tombstone
+// 2. zeroValue if Inhume was called with a GC mark
+func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm *InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) {
+ if prm.tomb != nil {
+ targetBucket = graveyardBKT
+ tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize))
+
+ // it is forbidden to have a tomb-on-tomb in FrostFS,
+ // so graveyard keys must not be addresses of tombstones
+ data := targetBucket.Get(tombKey)
+ if data != nil {
+ err := targetBucket.Delete(tombKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err)
+ }
+ }
+
+ value = tombKey
+ } else {
+ targetBucket = garbageBKT
+ value = zeroValue
+ }
+ return targetBucket, value, nil
+}
+
+func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool, error) {
+ targetIsTomb, err := isTomb(graveyardBKT, key)
+ if err != nil {
+ return false, err
+ }
+
+ // do not add grave if target is a tombstone
+ if targetIsTomb {
+ return true, nil
+ }
+
+ // if tombstone appears object must be
+ // additionally marked with GC
+ return false, garbageBKT.Put(key, zeroValue)
+}
+
+func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *object.Object, res *InhumeRes) error {
+ containerID, _ := obj.ContainerID()
+ if inGraveyardWithKey(targetKey, graveyardBKT, garbageBKT) == 0 {
+ res.availableImhumed++
+ res.storeDeletionInfo(containerID, obj.PayloadSize())
+ }
+
+ // if object is stored, and it is regular object then update bucket
+ // with container size estimations
+ if obj.Type() == object.TypeRegular {
+ err := changeContainerSize(tx, cnr, obj.PayloadSize(), false)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func isTomb(graveyardBucket *bbolt.Bucket, key []byte) (bool, error) {
+ targetIsTomb := false
+
+ // iterate over graveyard and check if target address
+ // is the address of tombstone in graveyard.
+ err := graveyardBucket.ForEach(func(k, v []byte) error {
+ // check if graveyard has record with key corresponding
+ // to tombstone address (at least one)
+ targetIsTomb = bytes.Equal(v, key)
+
+ if targetIsTomb {
+ // break bucket iterator
+ return errBreakBucketForEach
+ }
+
+ return nil
+ })
+ if err != nil && !errors.Is(err, errBreakBucketForEach) {
+ return false, err
+ }
+ return targetIsTomb, nil
}
From cd33a57f443b2c3348b6eca3e13dbbda4de63fb8 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 11:34:47 +0300
Subject: [PATCH 0027/1943] [#207] aclsvc: Refactor EACL check
Resolve funlen linter for CheckEACL method.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/acl/acl.go | 70 ++++++++++++++++++++--------------
1 file changed, 41 insertions(+), 29 deletions(-)
diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go
index a068eadb8..87d2f9c82 100644
--- a/pkg/services/object/acl/acl.go
+++ b/pkg/services/object/acl/acl.go
@@ -14,6 +14,7 @@ import (
bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -118,8 +119,6 @@ func (c *Checker) StickyBitCheck(info v2.RequestInfo, owner user.ID) bool {
}
// CheckEACL is a main check function for extended ACL.
-//
-// nolint: funlen
func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error {
basicACL := reqInfo.BasicACL()
if !basicACL.Extendable() {
@@ -154,6 +153,44 @@ func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error {
return err
}
+ hdrSrc, err := c.getHeaderSource(cnr, msg, reqInfo)
+ if err != nil {
+ return err
+ }
+
+ eaclRole := getRole(reqInfo)
+
+ action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit).
+ WithRole(eaclRole).
+ WithOperation(eaclSDK.Operation(reqInfo.Operation())).
+ WithContainerID(&cnr).
+ WithSenderKey(reqInfo.SenderKey()).
+ WithHeaderSource(hdrSrc).
+ WithEACLTable(&table),
+ )
+
+ if action != eaclSDK.ActionAllow {
+ return errEACLDeniedByRule
+ }
+ return nil
+}
+
+func getRole(reqInfo v2.RequestInfo) eaclSDK.Role {
+ var eaclRole eaclSDK.Role
+ switch op := reqInfo.RequestRole(); op {
+ default:
+ eaclRole = eaclSDK.Role(op)
+ case acl.RoleOwner:
+ eaclRole = eaclSDK.RoleUser
+ case acl.RoleInnerRing, acl.RoleContainer:
+ eaclRole = eaclSDK.RoleSystem
+ case acl.RoleOthers:
+ eaclRole = eaclSDK.RoleOthers
+ }
+ return eaclRole
+}
+
+func (c *Checker) getHeaderSource(cnr cid.ID, msg any, reqInfo v2.RequestInfo) (eaclSDK.TypedHeaderSource, error) {
hdrSrcOpts := make([]eaclV2.Option, 0, 3)
hdrSrcOpts = append(hdrSrcOpts,
@@ -175,34 +212,9 @@ func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error {
hdrSrc, err := eaclV2.NewMessageHeaderSource(hdrSrcOpts...)
if err != nil {
- return fmt.Errorf("can't parse headers: %w", err)
+ return nil, fmt.Errorf("can't parse headers: %w", err)
}
-
- var eaclRole eaclSDK.Role
- switch op := reqInfo.RequestRole(); op {
- default:
- eaclRole = eaclSDK.Role(op)
- case acl.RoleOwner:
- eaclRole = eaclSDK.RoleUser
- case acl.RoleInnerRing, acl.RoleContainer:
- eaclRole = eaclSDK.RoleSystem
- case acl.RoleOthers:
- eaclRole = eaclSDK.RoleOthers
- }
-
- action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit).
- WithRole(eaclRole).
- WithOperation(eaclSDK.Operation(reqInfo.Operation())).
- WithContainerID(&cnr).
- WithSenderKey(reqInfo.SenderKey()).
- WithHeaderSource(hdrSrc).
- WithEACLTable(&table),
- )
-
- if action != eaclSDK.ActionAllow {
- return errEACLDeniedByRule
- }
- return nil
+ return hdrSrc, nil
}
// isValidBearer checks whether bearer token was correctly signed by authorized
From 9ef790f7824259733990ef4ec60d8676b04fd49b Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 12:03:19 +0300
Subject: [PATCH 0028/1943] [#207] aclsvc: Refactor object headers read
Resolve funlen linter for readObjectHeaders method.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/acl/eacl/v2/headers.go | 161 +++++++++++----------
1 file changed, 84 insertions(+), 77 deletions(-)
diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go
index 63e733cf6..736c05763 100644
--- a/pkg/services/object/acl/eacl/v2/headers.go
+++ b/pkg/services/object/acl/eacl/v2/headers.go
@@ -101,96 +101,103 @@ func requestHeaders(msg xHeaderSource) []eaclSDK.Header {
var errMissingOID = errors.New("object ID is missing")
-// nolint: funlen
func (h *cfg) readObjectHeaders(dst *headerSource) error {
switch m := h.msg.(type) {
default:
panic(fmt.Sprintf("unexpected message type %T", h.msg))
case requestXHeaderSource:
- switch req := m.req.(type) {
- case
- *objectV2.GetRequest,
- *objectV2.HeadRequest:
- if h.obj == nil {
- return errMissingOID
- }
-
- objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
-
- dst.objectHeaders = objHeaders
- dst.incompleteObjectHeaders = !completed
- case
- *objectV2.GetRangeRequest,
- *objectV2.GetRangeHashRequest,
- *objectV2.DeleteRequest:
- if h.obj == nil {
- return errMissingOID
- }
-
- dst.objectHeaders = addressHeaders(h.cnr, h.obj)
- case *objectV2.PutRequest:
- if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
- oV2 := new(objectV2.Object)
- oV2.SetObjectID(v.GetObjectID())
- oV2.SetHeader(v.GetHeader())
-
- dst.objectHeaders = headersFromObject(object.NewFromV2(oV2), h.cnr, h.obj)
- }
- case *objectV2.SearchRequest:
- cnrV2 := req.GetBody().GetContainerID()
- var cnr cid.ID
-
- if cnrV2 != nil {
- if err := cnr.ReadFromV2(*cnrV2); err != nil {
- return fmt.Errorf("can't parse container ID: %w", err)
- }
- }
-
- dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
- }
+ return h.readObjectHeadersFromRequestXHeaderSource(m, dst)
case responseXHeaderSource:
- switch resp := m.resp.(type) {
- default:
- objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
+ return h.readObjectHeadersResponseXHeaderSource(m, dst)
+ }
+}
- dst.objectHeaders = objectHeaders
- dst.incompleteObjectHeaders = !completed
- case *objectV2.GetResponse:
- if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
- oV2 := new(objectV2.Object)
- oV2.SetObjectID(v.GetObjectID())
- oV2.SetHeader(v.GetHeader())
+func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error {
+ switch req := m.req.(type) {
+ case
+ *objectV2.GetRequest,
+ *objectV2.HeadRequest:
+ if h.obj == nil {
+ return errMissingOID
+ }
- dst.objectHeaders = headersFromObject(object.NewFromV2(oV2), h.cnr, h.obj)
- }
- case *objectV2.HeadResponse:
+ objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
+
+ dst.objectHeaders = objHeaders
+ dst.incompleteObjectHeaders = !completed
+ case
+ *objectV2.GetRangeRequest,
+ *objectV2.GetRangeHashRequest,
+ *objectV2.DeleteRequest:
+ if h.obj == nil {
+ return errMissingOID
+ }
+
+ dst.objectHeaders = addressHeaders(h.cnr, h.obj)
+ case *objectV2.PutRequest:
+ if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok {
oV2 := new(objectV2.Object)
-
- var hdr *objectV2.Header
-
- switch v := resp.GetBody().GetHeaderPart().(type) {
- case *objectV2.ShortHeader:
- hdr = new(objectV2.Header)
-
- var idV2 refsV2.ContainerID
- h.cnr.WriteToV2(&idV2)
-
- hdr.SetContainerID(&idV2)
- hdr.SetVersion(v.GetVersion())
- hdr.SetCreationEpoch(v.GetCreationEpoch())
- hdr.SetOwnerID(v.GetOwnerID())
- hdr.SetObjectType(v.GetObjectType())
- hdr.SetPayloadLength(v.GetPayloadLength())
- case *objectV2.HeaderWithSignature:
- hdr = v.GetHeader()
- }
-
- oV2.SetHeader(hdr)
+ oV2.SetObjectID(v.GetObjectID())
+ oV2.SetHeader(v.GetHeader())
dst.objectHeaders = headersFromObject(object.NewFromV2(oV2), h.cnr, h.obj)
}
- }
+ case *objectV2.SearchRequest:
+ cnrV2 := req.GetBody().GetContainerID()
+ var cnr cid.ID
+ if cnrV2 != nil {
+ if err := cnr.ReadFromV2(*cnrV2); err != nil {
+ return fmt.Errorf("can't parse container ID: %w", err)
+ }
+ }
+
+ dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)}
+ }
+ return nil
+}
+
+func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error {
+ switch resp := m.resp.(type) {
+ default:
+ objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj)
+
+ dst.objectHeaders = objectHeaders
+ dst.incompleteObjectHeaders = !completed
+ case *objectV2.GetResponse:
+ if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok {
+ oV2 := new(objectV2.Object)
+ oV2.SetObjectID(v.GetObjectID())
+ oV2.SetHeader(v.GetHeader())
+
+ dst.objectHeaders = headersFromObject(object.NewFromV2(oV2), h.cnr, h.obj)
+ }
+ case *objectV2.HeadResponse:
+ oV2 := new(objectV2.Object)
+
+ var hdr *objectV2.Header
+
+ switch v := resp.GetBody().GetHeaderPart().(type) {
+ case *objectV2.ShortHeader:
+ hdr = new(objectV2.Header)
+
+ var idV2 refsV2.ContainerID
+ h.cnr.WriteToV2(&idV2)
+
+ hdr.SetContainerID(&idV2)
+ hdr.SetVersion(v.GetVersion())
+ hdr.SetCreationEpoch(v.GetCreationEpoch())
+ hdr.SetOwnerID(v.GetOwnerID())
+ hdr.SetObjectType(v.GetObjectType())
+ hdr.SetPayloadLength(v.GetPayloadLength())
+ case *objectV2.HeaderWithSignature:
+ hdr = v.GetHeader()
+ }
+
+ oV2.SetHeader(hdr)
+
+ dst.objectHeaders = headersFromObject(object.NewFromV2(oV2), h.cnr, h.obj)
+ }
return nil
}
From 585415fa92782dc157ef0681bb74428fc33793b9 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 12:18:43 +0300
Subject: [PATCH 0029/1943] [#207] aclsvc: Refactor send checker
Resolve funlen linter for putStreamBasicChecker.Send method.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/acl/v2/service.go | 53 ++++++++++++++++-----------
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index 1e451a99f..6544d78d7 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -443,7 +443,6 @@ func (b Service) GetRangeHash(
return b.next.GetRangeHash(ctx, request)
}
-// nolint: funlen
func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
body := request.GetBody()
if body == nil {
@@ -482,27 +481,9 @@ func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRe
}
var sTok *sessionSDK.Object
-
- if tokV2 := request.GetMetaHeader().GetSessionToken(); tokV2 != nil {
- sTok = new(sessionSDK.Object)
-
- err = sTok.ReadFromV2(*tokV2)
- if err != nil {
- return fmt.Errorf("invalid session token: %w", err)
- }
-
- if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
- // if session relates to object's removal, we don't check
- // relation of the tombstone to the session here since user
- // can't predict tomb's ID.
- err = assertSessionRelation(*sTok, cnr, nil)
- } else {
- err = assertSessionRelation(*sTok, cnr, obj)
- }
-
- if err != nil {
- return err
- }
+ sTok, err = p.readSessionToken(cnr, obj, request)
+ if err != nil {
+ return err
}
bTok, err := originalBearerToken(request.GetMetaHeader())
@@ -534,6 +515,34 @@ func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRe
return p.next.Send(ctx, request)
}
+func (p putStreamBasicChecker) readSessionToken(cnr cid.ID, obj *oid.ID, request *objectV2.PutRequest) (*sessionSDK.Object, error) {
+ var sTok *sessionSDK.Object
+
+ if tokV2 := request.GetMetaHeader().GetSessionToken(); tokV2 != nil {
+ sTok = new(sessionSDK.Object)
+
+ err := sTok.ReadFromV2(*tokV2)
+ if err != nil {
+ return nil, fmt.Errorf("invalid session token: %w", err)
+ }
+
+ if sTok.AssertVerb(sessionSDK.VerbObjectDelete) {
+ // if session relates to object's removal, we don't check
+ // relation of the tombstone to the session here since user
+ // can't predict tomb's ID.
+ err = assertSessionRelation(*sTok, cnr, nil)
+ } else {
+ err = assertSessionRelation(*sTok, cnr, obj)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return sTok, nil
+}
+
func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) {
return p.next.CloseAndRecv(ctx)
}
From 4941926c9d5d653d49fe8a50f1c6c685342841e8 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 12:25:11 +0300
Subject: [PATCH 0030/1943] [#207] aclsvc: Drop outdated tag
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/acl/v2/util.go | 1 -
1 file changed, 1 deletion(-)
diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/acl/v2/util.go
index 5e3be6e52..aa5d67584 100644
--- a/pkg/services/object/acl/v2/util.go
+++ b/pkg/services/object/acl/v2/util.go
@@ -166,7 +166,6 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
// assertVerb checks that token verb corresponds to op.
func assertVerb(tok sessionSDK.Object, op acl.Op) bool {
- //nolint:exhaustive
switch op {
case acl.OpObjectPut:
return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete)
From 5f2a1531fef53be54179734f992696f5a8fa5252 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 12:45:59 +0300
Subject: [PATCH 0031/1943] [#208] deletesvc: Resolve containedctx linter
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/delete/delete.go | 7 +++---
pkg/services/object/delete/exec.go | 35 +++++++++++----------------
pkg/services/object/delete/local.go | 14 ++++++-----
pkg/services/object/delete/service.go | 12 +++++----
pkg/services/object/delete/util.go | 27 +++++++++++----------
5 files changed, 46 insertions(+), 49 deletions(-)
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index 4a9c476d0..a959b53cb 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -23,22 +23,21 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
exec := &execCtx{
svc: s,
- ctx: ctx,
prm: prm,
}
exec.setLogger(s.log)
- exec.execute()
+ exec.execute(ctx)
return exec.statusError.err
}
-func (exec *execCtx) execute() {
+func (exec *execCtx) execute(ctx context.Context) {
exec.log.Debug("serving request...")
// perform local operation
- exec.executeLocal()
+ exec.executeLocal(ctx)
exec.analyzeStatus(true)
}
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index 4da4c8083..782cad71b 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -18,12 +18,9 @@ type statusError struct {
err error
}
-// nolint: containedctx
type execCtx struct {
svc *Service
- ctx context.Context
-
prm Prm
statusError
@@ -52,10 +49,6 @@ func (exec *execCtx) setLogger(l *logger.Logger) {
)}
}
-func (exec execCtx) context() context.Context {
- return exec.ctx
-}
-
func (exec execCtx) isLocal() bool {
return exec.prm.common.LocalOnly()
}
@@ -80,10 +73,10 @@ func (exec *execCtx) newAddress(id oid.ID) oid.Address {
return a
}
-func (exec *execCtx) formSplitInfo() bool {
+func (exec *execCtx) formSplitInfo(ctx context.Context) bool {
var err error
- exec.splitInfo, err = exec.svc.header.splitInfo(exec)
+ exec.splitInfo, err = exec.svc.header.splitInfo(ctx, exec)
switch {
default:
@@ -101,29 +94,29 @@ func (exec *execCtx) formSplitInfo() bool {
return err == nil
}
-func (exec *execCtx) collectMembers() (ok bool) {
+func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) {
if exec.splitInfo == nil {
exec.log.Debug("no split info, object is PHY")
return true
}
if _, withLink := exec.splitInfo.Link(); withLink {
- ok = exec.collectChildren()
+ ok = exec.collectChildren(ctx)
}
if !ok {
if _, withLast := exec.splitInfo.LastPart(); withLast {
- ok = exec.collectChain()
+ ok = exec.collectChain(ctx)
if !ok {
return
}
}
} // may be fail if neither right nor linking ID is set?
- return exec.supplementBySplitID()
+ return exec.supplementBySplitID(ctx)
}
-func (exec *execCtx) collectChain() bool {
+func (exec *execCtx) collectChain(ctx context.Context) bool {
var chain []oid.ID
exec.log.Debug("assembling chain...")
@@ -131,7 +124,7 @@ func (exec *execCtx) collectChain() bool {
for prev, withPrev := exec.splitInfo.LastPart(); withPrev; {
chain = append(chain, prev)
- p, err := exec.svc.header.previous(exec, prev)
+ p, err := exec.svc.header.previous(ctx, exec, prev)
switch {
default:
@@ -160,10 +153,10 @@ func (exec *execCtx) collectChain() bool {
return true
}
-func (exec *execCtx) collectChildren() bool {
+func (exec *execCtx) collectChildren(ctx context.Context) bool {
exec.log.Debug("collecting children...")
- children, err := exec.svc.header.children(exec)
+ children, err := exec.svc.header.children(ctx, exec)
switch {
default:
@@ -187,10 +180,10 @@ func (exec *execCtx) collectChildren() bool {
}
}
-func (exec *execCtx) supplementBySplitID() bool {
+func (exec *execCtx) supplementBySplitID(ctx context.Context) bool {
exec.log.Debug("supplement by split ID")
- chain, err := exec.svc.searcher.splitMembers(exec)
+ chain, err := exec.svc.searcher.splitMembers(ctx, exec)
switch {
default:
@@ -264,8 +257,8 @@ func (exec *execCtx) initTombstoneObject() bool {
return true
}
-func (exec *execCtx) saveTombstone() bool {
- id, err := exec.svc.placer.put(exec)
+func (exec *execCtx) saveTombstone(ctx context.Context) bool {
+ id, err := exec.svc.placer.put(ctx, exec)
switch {
default:
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 36af96448..17eb0e4e1 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -1,25 +1,27 @@
package deletesvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
-func (exec *execCtx) executeLocal() {
+func (exec *execCtx) executeLocal(ctx context.Context) {
exec.log.Debug("forming tombstone structure...")
- ok := exec.formTombstone()
+ ok := exec.formTombstone(ctx)
if !ok {
return
}
exec.log.Debug("tombstone structure successfully formed, saving...")
- exec.saveTombstone()
+ exec.saveTombstone(ctx)
}
-func (exec *execCtx) formTombstone() (ok bool) {
+func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) {
tsLifetime, err := exec.svc.netInfo.TombstoneLifetime()
if err != nil {
exec.status = statusUndefined
@@ -40,7 +42,7 @@ func (exec *execCtx) formTombstone() (ok bool) {
exec.log.Debug("forming split info...")
- ok = exec.formSplitInfo()
+ ok = exec.formSplitInfo(ctx)
if !ok {
return
}
@@ -49,7 +51,7 @@ func (exec *execCtx) formTombstone() (ok bool) {
exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
- ok = exec.collectMembers()
+ ok = exec.collectMembers(ctx)
if !ok {
return
}
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index f2ea384de..11ff13b45 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -1,6 +1,8 @@
package deletesvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
@@ -39,20 +41,20 @@ type cfg struct {
header interface {
// must return (nil, nil) for PHY objects
- splitInfo(*execCtx) (*object.SplitInfo, error)
+ splitInfo(context.Context, *execCtx) (*object.SplitInfo, error)
- children(*execCtx) ([]oid.ID, error)
+ children(context.Context, *execCtx) ([]oid.ID, error)
// must return (nil, nil) for 1st object in chain
- previous(*execCtx, oid.ID) (*oid.ID, error)
+ previous(context.Context, *execCtx, oid.ID) (*oid.ID, error)
}
searcher interface {
- splitMembers(*execCtx) ([]oid.ID, error)
+ splitMembers(context.Context, *execCtx) ([]oid.ID, error)
}
placer interface {
- put(*execCtx) (*oid.ID, error)
+ put(context.Context, *execCtx) (*oid.ID, error)
}
netInfo NetworkInfo
diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go
index cc5433740..f9870f7e0 100644
--- a/pkg/services/object/delete/util.go
+++ b/pkg/services/object/delete/util.go
@@ -1,6 +1,7 @@
package deletesvc
import (
+ "context"
"errors"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
@@ -20,7 +21,7 @@ type simpleIDWriter struct {
ids []oid.ID
}
-func (w *headSvcWrapper) headAddress(exec *execCtx, addr oid.Address) (*object.Object, error) {
+func (w *headSvcWrapper) headAddress(ctx context.Context, exec *execCtx, addr oid.Address) (*object.Object, error) {
wr := getsvc.NewSimpleObjectWriter()
p := getsvc.HeadPrm{}
@@ -29,7 +30,7 @@ func (w *headSvcWrapper) headAddress(exec *execCtx, addr oid.Address) (*object.O
p.WithRawFlag(true)
p.WithAddress(addr)
- err := (*getsvc.Service)(w).Head(exec.context(), p)
+ err := (*getsvc.Service)(w).Head(ctx, p)
if err != nil {
return nil, err
}
@@ -37,8 +38,8 @@ func (w *headSvcWrapper) headAddress(exec *execCtx, addr oid.Address) (*object.O
return wr.Object(), nil
}
-func (w *headSvcWrapper) splitInfo(exec *execCtx) (*object.SplitInfo, error) {
- _, err := w.headAddress(exec, exec.address())
+func (w *headSvcWrapper) splitInfo(ctx context.Context, exec *execCtx) (*object.SplitInfo, error) {
+ _, err := w.headAddress(ctx, exec, exec.address())
var errSplitInfo *object.SplitInfoError
@@ -52,12 +53,12 @@ func (w *headSvcWrapper) splitInfo(exec *execCtx) (*object.SplitInfo, error) {
}
}
-func (w *headSvcWrapper) children(exec *execCtx) ([]oid.ID, error) {
+func (w *headSvcWrapper) children(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
link, _ := exec.splitInfo.Link()
a := exec.newAddress(link)
- linking, err := w.headAddress(exec, a)
+ linking, err := w.headAddress(ctx, exec, a)
if err != nil {
return nil, err
}
@@ -65,10 +66,10 @@ func (w *headSvcWrapper) children(exec *execCtx) ([]oid.ID, error) {
return linking.Children(), nil
}
-func (w *headSvcWrapper) previous(exec *execCtx, id oid.ID) (*oid.ID, error) {
+func (w *headSvcWrapper) previous(ctx context.Context, exec *execCtx, id oid.ID) (*oid.ID, error) {
a := exec.newAddress(id)
- h, err := w.headAddress(exec, a)
+ h, err := w.headAddress(ctx, exec, a)
if err != nil {
return nil, err
}
@@ -81,7 +82,7 @@ func (w *headSvcWrapper) previous(exec *execCtx, id oid.ID) (*oid.ID, error) {
return nil, nil
}
-func (w *searchSvcWrapper) splitMembers(exec *execCtx) ([]oid.ID, error) {
+func (w *searchSvcWrapper) splitMembers(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
fs := object.SearchFilters{}
fs.AddSplitIDFilter(object.MatchStringEqual, exec.splitInfo.SplitID())
@@ -93,7 +94,7 @@ func (w *searchSvcWrapper) splitMembers(exec *execCtx) ([]oid.ID, error) {
p.WithContainerID(exec.containerID())
p.WithSearchFilters(fs)
- err := (*searchsvc.Service)(w).Search(exec.context(), p)
+ err := (*searchsvc.Service)(w).Search(ctx, p)
if err != nil {
return nil, err
}
@@ -107,7 +108,7 @@ func (s *simpleIDWriter) WriteIDs(ids []oid.ID) error {
return nil
}
-func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) {
+func (w *putSvcWrapper) put(ctx context.Context, exec *execCtx) (*oid.ID, error) {
streamer, err := (*putsvc.Service)(w).Put()
if err != nil {
return nil, err
@@ -124,12 +125,12 @@ func (w *putSvcWrapper) put(exec *execCtx) (*oid.ID, error) {
return nil, err
}
- err = streamer.SendChunk(exec.context(), new(putsvc.PutChunkPrm).WithChunk(payload))
+ err = streamer.SendChunk(ctx, new(putsvc.PutChunkPrm).WithChunk(payload))
if err != nil {
return nil, err
}
- r, err := streamer.Close(exec.context())
+ r, err := streamer.Close(ctx)
if err != nil {
return nil, err
}
From 0b38419fbf494b7577583034ea83e7d4ec2716a1 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 12:54:21 +0300
Subject: [PATCH 0032/1943] [#208] searchsvc: Resolve context linters
Resolve containedctx and contextcheck linters.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/search/container.go | 10 +++++-----
pkg/services/object/search/exec.go | 9 ---------
pkg/services/object/search/search.go | 14 ++++++--------
pkg/services/object/search/search_test.go | 2 +-
pkg/services/object/search/service.go | 4 +++-
pkg/services/object/search/util.go | 5 +++--
6 files changed, 18 insertions(+), 26 deletions(-)
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index fffcba42a..b158bc23e 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -9,7 +9,7 @@ import (
"go.uber.org/zap"
)
-func (exec *execCtx) executeOnContainer() {
+func (exec *execCtx) executeOnContainer(ctx context.Context) {
if exec.isLocal() {
exec.log.Debug("return result directly")
return
@@ -28,7 +28,7 @@ func (exec *execCtx) executeOnContainer() {
}
for {
- if exec.processCurrentEpoch() {
+ if exec.processCurrentEpoch(ctx) {
break
}
@@ -47,7 +47,7 @@ func (exec *execCtx) executeOnContainer() {
exec.err = nil
}
-func (exec *execCtx) processCurrentEpoch() bool {
+func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
exec.log.Debug("process epoch",
zap.Uint64("number", exec.curProcEpoch),
)
@@ -57,7 +57,7 @@ func (exec *execCtx) processCurrentEpoch() bool {
return true
}
- ctx, cancel := context.WithCancel(exec.context())
+ ctx, cancel := context.WithCancel(ctx)
defer cancel()
for {
@@ -99,7 +99,7 @@ func (exec *execCtx) processCurrentEpoch() bool {
return
}
- ids, err := c.searchObjects(exec, info)
+ ids, err := c.searchObjects(ctx, exec, info)
if err != nil {
exec.log.Debug("remote operation failed",
zap.String("error", err.Error()))
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index 8cc9a9a26..f815270d9 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -1,8 +1,6 @@
package searchsvc
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -16,12 +14,9 @@ type statusError struct {
err error
}
-// nolint: containedctx
type execCtx struct {
svc *Service
- ctx context.Context
-
prm Prm
statusError
@@ -52,10 +47,6 @@ func (exec *execCtx) setLogger(l *logger.Logger) {
)}
}
-func (exec execCtx) context() context.Context {
- return exec.ctx
-}
-
func (exec execCtx) isLocal() bool {
return exec.prm.common.LocalOnly()
}
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 5a6880922..325b42a54 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -10,7 +10,6 @@ import (
func (s *Service) Search(ctx context.Context, prm Prm) error {
exec := &execCtx{
svc: s,
- ctx: ctx,
prm: prm,
}
@@ -18,22 +17,21 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
exec.setLogger(s.log)
- //nolint: contextcheck
- exec.execute()
+ exec.execute(ctx)
return exec.statusError.err
}
-func (exec *execCtx) execute() {
+func (exec *execCtx) execute(ctx context.Context) {
exec.log.Debug("serving request...")
// perform local operation
exec.executeLocal()
- exec.analyzeStatus(true)
+ exec.analyzeStatus(ctx, true)
}
-func (exec *execCtx) analyzeStatus(execCnr bool) {
+func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
switch exec.status {
default:
@@ -45,7 +43,7 @@ func (exec *execCtx) analyzeStatus(execCnr bool) {
}
if execCnr {
- exec.executeOnContainer()
- exec.analyzeStatus(false)
+ exec.executeOnContainer(ctx)
+ exec.analyzeStatus(ctx, false)
}
}
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index 1d902e7aa..e95970955 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -116,7 +116,7 @@ func (s *testStorage) search(exec *execCtx) ([]oid.ID, error) {
return v.ids, v.err
}
-func (c *testStorage) searchObjects(exec *execCtx, _ clientcore.NodeInfo) ([]oid.ID, error) {
+func (c *testStorage) searchObjects(_ context.Context, exec *execCtx, _ clientcore.NodeInfo) ([]oid.ID, error) {
v, ok := c.items[exec.containerID().EncodeToString()]
if !ok {
return nil, nil
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index aebcfca0f..b858e2219 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -1,6 +1,8 @@
package searchsvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
@@ -24,7 +26,7 @@ type Option func(*cfg)
type searchClient interface {
// searchObjects searches objects on the specified node.
// MUST NOT modify execCtx as it can be accessed concurrently.
- searchObjects(*execCtx, client.NodeInfo) ([]oid.ID, error)
+ searchObjects(context.Context, *execCtx, client.NodeInfo) ([]oid.ID, error)
}
type ClientConstructor interface {
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 741a224af..610dd77ff 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -1,6 +1,7 @@
package searchsvc
import (
+ "context"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -77,7 +78,7 @@ func (c *clientConstructorWrapper) get(info client.NodeInfo) (searchClient, erro
}, nil
}
-func (c *clientWrapper) searchObjects(exec *execCtx, info client.NodeInfo) ([]oid.ID, error) {
+func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info client.NodeInfo) ([]oid.ID, error) {
if exec.prm.forwarder != nil {
return exec.prm.forwarder(info, c.client)
}
@@ -98,7 +99,7 @@ func (c *clientWrapper) searchObjects(exec *execCtx, info client.NodeInfo) ([]oi
var prm internalclient.SearchObjectsPrm
- prm.SetContext(exec.context())
+ prm.SetContext(ctx)
prm.SetClient(c.client)
prm.SetPrivateKey(key)
prm.SetSessionToken(exec.prm.common.SessionToken())
From d85703a963937d52eed191adf446748846a2ea16 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 13:05:40 +0300
Subject: [PATCH 0033/1943] [#208] searchsvc: Refactor request forwarding
Resolve funlen & gocognit linters for toPrm method.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/search/prm.go | 4 +-
pkg/services/object/search/util.go | 2 +-
.../object/search/v2/request_forwarder.go | 99 +++++++++++++++++++
pkg/services/object/search/v2/util.go | 95 ++----------------
4 files changed, 113 insertions(+), 87 deletions(-)
create mode 100644 pkg/services/object/search/v2/request_forwarder.go
diff --git a/pkg/services/object/search/prm.go b/pkg/services/object/search/prm.go
index c80257bd3..da46dfeb6 100644
--- a/pkg/services/object/search/prm.go
+++ b/pkg/services/object/search/prm.go
@@ -1,6 +1,8 @@
package searchsvc
import (
+ "context"
+
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -29,7 +31,7 @@ type IDListWriter interface {
// RequestForwarder is a callback for forwarding of the
// original Search requests.
-type RequestForwarder func(coreclient.NodeInfo, coreclient.MultiAddressClient) ([]oid.ID, error)
+type RequestForwarder func(context.Context, coreclient.NodeInfo, coreclient.MultiAddressClient) ([]oid.ID, error)
// SetCommonParameters sets common parameters of the operation.
func (p *Prm) SetCommonParameters(common *util.CommonPrm) {
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 610dd77ff..c12ed2c9f 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -80,7 +80,7 @@ func (c *clientConstructorWrapper) get(info client.NodeInfo) (searchClient, erro
func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info client.NodeInfo) ([]oid.ID, error) {
if exec.prm.forwarder != nil {
- return exec.prm.forwarder(info, c.client)
+ return exec.prm.forwarder(ctx, info, c.client)
}
var sessionInfo *util.SessionInfo
diff --git a/pkg/services/object/search/v2/request_forwarder.go b/pkg/services/object/search/v2/request_forwarder.go
new file mode 100644
index 000000000..8023f2f0f
--- /dev/null
+++ b/pkg/services/object/search/v2/request_forwarder.go
@@ -0,0 +1,99 @@
+package searchsvc
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+type requestForwarder struct {
+ OnceResign *sync.Once
+ Request *objectV2.SearchRequest
+ Key *ecdsa.PrivateKey
+}
+
+func (f *requestForwarder) forwardRequest(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) ([]oid.ID, error) {
+ var err error
+
+ // once compose and resign forwarding request
+ f.OnceResign.Do(func() {
+ // compose meta header of the local server
+ metaHdr := new(session.RequestMetaHeader)
+ metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1)
+ // TODO: #1165 think how to set the other fields
+ metaHdr.SetOrigin(f.Request.GetMetaHeader())
+
+ f.Request.SetMetaHeader(metaHdr)
+
+ err = signature.SignServiceMessage(f.Key, f.Request)
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ var searchStream *rpc.SearchResponseReader
+ err = c.RawForAddress(addr, func(cli *rpcclient.Client) error {
+ searchStream, err = rpc.SearchObjects(cli, f.Request, rpcclient.WithContext(ctx))
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // code below is copy-pasted from c.SearchObjects implementation,
+ // perhaps it is worth highlighting the utility function in frostfs-api-go
+ var (
+ searchResult []oid.ID
+ resp = new(objectV2.SearchResponse)
+ )
+
+ for {
+ // receive message from server stream
+ err := searchStream.Read(resp)
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+
+ return nil, fmt.Errorf("reading the response failed: %w", err)
+ }
+
+ // verify response key
+ if err = internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
+ return nil, err
+ }
+
+ // verify response structure
+ if err := signature.VerifyServiceMessage(resp); err != nil {
+ return nil, fmt.Errorf("could not verify %T: %w", resp, err)
+ }
+
+ chunk := resp.GetBody().GetIDList()
+ var id oid.ID
+
+ for i := range chunk {
+ err = id.ReadFromV2(chunk[i])
+ if err != nil {
+ return nil, fmt.Errorf("invalid object ID: %w", err)
+ }
+
+ searchResult = append(searchResult, id)
+ }
+ }
+
+ return searchResult, nil
+}
diff --git a/pkg/services/object/search/v2/util.go b/pkg/services/object/search/v2/util.go
index 2bde6b2f5..12158a820 100644
--- a/pkg/services/object/search/v2/util.go
+++ b/pkg/services/object/search/v2/util.go
@@ -1,20 +1,15 @@
package searchsvc
import (
+ "context"
"errors"
"fmt"
- "io"
"sync"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -22,7 +17,6 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
-// nolint: funlen, gocognit
func (s *Service) toPrm(req *objectV2.SearchRequest, stream objectSvc.SearchStream) (*searchsvc.Prm, error) {
body := req.GetBody()
@@ -38,8 +32,6 @@ func (s *Service) toPrm(req *objectV2.SearchRequest, stream objectSvc.SearchStre
return nil, fmt.Errorf("invalid container ID: %w", err)
}
- meta := req.GetMetaHeader()
-
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return nil, err
@@ -53,85 +45,18 @@ func (s *Service) toPrm(req *objectV2.SearchRequest, stream objectSvc.SearchStre
})
if !commonPrm.LocalOnly() {
- var onceResign sync.Once
-
key, err := s.keyStorage.GetKey(nil)
if err != nil {
return nil, err
}
- p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) ([]oid.ID, error) {
- var err error
+ forwarder := &requestForwarder{
+ OnceResign: &sync.Once{},
+ Request: req,
+ Key: key,
+ }
- // once compose and resign forwarding request
- onceResign.Do(func() {
- // compose meta header of the local server
- metaHdr := new(session.RequestMetaHeader)
- metaHdr.SetTTL(meta.GetTTL() - 1)
- // TODO: #1165 think how to set the other fields
- metaHdr.SetOrigin(meta)
-
- req.SetMetaHeader(metaHdr)
-
- err = signature.SignServiceMessage(key, req)
- })
-
- if err != nil {
- return nil, err
- }
-
- var searchStream *rpc.SearchResponseReader
- err = c.RawForAddress(addr, func(cli *rpcclient.Client) error {
- searchStream, err = rpc.SearchObjects(cli, req, rpcclient.WithContext(stream.Context()))
- return err
- })
- if err != nil {
- return nil, err
- }
-
- // code below is copy-pasted from c.SearchObjects implementation,
- // perhaps it is worth highlighting the utility function in frostfs-api-go
- var (
- searchResult []oid.ID
- resp = new(objectV2.SearchResponse)
- )
-
- for {
- // receive message from server stream
- err := searchStream.Read(resp)
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- return nil, fmt.Errorf("reading the response failed: %w", err)
- }
-
- // verify response key
- if err = internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
- return nil, err
- }
-
- // verify response structure
- if err := signature.VerifyServiceMessage(resp); err != nil {
- return nil, fmt.Errorf("could not verify %T: %w", resp, err)
- }
-
- chunk := resp.GetBody().GetIDList()
- var id oid.ID
-
- for i := range chunk {
- err = id.ReadFromV2(chunk[i])
- if err != nil {
- return nil, fmt.Errorf("invalid object ID: %w", err)
- }
-
- searchResult = append(searchResult, id)
- }
- }
-
- return searchResult, nil
- }))
+ p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequest))
}
p.WithContainerID(id)
@@ -140,8 +65,8 @@ func (s *Service) toPrm(req *objectV2.SearchRequest, stream objectSvc.SearchStre
return p, nil
}
-func groupAddressRequestForwarder(f func(network.Address, client.MultiAddressClient, []byte) ([]oid.ID, error)) searchsvc.RequestForwarder {
- return func(info client.NodeInfo, c client.MultiAddressClient) ([]oid.ID, error) {
+func groupAddressRequestForwarder(f func(context.Context, network.Address, client.MultiAddressClient, []byte) ([]oid.ID, error)) searchsvc.RequestForwarder {
+ return func(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) ([]oid.ID, error) {
var (
firstErr error
res []oid.ID
@@ -162,7 +87,7 @@ func groupAddressRequestForwarder(f func(network.Address, client.MultiAddressCli
// would be nice to log otherwise
}()
- res, err = f(addr, c, key)
+ res, err = f(ctx, addr, c, key)
return
})
From 760af6b912d6e0c77ee44a56f08552c8b0618a56 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 5 Apr 2023 10:43:48 +0300
Subject: [PATCH 0034/1943] [#211] fstree: Consider ENOSPC a logical error
We already do this for file writing; however, directory creation can also
fail.
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/blobstor/fstree/fstree.go | 3 +++
1 file changed, 3 insertions(+)
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 1a1247001..99484860a 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -228,6 +228,9 @@ func (t *FSTree) Put(prm common.PutPrm) (common.PutRes, error) {
p := t.treePath(prm.Address)
if err := util.MkdirAllX(filepath.Dir(p), t.Permissions); err != nil {
+ if errors.Is(err, syscall.ENOSPC) {
+ return common.PutRes{}, common.ErrNoSpace
+ }
return common.PutRes{}, err
}
if !prm.DontCompress {
From 9098d0eec0bf9e2da3a51bf4d723173093bdb271 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 5 Apr 2023 10:46:59 +0300
Subject: [PATCH 0035/1943] [#211] engine: Unify shard mode checks for tree
operations
All operations must ensure the shard is not in a degraded mode.
Write operations must also ensure the shard is not in a read-only mode.
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/shard/tree.go | 75 ++++++++++++++++++++++++++
1 file changed, 75 insertions(+)
diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go
index 684c92e66..db07c001e 100644
--- a/pkg/local_object_storage/shard/tree.go
+++ b/pkg/local_object_storage/shard/tree.go
@@ -23,6 +23,9 @@ func (s *Shard) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Mo
if s.info.Mode.ReadOnly() {
return nil, ErrReadOnlyMode
}
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
return s.pilorama.TreeMove(d, treeID, m)
}
@@ -38,6 +41,9 @@ func (s *Shard) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr stri
if s.info.Mode.ReadOnly() {
return nil, ErrReadOnlyMode
}
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
return s.pilorama.TreeAddByPath(d, treeID, attr, path, meta)
}
@@ -53,6 +59,9 @@ func (s *Shard) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move, backgr
if s.info.Mode.ReadOnly() {
return ErrReadOnlyMode
}
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
return s.pilorama.TreeApply(cnr, treeID, m, backgroundSync)
}
@@ -61,6 +70,13 @@ func (s *Shard) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
return s.pilorama.TreeGetByPath(cid, treeID, attr, path, latest)
}
@@ -69,6 +85,13 @@ func (s *Shard) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node)
if s.pilorama == nil {
return pilorama.Meta{}, 0, ErrPiloramaDisabled
}
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return pilorama.Meta{}, 0, ErrDegradedMode
+ }
return s.pilorama.TreeGetMeta(cid, treeID, nodeID)
}
@@ -77,6 +100,13 @@ func (s *Shard) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.No
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
return s.pilorama.TreeGetChildren(cid, treeID, nodeID)
}
@@ -85,6 +115,13 @@ func (s *Shard) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilor
if s.pilorama == nil {
return pilorama.Move{}, ErrPiloramaDisabled
}
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return pilorama.Move{}, ErrDegradedMode
+ }
return s.pilorama.TreeGetOpLog(cid, treeID, height)
}
@@ -93,6 +130,13 @@ func (s *Shard) TreeDrop(cid cidSDK.ID, treeID string) error {
if s.pilorama == nil {
return ErrPiloramaDisabled
}
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
return s.pilorama.TreeDrop(cid, treeID)
}
@@ -101,6 +145,13 @@ func (s *Shard) TreeList(cid cidSDK.ID) ([]string, error) {
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return nil, ErrDegradedMode
+ }
return s.pilorama.TreeList(cid)
}
@@ -109,6 +160,13 @@ func (s *Shard) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
if s.pilorama == nil {
return false, ErrPiloramaDisabled
}
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return false, ErrDegradedMode
+ }
return s.pilorama.TreeExists(cid, treeID)
}
@@ -117,6 +175,16 @@ func (s *Shard) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height ui
if s.pilorama == nil {
return ErrPiloramaDisabled
}
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
return s.pilorama.TreeUpdateLastSyncHeight(cid, treeID, height)
}
@@ -125,5 +193,12 @@ func (s *Shard) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error)
if s.pilorama == nil {
return 0, ErrPiloramaDisabled
}
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
return s.pilorama.TreeLastSyncHeight(cid, treeID)
}
From 23575e1ac0bd0f92debd10abed427d2f184767dc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 14:40:01 +0300
Subject: [PATCH 0036/1943] [#210] policer: Resolve contextcheck linter
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/object.go | 12 ++++++------
pkg/local_object_storage/engine/delete.go | 15 ++++++++-------
pkg/local_object_storage/engine/delete_test.go | 3 ++-
pkg/local_object_storage/engine/inhume.go | 14 +++++++-------
pkg/local_object_storage/engine/inhume_test.go | 5 +++--
pkg/local_object_storage/engine/lock_test.go | 18 +++++++++---------
pkg/local_object_storage/shard/control_test.go | 4 ++--
pkg/local_object_storage/shard/inhume.go | 4 ++--
pkg/local_object_storage/shard/inhume_test.go | 3 ++-
pkg/local_object_storage/shard/lock_test.go | 12 ++++++------
pkg/local_object_storage/shard/metrics_test.go | 4 ++--
pkg/services/control/server/gc.go | 4 ++--
pkg/services/object/put/local.go | 6 +++---
pkg/services/policer/check.go | 5 ++---
pkg/services/policer/policer.go | 3 ++-
15 files changed, 58 insertions(+), 54 deletions(-)
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 3b6bdcc7d..ff4335ff9 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -253,11 +253,11 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputati
policerconfig.HeadTimeout(c.appCfg),
),
policer.WithReplicator(c.replicator),
- policer.WithRedundantCopyCallback(func(addr oid.Address) {
+ policer.WithRedundantCopyCallback(func(ctx context.Context, addr oid.Address) {
var inhumePrm engine.InhumePrm
inhumePrm.MarkAsGarbage(addr)
- _, err := ls.Inhume(inhumePrm)
+ _, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn("could not inhume mark redundant copy as garbage",
zap.String("error", err.Error()),
@@ -620,8 +620,8 @@ func (e engineWithNotifications) IsLocked(address oid.Address) (bool, error) {
return e.base.IsLocked(address)
}
-func (e engineWithNotifications) Delete(tombstone oid.Address, toDelete []oid.ID) error {
- return e.base.Delete(tombstone, toDelete)
+func (e engineWithNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
+ return e.base.Delete(ctx, tombstone, toDelete)
}
func (e engineWithNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
@@ -657,7 +657,7 @@ func (e engineWithoutNotifications) IsLocked(address oid.Address) (bool, error)
return e.engine.IsLocked(address)
}
-func (e engineWithoutNotifications) Delete(tombstone oid.Address, toDelete []oid.ID) error {
+func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
var prm engine.InhumePrm
addrs := make([]oid.Address, len(toDelete))
@@ -668,7 +668,7 @@ func (e engineWithoutNotifications) Delete(tombstone oid.Address, toDelete []oid
prm.WithTarget(tombstone, addrs...)
- _, err := e.engine.Inhume(prm)
+ _, err := e.engine.Inhume(ctx, prm)
return err
}
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index a49b1e8fa..6ea5728bb 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
@@ -44,16 +45,16 @@ func (p *DeletePrm) WithForceRemoval() {
// NOTE: Marks any object to be deleted (despite any prohibitions
// on operations with that object) if WithForceRemoval option has
// been provided.
-func (e *StorageEngine) Delete(prm DeletePrm) (res DeleteRes, err error) {
+func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) {
err = e.execIfNotBlocked(func() error {
- res, err = e.delete(prm)
+ res, err = e.delete(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) delete(prm DeletePrm) (DeleteRes, error) {
+func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
if e.metrics != nil {
defer elapsed(e.metrics.AddDeleteDuration)()
}
@@ -95,7 +96,7 @@ func (e *StorageEngine) delete(prm DeletePrm) (DeleteRes, error) {
shPrm.ForceRemoval()
}
- _, err = sh.Inhume(shPrm)
+ _, err = sh.Inhume(ctx, shPrm)
if err != nil {
e.reportShardError(sh, "could not inhume object in shard", err)
@@ -113,13 +114,13 @@ func (e *StorageEngine) delete(prm DeletePrm) (DeleteRes, error) {
}
if splitInfo != nil {
- e.deleteChildren(prm.addr, prm.forceRemoval, splitInfo.SplitID())
+ e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
}
return DeleteRes{}, nil
}
-func (e *StorageEngine) deleteChildren(addr oid.Address, force bool, splitID *objectSDK.SplitID) {
+func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) {
var fs objectSDK.SearchFilters
fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)
@@ -144,7 +145,7 @@ func (e *StorageEngine) deleteChildren(addr oid.Address, force bool, splitID *ob
for _, addr := range res.AddressList() {
inhumePrm.MarkAsGarbage(addr)
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
e.log.Debug("could not inhume object in shard",
zap.Stringer("addr", addr),
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index ccba3d903..8a4e6a7fa 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"os"
"testing"
@@ -78,7 +79,7 @@ func TestDeleteBigObject(t *testing.T) {
deletePrm.WithForceRemoval()
deletePrm.WithAddress(addrParent)
- _, err := e.Delete(deletePrm)
+ _, err := e.Delete(context.Background(), deletePrm)
require.NoError(t, err)
checkGetError(t, e, addrParent, &apistatus.ObjectNotFound{})
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index 680e773c6..2ecca5256 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -60,16 +60,16 @@ var errInhumeFailure = errors.New("inhume operation failed")
// with that object) if WithForceRemoval option has been provided.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Inhume(prm InhumePrm) (res InhumeRes, err error) {
+func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
err = e.execIfNotBlocked(func() error {
- res, err = e.inhume(prm)
+ res, err = e.inhume(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) inhume(prm InhumePrm) (InhumeRes, error) {
+func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
if e.metrics != nil {
defer elapsed(e.metrics.AddInhumeDuration)()
}
@@ -98,12 +98,12 @@ func (e *StorageEngine) inhume(prm InhumePrm) (InhumeRes, error) {
shPrm.MarkAsGarbage(prm.addrs[i])
}
- ok, err := e.inhumeAddr(prm.addrs[i], shPrm, true)
+ ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, true)
if err != nil {
return InhumeRes{}, err
}
if !ok {
- ok, err := e.inhumeAddr(prm.addrs[i], shPrm, false)
+ ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, false)
if err != nil {
return InhumeRes{}, err
} else if !ok {
@@ -116,7 +116,7 @@ func (e *StorageEngine) inhume(prm InhumePrm) (InhumeRes, error) {
}
// Returns ok if object was inhumed during this invocation or before.
-func (e *StorageEngine) inhumeAddr(addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) {
+func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) {
root := false
var errLocked apistatus.ObjectLocked
var existPrm shard.ExistsPrm
@@ -154,7 +154,7 @@ func (e *StorageEngine) inhumeAddr(addr oid.Address, prm shard.InhumePrm, checkE
}
}
- _, err := sh.Inhume(prm)
+ _, err := sh.Inhume(ctx, prm)
if err != nil {
switch {
case errors.As(err, &errLocked):
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index b001c6df9..e3150c17f 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"os"
"testing"
@@ -47,7 +48,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
addrs, err := Select(e, cnr, fs)
@@ -75,7 +76,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
addrs, err := Select(e, cnr, fs)
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index f222ffe62..1014b2146 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -111,7 +111,7 @@ func TestLockUserScenario(t *testing.T) {
var inhumePrm InhumePrm
inhumePrm.WithTarget(tombAddr, objAddr)
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
// 4.
@@ -124,7 +124,7 @@ func TestLockUserScenario(t *testing.T) {
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorIs(t, err, meta.ErrLockObjectRemoval)
// 5.
@@ -135,7 +135,7 @@ func TestLockUserScenario(t *testing.T) {
inhumePrm.WithTarget(tombAddr, objAddr)
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
}
@@ -192,7 +192,7 @@ func TestLockExpiration(t *testing.T) {
var inhumePrm InhumePrm
inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
// 3.
@@ -205,7 +205,7 @@ func TestLockExpiration(t *testing.T) {
// 4.
inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
}
@@ -259,12 +259,12 @@ func TestLockForceRemoval(t *testing.T) {
var inhumePrm InhumePrm
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
// 4.
@@ -272,12 +272,12 @@ func TestLockForceRemoval(t *testing.T) {
deletePrm.WithAddress(objectcore.AddressOf(lock))
deletePrm.WithForceRemoval()
- _, err = e.Delete(deletePrm)
+ _, err = e.Delete(context.Background(), deletePrm)
require.NoError(t, err)
// 5.
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- _, err = e.Inhume(inhumePrm)
+ _, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index 6886438e0..fec268350 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -269,7 +269,7 @@ func TestRefillMetabase(t *testing.T) {
var inhumePrm InhumePrm
inhumePrm.SetTarget(object.AddressOf(tombObj), tombMembers...)
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var headPrm HeadPrm
@@ -322,7 +322,7 @@ func TestRefillMetabase(t *testing.T) {
var prm InhumePrm
prm.MarkAsGarbage(addr)
- _, err := sh.Inhume(prm)
+ _, err := sh.Inhume(context.Background(), prm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked),
"object %s should be locked", locked[i])
}
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index 199bb8b3f..40a5bf22e 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -60,7 +60,7 @@ var ErrLockObjectRemoval = meta.ErrLockObjectRemoval
// if at least one object is locked.
//
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
-func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) {
+func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
s.m.RLock()
if s.info.Mode.ReadOnly() {
@@ -119,7 +119,7 @@ func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) {
}
if deletedLockObjs := res.DeletedLockObjects(); len(deletedLockObjs) != 0 {
- s.deletedLockCallBack(context.Background(), deletedLockObjs)
+ s.deletedLockCallBack(ctx, deletedLockObjs)
}
return InhumeRes{}, nil
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index fca613941..191afab01 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -1,6 +1,7 @@
package shard_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -47,7 +48,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
_, err = testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
- _, err = sh.Inhume(inhPrm)
+ _, err = sh.Inhume(context.Background(), inhPrm)
require.NoError(t, err)
_, err = sh.Get(getPrm)
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 4e23e8c37..995aa1473 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -93,11 +93,11 @@ func TestShard_Lock(t *testing.T) {
var inhumePrm shard.InhumePrm
inhumePrm.SetTarget(objectcore.AddressOf(ts), objectcore.AddressOf(obj))
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
})
@@ -107,11 +107,11 @@ func TestShard_Lock(t *testing.T) {
var inhumePrm shard.InhumePrm
inhumePrm.SetTarget(objectcore.AddressOf(ts), objectcore.AddressOf(lock))
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.Error(t, err)
inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock))
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.Error(t, err)
})
@@ -120,7 +120,7 @@ func TestShard_Lock(t *testing.T) {
inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock))
inhumePrm.ForceRemoval()
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
// it should be possible to remove
@@ -129,7 +129,7 @@ func TestShard_Lock(t *testing.T) {
inhumePrm = shard.InhumePrm{}
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
- _, err = sh.Inhume(inhumePrm)
+ _, err = sh.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
// check that object has been removed
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 959aebf8d..ba46881e2 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -127,7 +127,7 @@ func TestCounters(t *testing.T) {
for i := 0; i < inhumedNumber; i++ {
prm.MarkAsGarbage(objectcore.AddressOf(oo[i]))
- _, err := sh.Inhume(prm)
+ _, err := sh.Inhume(context.Background(), prm)
require.NoError(t, err)
}
@@ -149,7 +149,7 @@ func TestCounters(t *testing.T) {
inhumedNumber := int(phy / 4)
prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
- _, err := sh.Inhume(prm)
+ _, err := sh.Inhume(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, phy, mm.objCounters[physical])
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
index 7fa0a060f..7912d4e3e 100644
--- a/pkg/services/control/server/gc.go
+++ b/pkg/services/control/server/gc.go
@@ -17,7 +17,7 @@ import (
//
// If some address is not a valid object address in a binary format, an error returns.
// If request is unsigned or signed by disallowed key, permission error returns.
-func (s *Server) DropObjects(_ context.Context, req *control.DropObjectsRequest) (*control.DropObjectsResponse, error) {
+func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsRequest) (*control.DropObjectsResponse, error) {
// verify request
if err := s.isValidRequest(req); err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -42,7 +42,7 @@ func (s *Server) DropObjects(_ context.Context, req *control.DropObjectsRequest)
prm.WithAddress(addrList[i])
//nolint: contextcheck
- _, err := s.s.Delete(prm)
+ _, err := s.s.Delete(ctx, prm)
if err != nil && firstErr == nil {
firstErr = err
}
diff --git a/pkg/services/object/put/local.go b/pkg/services/object/put/local.go
index 12e3a2eee..2e6a496f3 100644
--- a/pkg/services/object/put/local.go
+++ b/pkg/services/object/put/local.go
@@ -17,7 +17,7 @@ type ObjectStorage interface {
Put(*object.Object) error
// Delete must delete passed objects
// and return any appeared error.
- Delete(tombstone oid.Address, toDelete []oid.ID) error
+ Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error
// Lock must lock passed objects
// and return any appeared error.
Lock(locker oid.Address, toLock []oid.ID) error
@@ -39,10 +39,10 @@ func (t *localTarget) WriteObject(obj *object.Object, meta objectCore.ContentMet
return nil
}
-func (t *localTarget) Close(_ context.Context) (*transformer.AccessIdentifiers, error) {
+func (t *localTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
switch t.meta.Type() {
case object.TypeTombstone:
- err := t.storage.Delete(objectCore.AddressOf(t.obj), t.meta.Objects())
+ err := t.storage.Delete(ctx, objectCore.AddressOf(t.obj), t.meta.Objects())
if err != nil {
return nil, fmt.Errorf("could not delete objects from tombstone locally: %w", err)
}
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index 7d4b714f3..f393eb570 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -81,8 +81,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
prm.MarkAsGarbage(addrWithType.Address)
prm.WithForceRemoval()
- //nolint: contextcheck
- _, err := p.jobQueue.localStorage.Inhume(prm)
+ _, err := p.jobQueue.localStorage.Inhume(ctx, prm)
if err != nil {
p.log.Error("could not inhume object with missing container",
zap.Stringer("cid", idCnr),
@@ -134,7 +133,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
zap.Stringer("object", addr),
)
- p.cbRedundantCopy(addr)
+ p.cbRedundantCopy(ctx, addr)
}
}
diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go
index dba0c1cba..541ab599c 100644
--- a/pkg/services/policer/policer.go
+++ b/pkg/services/policer/policer.go
@@ -1,6 +1,7 @@
package policer
import (
+ "context"
"sync"
"time"
@@ -63,7 +64,7 @@ type Option func(*cfg)
// RedundantCopyCallback is a callback to pass
// the redundant local copy of the object.
-type RedundantCopyCallback func(oid.Address)
+type RedundantCopyCallback func(context.Context, oid.Address)
type cfg struct {
headTimeout time.Duration
From 080be5cfcdc27149c56eef4ed201b4a0dac93943 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 14:57:44 +0300
Subject: [PATCH 0037/1943] [#210] policer: Refactor object placement
Resolve containedctx and contextcheck linters.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/policer/check.go | 18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index f393eb570..6a2d9d327 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -105,9 +105,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
return
}
- c := &processPlacementContext{
- Context: ctx,
- }
+ c := &placementRequirements{}
var numOfContainerNodes int
for i := range nn {
@@ -124,8 +122,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
default:
}
- //nolint: contextcheck
- p.processNodes(c, addrWithType, nn[i], policy.ReplicaNumberByIndex(i), checkedNodes)
+ p.processNodes(ctx, c, addrWithType, nn[i], policy.ReplicaNumberByIndex(i), checkedNodes)
}
if !c.needLocalCopy && c.removeLocalCopy {
@@ -137,10 +134,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
}
}
-// nolint: containedctx
-type processPlacementContext struct {
- context.Context
-
+type placementRequirements struct {
// needLocalCopy is true if the current node must store an object according to the storage policy.
needLocalCopy bool
// removeLocalCopy is true if all copies are stored according to the storage policy
@@ -149,7 +143,7 @@ type processPlacementContext struct {
}
// nolint: funlen
-func (p *Policer) processNodes(ctx *processPlacementContext, addrWithType objectcore.AddressWithType,
+func (p *Policer) processNodes(ctx context.Context, requirements *placementRequirements, addrWithType objectcore.AddressWithType,
nodes []netmap.NodeInfo, shortage uint32, checkedNodes *nodeCache) {
addr := addrWithType.Address
typ := addrWithType.Type
@@ -189,7 +183,7 @@ func (p *Policer) processNodes(ctx *processPlacementContext, addrWithType object
}
if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) {
- ctx.needLocalCopy = true
+ requirements.needLocalCopy = true
shortage--
} else if nodes[i].IsMaintenance() {
@@ -253,7 +247,7 @@ func (p *Policer) processNodes(ctx *processPlacementContext, addrWithType object
zap.Int("count", uncheckedCopies))
} else if uncheckedCopies == 0 {
// Safe to remove: checked all copies, shortage == 0.
- ctx.removeLocalCopy = true
+ requirements.removeLocalCopy = true
}
}
From d6486d172ec4f58004054b96de6a659d5c45cf3d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 15:29:12 +0300
Subject: [PATCH 0038/1943] [#210] policer: Refactor nodes processing
Resolve funlen linter for processNodes method.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/policer/check.go | 44 +++++++++++++++++++++--------------
1 file changed, 26 insertions(+), 18 deletions(-)
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index 6a2d9d327..9cdc4d813 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -13,6 +13,7 @@ import (
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
@@ -142,7 +143,6 @@ type placementRequirements struct {
removeLocalCopy bool
}
-// nolint: funlen
func (p *Policer) processNodes(ctx context.Context, requirements *placementRequirements, addrWithType objectcore.AddressWithType,
nodes []netmap.NodeInfo, shortage uint32, checkedNodes *nodeCache) {
addr := addrWithType.Address
@@ -152,21 +152,6 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
// Number of copies that are stored on maintenance nodes.
var uncheckedCopies int
- handleMaintenance := func(node netmap.NodeInfo) {
- // consider remote nodes under maintenance as problem OK. Such
- // nodes MAY not respond with object, however, this is how we
- // prevent spam with new replicas.
- // However, additional copies should not be removed in this case,
- // because we can remove the only copy this way.
- checkedNodes.submitReplicaHolder(node)
- shortage--
- uncheckedCopies++
-
- p.log.Debug("consider node under maintenance as OK",
- zap.String("node", netmap.StringifyPublicKey(node)),
- )
- }
-
if typ == object.TypeLock {
// all nodes of a container must store the `LOCK` objects
// for correct object removal protection:
@@ -187,7 +172,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
shortage--
} else if nodes[i].IsMaintenance() {
- handleMaintenance(nodes[i])
+ shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
} else {
if status := checkedNodes.processStatus(nodes[i]); status >= 0 {
if status == 0 {
@@ -212,7 +197,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
}
if isClientErrMaintenance(err) {
- handleMaintenance(nodes[i])
+ shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
} else if err != nil {
p.log.Error("receive object header to check policy compliance",
zap.Stringer("object", addr),
@@ -228,6 +213,29 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
i--
}
+ p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies)
+}
+
+// handleMaintenance handles a node in maintenance mode and returns new shortage and uncheckedCopies values.
+//
+// Remote nodes under maintenance are considered OK. Such
+// nodes MAY not respond with object, however, this is how we
+// prevent spam with new replicas.
+// However, additional copies should not be removed in this case,
+// because we can remove the only copy this way.
+func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
+ checkedNodes.submitReplicaHolder(node)
+ shortage--
+ uncheckedCopies++
+
+ p.log.Debug("consider node under maintenance as OK",
+ zap.String("node", netmap.StringifyPublicKey(node)),
+ )
+ return shortage, uncheckedCopies
+}
+
+func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
+ nodes []netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) {
if shortage > 0 {
p.log.Debug("shortage of object copies detected",
zap.Stringer("object", addr),
From 6f7b6a8813e10c8516c29efa0eb71a48af14c3e7 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Thu, 30 Mar 2023 14:58:20 +0300
Subject: [PATCH 0039/1943] [#116] node: Improve shard/engine construction in
tests
* Introduce testEngineWrapper that can be constructed with different options
Signed-off-by: Airat Arifullin a.arifullin@yadro.com
---
.../engine/control_test.go | 30 ++---
.../engine/delete_test.go | 2 +-
.../engine/engine_test.go | 113 ++++++++++--------
pkg/local_object_storage/engine/error_test.go | 54 +++++----
.../engine/evacuate_test.go | 39 +++---
pkg/local_object_storage/engine/head_test.go | 2 +-
.../engine/inhume_test.go | 4 +-
pkg/local_object_storage/engine/list_test.go | 2 +-
pkg/local_object_storage/engine/lock_test.go | 64 ++++++----
.../engine/shards_test.go | 7 +-
10 files changed, 173 insertions(+), 144 deletions(-)
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index f954d906a..2c44eb169 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -192,7 +192,8 @@ func testEngineFailInitAndReload(t *testing.T, errOnAdd bool, opts []shard.Optio
}
func TestExecBlocks(t *testing.T) {
- e := testNewEngineWithShardNum(t, 2) // number doesn't matter in this test, 2 is several but not many
+ e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many
+
t.Cleanup(func() {
os.RemoveAll(t.Name())
})
@@ -314,25 +315,26 @@ func TestReload(t *testing.T) {
// engineWithShards creates engine with specified number of shards. Returns
// slice of paths to their metabase and the engine.
-// TODO: #1776 unify engine construction in tests
func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []string) {
addPath := filepath.Join(path, "add")
currShards := make([]string, 0, num)
- e := New()
- for i := 0; i < num; i++ {
- id, err := e.AddShard(
- shard.WithBlobStorOptions(
- blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(i)), errSmallSize))),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", i))),
- meta.WithPermissions(0700),
- meta.WithEpochState(epochState{}),
- ),
- )
- require.NoError(t, err)
+ te := testNewEngine(t).
+ setShardsNumAdditionalOpts(t, num, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithBlobStorOptions(
+ blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
+ shard.WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", id))),
+ meta.WithPermissions(0700),
+ meta.WithEpochState(epochState{}),
+ ),
+ }
+ })
+ e, ids := te.engine, te.shardIDs
+ for _, id := range ids {
currShards = append(currShards, calculateShardID(e.shards[id.String()].DumpInfo()))
}
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index 8a4e6a7fa..54d73cee8 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -54,7 +54,7 @@ func TestDeleteBigObject(t *testing.T) {
s2 := testNewShard(t, 2)
s3 := testNewShard(t, 3)
- e := testNewEngineWithShards(t, s1, s2, s3)
+ e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
e.log = &logger.Logger{Logger: zaptest.NewLogger(t)}
defer e.Close()
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index 83dbcd093..ddaf88d18 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -24,6 +24,7 @@ import (
"github.com/stretchr/testify/require"
"go.uber.org/atomic"
"go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
)
type epochState struct{}
@@ -50,7 +51,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
shards[i] = testNewShard(b, i)
}
- e := testNewEngineWithShards(b, shards...)
+ e := testNewEngine(b).setInitializedShards(b, shards...).engine
b.Cleanup(func() {
_ = e.Close()
_ = os.RemoveAll(b.Name())
@@ -75,24 +76,68 @@ func benchmarkExists(b *testing.B, shardNum int) {
}
}
-func testNewEngineWithShards(t testing.TB, shards ...*shard.Shard) *StorageEngine {
- engine := New()
+type testEngineWrapper struct {
+ engine *StorageEngine
+ shardIDs []*shard.ID
+}
+func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
+ engine := New(WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}))
+ for _, opt := range opts {
+ opt(engine.cfg)
+ }
+ return &testEngineWrapper{
+ engine: engine,
+ }
+}
+
+func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper {
for _, s := range shards {
pool, err := ants.NewPool(10, ants.WithNonblocking(true))
require.NoError(t, err)
- engine.shards[s.ID().String()] = hashedShard{
+ te.engine.shards[s.ID().String()] = hashedShard{
shardWrapper: shardWrapper{
errorCount: atomic.NewUint32(0),
Shard: s,
},
hash: hrw.Hash([]byte(s.ID().String())),
}
- engine.shardPools[s.ID().String()] = pool
+ te.engine.shardPools[s.ID().String()] = pool
+ te.shardIDs = append(te.shardIDs, s.ID())
+ }
+ return te
+}
+
+func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
+ shards := make([]*shard.Shard, 0, num)
+
+ for i := 0; i < num; i++ {
+ shards = append(shards, testNewShard(t, i))
}
- return engine
+ return te.setInitializedShards(t, shards...)
+}
+
+func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+ for i := 0; i < num; i++ {
+ opts := shardOpts(i)
+ id, err := te.engine.AddShard(opts...)
+ require.NoError(t, err)
+ te.shardIDs = append(te.shardIDs, id)
+ }
+ return te
+}
+
+func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+ for i := 0; i < num; i++ {
+ defaultOpts := testDefaultShardOptions(t, i)
+ opts := append(defaultOpts, shardOpts(i)...)
+ id, err := te.engine.AddShard(opts...)
+ require.NoError(t, err)
+ te.shardIDs = append(te.shardIDs, id)
+ }
+ return te
}
func newStorages(root string, smallSize uint64) []blobstor.SubStorage {
@@ -145,8 +190,17 @@ func testNewShard(t testing.TB, id int) *shard.Shard {
sid, err := generateShardID()
require.NoError(t, err)
- s := shard.New(
- shard.WithID(sid),
+ shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t, id)...)
+ s := shard.New(shardOpts...)
+
+ require.NoError(t, s.Open())
+ require.NoError(t, s.Init(context.Background()))
+
+ return s
+}
+
+func testDefaultShardOptions(t testing.TB, id int) []shard.Option {
+ return []shard.Option{
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(
blobstor.WithStorages(
@@ -157,46 +211,5 @@ func testNewShard(t testing.TB, id int) *shard.Shard {
meta.WithPath(filepath.Join(t.Name(), fmt.Sprintf("%d.metabase", id))),
meta.WithPermissions(0700),
meta.WithEpochState(epochState{}),
- ))
-
- require.NoError(t, s.Open())
- require.NoError(t, s.Init(context.Background()))
-
- return s
-}
-
-func testEngineFromShardOpts(t *testing.T, num int, extraOpts []shard.Option) *StorageEngine {
- engine := New()
- for i := 0; i < num; i++ {
- _, err := engine.AddShard(append([]shard.Option{
- shard.WithBlobStorOptions(
- blobstor.WithStorages(
- newStorages(filepath.Join(t.Name(), fmt.Sprintf("blobstor%d", i)),
- 1<<20)),
- ),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(t.Name(), fmt.Sprintf("metabase%d", i))),
- meta.WithPermissions(0700),
- meta.WithEpochState(epochState{}),
- ),
- shard.WithPiloramaOptions(
- pilorama.WithPath(filepath.Join(t.Name(), fmt.Sprintf("pilorama%d", i)))),
- }, extraOpts...)...)
- require.NoError(t, err)
- }
-
- require.NoError(t, engine.Open())
- require.NoError(t, engine.Init(context.Background()))
-
- return engine
-}
-
-func testNewEngineWithShardNum(t *testing.T, num int) *StorageEngine {
- shards := make([]*shard.Shard, 0, num)
-
- for i := 0; i < num; i++ {
- shards = append(shards, testNewShard(t, i))
- }
-
- return testNewEngineWithShards(t, shards...)
+ )}
}
diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go
index 8a32c8b69..c9b194f6f 100644
--- a/pkg/local_object_storage/engine/error_test.go
+++ b/pkg/local_object_storage/engine/error_test.go
@@ -48,37 +48,39 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
t.Cleanup(func() { _ = os.RemoveAll(dir) })
}
- e := New(
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- WithShardPoolSize(1),
- WithErrorThreshold(errThreshold))
-
var testShards [2]*testShard
- for i := range testShards {
- storages, smallFileStorage, largeFileStorage := newTestStorages(filepath.Join(dir, strconv.Itoa(i)), errSmallSize)
- id, err := e.AddShard(
- shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- shard.WithBlobStorOptions(blobstor.WithStorages(storages)),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", i))),
- meta.WithPermissions(0700),
- meta.WithEpochState(epochState{}),
- ),
- shard.WithPiloramaOptions(
- pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", i))),
- pilorama.WithPerm(0700)))
- require.NoError(t, err)
-
- testShards[i] = &testShard{
- id: id,
- smallFileStorage: smallFileStorage,
- largeFileStorage: largeFileStorage,
- }
- }
+ te := testNewEngine(t,
+ WithShardPoolSize(1),
+ WithErrorThreshold(errThreshold),
+ ).
+ setShardsNumOpts(t, 2, func(id int) []shard.Option {
+ storages, smallFileStorage, largeFileStorage := newTestStorages(filepath.Join(dir, strconv.Itoa(id)), errSmallSize)
+ testShards[id] = &testShard{
+ smallFileStorage: smallFileStorage,
+ largeFileStorage: largeFileStorage,
+ }
+ return []shard.Option{
+ shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ shard.WithBlobStorOptions(blobstor.WithStorages(storages)),
+ shard.WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
+ meta.WithPermissions(0700),
+ meta.WithEpochState(epochState{}),
+ ),
+ shard.WithPiloramaOptions(
+ pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
+ pilorama.WithPerm(0700)),
+ }
+ })
+ e := te.engine
require.NoError(t, e.Open())
require.NoError(t, e.Init(context.Background()))
+ for i, id := range te.shardIDs {
+ testShards[i].id = id
+ }
+
return &testEngine{
ng: e,
dir: dir,
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 04d68d2e4..51abc4b1c 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -29,28 +29,23 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
require.NoError(t, err)
t.Cleanup(func() { _ = os.RemoveAll(dir) })
- e := New(
- WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- WithShardPoolSize(1))
-
- ids := make([]*shard.ID, shardNum)
-
- for i := range ids {
- ids[i], err = e.AddShard(
- shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- shard.WithBlobStorOptions(
- blobstor.WithStorages([]blobstor.SubStorage{{
- Storage: fstree.New(
- fstree.WithPath(filepath.Join(dir, strconv.Itoa(i))),
- fstree.WithDepth(1)),
- }})),
- shard.WithMetaBaseOptions(
- meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", i))),
- meta.WithPermissions(0700),
- meta.WithEpochState(epochState{}),
- ))
- require.NoError(t, err)
- }
+ te := testNewEngine(t, WithShardPoolSize(1)).
+ setShardsNumOpts(t, shardNum, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+ shard.WithBlobStorOptions(
+ blobstor.WithStorages([]blobstor.SubStorage{{
+ Storage: fstree.New(
+ fstree.WithPath(filepath.Join(dir, strconv.Itoa(id))),
+ fstree.WithDepth(1)),
+ }})),
+ shard.WithMetaBaseOptions(
+ meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
+ meta.WithPermissions(0700),
+ meta.WithEpochState(epochState{})),
+ }
+ })
+ e, ids := te.engine, te.shardIDs
require.NoError(t, e.Open())
require.NoError(t, e.Init(context.Background()))
diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go
index 1ddfedc5c..e2a1edc98 100644
--- a/pkg/local_object_storage/engine/head_test.go
+++ b/pkg/local_object_storage/engine/head_test.go
@@ -44,7 +44,7 @@ func TestHeadRaw(t *testing.T) {
s1 := testNewShard(t, 1)
s2 := testNewShard(t, 2)
- e := testNewEngineWithShards(t, s1, s2)
+ e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
defer e.Close()
var putPrmLeft shard.PutPrm
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index e3150c17f..4f8c96b99 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -39,7 +39,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
link.SetSplitID(splitID)
t.Run("delete small object", func(t *testing.T) {
- e := testNewEngineWithShardNum(t, 1)
+ e := testNewEngine(t).setShardsNum(t, 1).engine
defer e.Close()
err := Put(e, parent)
@@ -60,7 +60,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
s1 := testNewShard(t, 1)
s2 := testNewShard(t, 2)
- e := testNewEngineWithShards(t, s1, s2)
+ e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
defer e.Close()
var putChild shard.PutPrm
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index ad0eb1911..1261de9d4 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -16,7 +16,7 @@ import (
func TestListWithCursor(t *testing.T) {
s1 := testNewShard(t, 1)
s2 := testNewShard(t, 2)
- e := testNewEngineWithShards(t, s1, s2)
+ e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
t.Cleanup(func() {
e.Close()
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index 1014b2146..fd3b04ef0 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -45,15 +45,21 @@ func TestLockUserScenario(t *testing.T) {
tombForLockID := oidtest.ID()
tombObj.SetID(tombForLockID)
- e := testEngineFromShardOpts(t, 2, []shard.Option{
- shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
- pool, err := ants.NewPool(sz)
- require.NoError(t, err)
+ testEngine := testNewEngine(t).
+ setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ require.NoError(t, err)
- return pool
- }),
- shard.WithTombstoneSource(tss{lockerExpiresAfter}),
- })
+ return pool
+ }),
+ shard.WithTombstoneSource(tss{lockerExpiresAfter}),
+ }
+ })
+ e := testEngine.engine
+ require.NoError(t, e.Open())
+ require.NoError(t, e.Init(context.Background()))
t.Cleanup(func() {
_ = e.Close()
@@ -146,14 +152,20 @@ func TestLockExpiration(t *testing.T) {
// 3. lock expiration epoch is coming
// 4. after some delay the object is not locked anymore
- e := testEngineFromShardOpts(t, 2, []shard.Option{
- shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
- pool, err := ants.NewPool(sz)
- require.NoError(t, err)
+ testEngine := testNewEngine(t).
+ setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ require.NoError(t, err)
- return pool
- }),
- })
+ return pool
+ }),
+ }
+ })
+ e := testEngine.engine
+ require.NoError(t, e.Open())
+ require.NoError(t, e.Init(context.Background()))
t.Cleanup(func() {
_ = e.Close()
@@ -218,16 +230,20 @@ func TestLockForceRemoval(t *testing.T) {
// 5. the object is not locked anymore
var e *StorageEngine
- e = testEngineFromShardOpts(t, 2, []shard.Option{
- shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
- pool, err := ants.NewPool(sz)
- require.NoError(t, err)
-
- return pool
- }),
- shard.WithDeletedLockCallback(e.processDeletedLocks),
- })
+ e = testNewEngine(t).
+ setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
+ return []shard.Option{
+ shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+ pool, err := ants.NewPool(sz)
+ require.NoError(t, err)
+ return pool
+ }),
+ shard.WithDeletedLockCallback(e.processDeletedLocks),
+ }
+ }).engine
+ require.NoError(t, e.Open())
+ require.NoError(t, e.Init(context.Background()))
t.Cleanup(func() {
_ = e.Close()
_ = os.RemoveAll(t.Name())
diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go
index 67a006b5a..1bc0b880c 100644
--- a/pkg/local_object_storage/engine/shards_test.go
+++ b/pkg/local_object_storage/engine/shards_test.go
@@ -10,7 +10,8 @@ import (
func TestRemoveShard(t *testing.T) {
const numOfShards = 6
- e := testNewEngineWithShardNum(t, numOfShards)
+ te := testNewEngine(t).setShardsNum(t, numOfShards)
+ e, ids := te.engine, te.shardIDs
t.Cleanup(func() {
e.Close()
os.RemoveAll(t.Name())
@@ -22,12 +23,12 @@ func TestRemoveShard(t *testing.T) {
removedNum := numOfShards / 2
mSh := make(map[string]bool, numOfShards)
- for i, sh := range e.DumpInfo().Shards {
+ for i, id := range ids {
if i == removedNum {
break
}
- mSh[sh.ID.String()] = true
+ mSh[id.String()] = true
}
for id, remove := range mSh {
From 279261ace36ce24efbac0cd509b185cea06318d4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 14:52:13 +0300
Subject: [PATCH 0040/1943] [#217] containersvc: Refactor route passing
Resolve containedctx for routeCtx.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/container.go | 10 ++---
.../announcement/load/controller/calls.go | 8 ++--
.../load/controller/calls_test.go | 4 +-
.../announcement/load/controller/deps.go | 26 ++++++++---
.../announcement/load/controller/util.go | 6 +--
.../announcement/load/route/calls.go | 45 +++++--------------
.../container/announcement/load/route/deps.go | 23 +---------
.../load/route/placement/calls.go | 6 +--
.../announcement/load/route/router.go | 5 ++-
.../container/announcement/load/route/util.go | 3 +-
10 files changed, 54 insertions(+), 82 deletions(-)
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index 45c6e390c..7a88497eb 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -335,7 +335,7 @@ type remoteLoadAnnounceProvider struct {
deadEndProvider loadcontroller.WriterProvider
}
-func (r *remoteLoadAnnounceProvider) InitRemote(srv loadroute.ServerInfo) (loadcontroller.WriterProvider, error) {
+func (r *remoteLoadAnnounceProvider) InitRemote(srv loadcontroller.ServerInfo) (loadcontroller.WriterProvider, error) {
if srv == nil {
return r.deadEndProvider, nil
}
@@ -366,7 +366,7 @@ type remoteLoadAnnounceWriterProvider struct {
client client.Client
}
-func (p *remoteLoadAnnounceWriterProvider) InitWriter(ctx context.Context) (loadcontroller.Writer, error) {
+func (p *remoteLoadAnnounceWriterProvider) InitWriter([]loadcontroller.ServerInfo) (loadcontroller.Writer, error) {
return &remoteLoadAnnounceWriter{
client: p.client,
}, nil
@@ -536,7 +536,7 @@ func (c *usedSpaceService) ExternalAddresses() []string {
}
func (c *usedSpaceService) AnnounceUsedSpace(ctx context.Context, req *containerV2.AnnounceUsedSpaceRequest) (*containerV2.AnnounceUsedSpaceResponse, error) {
- var passedRoute []loadroute.ServerInfo
+ var passedRoute []loadcontroller.ServerInfo
for hdr := req.GetVerificationHeader(); hdr != nil; hdr = hdr.GetOrigin() {
passedRoute = append(passedRoute, &containerOnlyKeyRemoteServerInfo{
@@ -550,7 +550,7 @@ func (c *usedSpaceService) AnnounceUsedSpace(ctx context.Context, req *container
passedRoute = append(passedRoute, c)
- w, err := c.loadWriterProvider.InitWriter(loadroute.NewRouteContext(ctx, passedRoute))
+ w, err := c.loadWriterProvider.InitWriter(passedRoute)
if err != nil {
return nil, fmt.Errorf("could not initialize container's used space writer: %w", err)
}
@@ -615,7 +615,7 @@ func (l *loadPlacementBuilder) isNodeFromContainerKey(epoch uint64, cnr cid.ID,
}
func (c *usedSpaceService) processLoadValue(_ context.Context, a containerSDK.SizeEstimation,
- route []loadroute.ServerInfo, w loadcontroller.Writer) error {
+ route []loadcontroller.ServerInfo, w loadcontroller.Writer) error {
fromCnr, err := c.loadPlacementBuilder.isNodeFromContainerKey(a.Epoch(), a.Container(), route[0].PublicKey())
if err != nil {
return fmt.Errorf("could not verify that the sender belongs to the container: %w", err)
diff --git a/pkg/services/container/announcement/load/controller/calls.go b/pkg/services/container/announcement/load/controller/calls.go
index fde6913a7..8c5fbeacb 100644
--- a/pkg/services/container/announcement/load/controller/calls.go
+++ b/pkg/services/container/announcement/load/controller/calls.go
@@ -62,7 +62,7 @@ func (c *announceContext) announce() {
)
// initialize iterator over locally collected metrics
- metricsIterator, err = c.ctrl.prm.LocalMetrics.InitIterator(c.ctx)
+ metricsIterator, err = c.ctrl.prm.LocalMetrics.InitIterator()
if err != nil {
c.log.Debug("could not initialize iterator over locally collected metrics",
zap.String("error", err.Error()),
@@ -72,7 +72,7 @@ func (c *announceContext) announce() {
}
// initialize target of local announcements
- targetWriter, err := c.ctrl.prm.LocalAnnouncementTarget.InitWriter(c.ctx)
+ targetWriter, err := c.ctrl.prm.LocalAnnouncementTarget.InitWriter(nil)
if err != nil {
c.log.Debug("could not initialize announcement accumulator",
zap.String("error", err.Error()),
@@ -268,7 +268,7 @@ func (c *stopContext) report() {
)
// initialize iterator over locally accumulated announcements
- localIterator, err = c.ctrl.prm.AnnouncementAccumulator.InitIterator(c.ctx)
+ localIterator, err = c.ctrl.prm.AnnouncementAccumulator.InitIterator()
if err != nil {
c.log.Debug("could not initialize iterator over locally accumulated announcements",
zap.String("error", err.Error()),
@@ -278,7 +278,7 @@ func (c *stopContext) report() {
}
// initialize final destination of load estimations
- resultWriter, err := c.ctrl.prm.ResultReceiver.InitWriter(c.ctx)
+ resultWriter, err := c.ctrl.prm.ResultReceiver.InitWriter(nil)
if err != nil {
c.log.Debug("could not initialize result target",
zap.String("error", err.Error()),
diff --git a/pkg/services/container/announcement/load/controller/calls_test.go b/pkg/services/container/announcement/load/controller/calls_test.go
index 8e4a3cedd..4a791f4bd 100644
--- a/pkg/services/container/announcement/load/controller/calls_test.go
+++ b/pkg/services/container/announcement/load/controller/calls_test.go
@@ -28,7 +28,7 @@ func newTestStorage() *testAnnouncementStorage {
}
}
-func (s *testAnnouncementStorage) InitIterator(context.Context) (loadcontroller.Iterator, error) {
+func (s *testAnnouncementStorage) InitIterator() (loadcontroller.Iterator, error) {
if s.i != nil {
return s.i, nil
}
@@ -53,7 +53,7 @@ func (s *testAnnouncementStorage) Iterate(f loadcontroller.UsedSpaceFilter, h lo
return nil
}
-func (s *testAnnouncementStorage) InitWriter(context.Context) (loadcontroller.Writer, error) {
+func (s *testAnnouncementStorage) InitWriter([]loadcontroller.ServerInfo) (loadcontroller.Writer, error) {
if s.w != nil {
return s.w, nil
}
diff --git a/pkg/services/container/announcement/load/controller/deps.go b/pkg/services/container/announcement/load/controller/deps.go
index 7f7a270b9..99da8594f 100644
--- a/pkg/services/container/announcement/load/controller/deps.go
+++ b/pkg/services/container/announcement/load/controller/deps.go
@@ -45,7 +45,7 @@ type IteratorProvider interface {
//
// Implementations can have different logic for different
// contexts, so specific ones may document their own behavior.
- InitIterator(context.Context) (Iterator, error)
+ InitIterator() (Iterator, error)
}
// Writer describes the interface for storing container.SizeEstimation values.
@@ -80,8 +80,24 @@ type WriterProvider interface {
//
// Initialization problems are reported via error.
// If no error was returned, then the Writer must not be nil.
- //
- // Implementations can have different logic for different
- // contexts, so specific ones may document their own behavior.
- InitWriter(context.Context) (Writer, error)
+ InitWriter(route []ServerInfo) (Writer, error)
+}
+
+// ServerInfo describes a set of
+// characteristics of a point in a route.
+type ServerInfo interface {
+ // PublicKey returns public key of the node
+ // from the route in a binary representation.
+ PublicKey() []byte
+
+ // Iterates over network addresses of the node
+ // in the route. Breaks iterating on true return
+ // of the handler.
+ IterateAddresses(func(string) bool)
+
+ // Returns number of server's network addresses.
+ NumberOfAddresses() int
+
+ // ExternalAddresses returns external node's addresses.
+ ExternalAddresses() []string
}
diff --git a/pkg/services/container/announcement/load/controller/util.go b/pkg/services/container/announcement/load/controller/util.go
index fb356393d..223de13ba 100644
--- a/pkg/services/container/announcement/load/controller/util.go
+++ b/pkg/services/container/announcement/load/controller/util.go
@@ -1,8 +1,6 @@
package loadcontroller
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
)
@@ -17,11 +15,11 @@ type storageWrapper struct {
i Iterator
}
-func (s storageWrapper) InitIterator(context.Context) (Iterator, error) {
+func (s storageWrapper) InitIterator() (Iterator, error) {
return s.i, nil
}
-func (s storageWrapper) InitWriter(context.Context) (Writer, error) {
+func (s storageWrapper) InitWriter([]ServerInfo) (Writer, error) {
return s.w, nil
}
diff --git a/pkg/services/container/announcement/load/route/calls.go b/pkg/services/container/announcement/load/route/calls.go
index 1cdd65911..83c368f57 100644
--- a/pkg/services/container/announcement/load/route/calls.go
+++ b/pkg/services/container/announcement/load/route/calls.go
@@ -10,26 +10,9 @@ import (
"go.uber.org/zap"
)
-// nolint: containedctx
-type routeContext struct {
- context.Context
-
- passedRoute []ServerInfo
-}
-
-// NewRouteContext wraps the main context of value passing with its traversal route.
-//
-// Passing the result to Router.InitWriter method will allow you to continue this route.
-func NewRouteContext(ctx context.Context, passed []ServerInfo) context.Context {
- return &routeContext{
- Context: ctx,
- passedRoute: passed,
- }
-}
-
// InitWriter initializes and returns Writer that sends each value to its next route point.
//
-// If ctx was created by NewRouteContext, then the traversed route is taken into account,
+// If route is present, then it is taken into account,
// and the value will be sent to its continuation. Otherwise, the route will be laid
// from scratch and the value will be sent to its primary point.
//
@@ -41,22 +24,14 @@ func NewRouteContext(ctx context.Context, passed []ServerInfo) context.Context {
// runtime and never returns an error.
//
// Always returns nil error.
-func (r *Router) InitWriter(ctx context.Context) (loadcontroller.Writer, error) {
- var (
- routeCtx *routeContext
- ok bool
- )
-
- if routeCtx, ok = ctx.(*routeContext); !ok {
- routeCtx = &routeContext{
- Context: ctx,
- passedRoute: []ServerInfo{r.localSrvInfo},
- }
+func (r *Router) InitWriter(route []loadcontroller.ServerInfo) (loadcontroller.Writer, error) {
+ if len(route) == 0 {
+ route = []loadcontroller.ServerInfo{r.localSrvInfo}
}
return &loadWriter{
router: r,
- ctx: routeCtx,
+ route: route,
mRoute: make(map[routeKey]*valuesRoute),
mServers: make(map[string]loadcontroller.Writer),
}, nil
@@ -69,7 +44,7 @@ type routeKey struct {
}
type valuesRoute struct {
- route []ServerInfo
+ route []loadcontroller.ServerInfo
values []container.SizeEstimation
}
@@ -77,7 +52,7 @@ type valuesRoute struct {
type loadWriter struct {
router *Router
- ctx *routeContext
+ route []loadcontroller.ServerInfo
routeMtx sync.RWMutex
mRoute map[routeKey]*valuesRoute
@@ -96,11 +71,11 @@ func (w *loadWriter) Put(a container.SizeEstimation) error {
routeValues, ok := w.mRoute[key]
if !ok {
- route, err := w.router.routeBuilder.NextStage(a, w.ctx.passedRoute)
+ route, err := w.router.routeBuilder.NextStage(a, w.route)
if err != nil {
return err
} else if len(route) == 0 {
- route = []ServerInfo{nil}
+ route = []loadcontroller.ServerInfo{nil}
}
routeValues = &valuesRoute{
@@ -129,7 +104,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error {
continue // best effort
}
- remoteWriter, err = provider.InitWriter(w.ctx)
+ remoteWriter, err = provider.InitWriter(w.route)
if err != nil {
w.router.log.Debug("could not initialize writer",
zap.String("error", err.Error()),
diff --git a/pkg/services/container/announcement/load/route/deps.go b/pkg/services/container/announcement/load/route/deps.go
index 429cda3eb..b255900f7 100644
--- a/pkg/services/container/announcement/load/route/deps.go
+++ b/pkg/services/container/announcement/load/route/deps.go
@@ -5,25 +5,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
)
-// ServerInfo describes a set of
-// characteristics of a point in a route.
-type ServerInfo interface {
- // PublicKey returns public key of the node
- // from the route in a binary representation.
- PublicKey() []byte
-
- // Iterates over network addresses of the node
- // in the route. Breaks iterating on true return
- // of the handler.
- IterateAddresses(func(string) bool)
-
- // Returns number of server's network addresses.
- NumberOfAddresses() int
-
- // ExternalAddresses returns external node's addresses.
- ExternalAddresses() []string
-}
-
// Builder groups methods to route values in the network.
type Builder interface {
// NextStage must return next group of route points for the value a
@@ -36,7 +17,7 @@ type Builder interface {
// in that list (means that point is the last point in one of the route groups),
// returned route must contain nil point that should be interpreted as signal to,
// among sending to other route points, save the announcement in that point.
- NextStage(a container.SizeEstimation, passed []ServerInfo) ([]ServerInfo, error)
+ NextStage(a container.SizeEstimation, passed []loadcontroller.ServerInfo) ([]loadcontroller.ServerInfo, error)
}
// RemoteWriterProvider describes the component
@@ -46,5 +27,5 @@ type RemoteWriterProvider interface {
// corresponding to info.
//
// Nil info matches the end of the route.
- InitRemote(info ServerInfo) (loadcontroller.WriterProvider, error)
+ InitRemote(info loadcontroller.ServerInfo) (loadcontroller.WriterProvider, error)
}
diff --git a/pkg/services/container/announcement/load/route/placement/calls.go b/pkg/services/container/announcement/load/route/placement/calls.go
index 3db0d967c..68bdb43a7 100644
--- a/pkg/services/container/announcement/load/route/placement/calls.go
+++ b/pkg/services/container/announcement/load/route/placement/calls.go
@@ -5,7 +5,7 @@ import (
"fmt"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- loadroute "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/route"
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
)
@@ -15,7 +15,7 @@ import (
// If passed route has more than one point, then endpoint of the route is reached.
//
// The traversed route is not checked, it is assumed to be correct.
-func (b *Builder) NextStage(a container.SizeEstimation, passed []loadroute.ServerInfo) ([]loadroute.ServerInfo, error) {
+func (b *Builder) NextStage(a container.SizeEstimation, passed []loadcontroller.ServerInfo) ([]loadcontroller.ServerInfo, error) {
if len(passed) > 1 {
return nil, nil
}
@@ -27,7 +27,7 @@ func (b *Builder) NextStage(a container.SizeEstimation, passed []loadroute.Serve
return nil, fmt.Errorf("could not build placement %s: %w", cnr, err)
}
- res := make([]loadroute.ServerInfo, 0, len(placement))
+ res := make([]loadcontroller.ServerInfo, 0, len(placement))
for i := range placement {
if len(placement[i]) == 0 {
diff --git a/pkg/services/container/announcement/load/route/router.go b/pkg/services/container/announcement/load/route/router.go
index 6169a2aee..c8f784b16 100644
--- a/pkg/services/container/announcement/load/route/router.go
+++ b/pkg/services/container/announcement/load/route/router.go
@@ -3,6 +3,7 @@ package loadroute
import (
"fmt"
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)
@@ -15,7 +16,7 @@ type Prm struct {
// Characteristics of the local node's server.
//
// Must not be nil.
- LocalServerInfo ServerInfo
+ LocalServerInfo loadcontroller.ServerInfo
// Component for sending values to a fixed route point.
//
@@ -46,7 +47,7 @@ type Router struct {
routeBuilder Builder
- localSrvInfo ServerInfo
+ localSrvInfo loadcontroller.ServerInfo
}
const invalidPrmValFmt = "invalid parameter %s (%T):%v"
diff --git a/pkg/services/container/announcement/load/route/util.go b/pkg/services/container/announcement/load/route/util.go
index fca1e5796..ea0f51aad 100644
--- a/pkg/services/container/announcement/load/route/util.go
+++ b/pkg/services/container/announcement/load/route/util.go
@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
+ loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
)
@@ -12,7 +13,7 @@ var errWrongRoute = errors.New("wrong route")
// CheckRoute checks if the route is a route correctly constructed by the builder for value a.
//
// Returns nil if route is correct, otherwise an error clarifying the inconsistency.
-func CheckRoute(builder Builder, a container.SizeEstimation, route []ServerInfo) error {
+func CheckRoute(builder Builder, a container.SizeEstimation, route []loadcontroller.ServerInfo) error {
for i := 1; i < len(route); i++ {
servers, err := builder.NextStage(a, route[:i])
if err != nil {
From 206458c841c74555b11685002e8e4826cd167621 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 16:28:09 +0300
Subject: [PATCH 0041/1943] [#217] containersvc: Resolve containedctx linter
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/container.go | 10 +--
cmd/frostfs-node/main.go | 2 +-
.../announcement/load/controller/calls.go | 70 +++++++++----------
.../load/controller/calls_test.go | 8 +--
4 files changed, 43 insertions(+), 47 deletions(-)
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index 7a88497eb..6c864431d 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -39,7 +39,7 @@ const (
stopEstimationNotifyEvent = "StopEstimation"
)
-func initContainerService(c *cfg) {
+func initContainerService(ctx context.Context, c *cfg) {
// container wrapper that tries to invoke notary
// requests if chain is configured so
wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary())
@@ -77,7 +77,7 @@ func initContainerService(c *cfg) {
loadroute.WithLogger(c.log),
)
- setLoadController(c, loadRouter, loadAccumulator)
+ setLoadController(ctx, c, loadRouter, loadAccumulator)
server := containerTransportGRPC.New(
containerService.NewSignService(
@@ -180,7 +180,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
return cnrRdr, cnrWrt
}
-func setLoadController(c *cfg, loadRouter *loadroute.Router, loadAccumulator *loadstorage.Storage) {
+func setLoadController(ctx context.Context, c *cfg, loadRouter *loadroute.Router, loadAccumulator *loadstorage.Storage) {
pubKey := c.key.PublicKey().Bytes()
// container wrapper that always sends non-notary
@@ -211,14 +211,14 @@ func setLoadController(c *cfg, loadRouter *loadroute.Router, loadAccumulator *lo
setContainerNotificationParser(c, startEstimationNotifyEvent, containerEvent.ParseStartEstimation)
addContainerAsyncNotificationHandler(c, startEstimationNotifyEvent, func(ev event.Event) {
- ctrl.Start(loadcontroller.StartPrm{
+ ctrl.Start(ctx, loadcontroller.StartPrm{
Epoch: ev.(containerEvent.StartEstimation).Epoch(),
})
})
setContainerNotificationParser(c, stopEstimationNotifyEvent, containerEvent.ParseStopEstimation)
addContainerAsyncNotificationHandler(c, stopEstimationNotifyEvent, func(ev event.Event) {
- ctrl.Stop(loadcontroller.StopPrm{
+ ctrl.Stop(ctx, loadcontroller.StopPrm{
Epoch: ev.(containerEvent.StopEstimation).Epoch(),
})
})
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index 7768409b0..fdb003220 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -97,7 +97,7 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(c, "gRPC", initGRPC)
initAndLog(c, "netmap", initNetmapService)
initAndLog(c, "accounting", initAccountingService)
- initAndLog(c, "container", initContainerService)
+ initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(c, "session", initSessionService)
initAndLog(c, "reputation", initReputationService)
initAndLog(c, "notification", initNotifications)
diff --git a/pkg/services/container/announcement/load/controller/calls.go b/pkg/services/container/announcement/load/controller/calls.go
index 8c5fbeacb..f5d5d1a3d 100644
--- a/pkg/services/container/announcement/load/controller/calls.go
+++ b/pkg/services/container/announcement/load/controller/calls.go
@@ -15,18 +15,15 @@ type StartPrm struct {
Epoch uint64
}
-// nolint: containedctx
type commonContext struct {
epoch uint64
ctrl *Controller
log *logger.Logger
-
- ctx context.Context
}
-type announceContext struct {
+type announcer struct {
commonContext
}
@@ -39,21 +36,22 @@ type announceContext struct {
//
// Each call acquires an announcement context for an Epoch parameter.
// At the very end of the operation, the context is released.
-func (c *Controller) Start(prm StartPrm) {
+func (c *Controller) Start(ctx context.Context, prm StartPrm) {
+ var announcer *announcer
// acquire announcement
- execCtx := c.acquireAnnouncement(prm)
- if execCtx == nil {
+ ctx, announcer = c.acquireAnnouncement(ctx, prm)
+ if announcer == nil {
return
}
// finally stop and free the announcement
- defer execCtx.freeAnnouncement()
+ defer announcer.freeAnnouncement()
// announce local values
- execCtx.announce()
+ announcer.announce(ctx)
}
-func (c *announceContext) announce() {
+func (c *announcer) announce(ctx context.Context) {
c.log.Debug("starting to announce the values of the metrics")
var (
@@ -100,7 +98,7 @@ func (c *announceContext) announce() {
}
// finish writing
- err = targetWriter.Close(c.ctx)
+ err = targetWriter.Close(ctx)
if err != nil {
c.log.Debug("could not finish writing local announcements",
zap.String("error", err.Error()),
@@ -112,35 +110,32 @@ func (c *announceContext) announce() {
c.log.Debug("trust announcement successfully finished")
}
-func (c *Controller) acquireAnnouncement(prm StartPrm) *announceContext {
- var ctx context.Context
-
+func (c *Controller) acquireAnnouncement(ctx context.Context, prm StartPrm) (context.Context, *announcer) {
+ started := true
c.announceMtx.Lock()
-
{
if cancel := c.mAnnounceCtx[prm.Epoch]; cancel == nil {
- ctx, cancel = context.WithCancel(context.Background())
+ ctx, cancel = context.WithCancel(ctx)
c.mAnnounceCtx[prm.Epoch] = cancel
+ started = false
}
}
-
c.announceMtx.Unlock()
log := &logger.Logger{Logger: c.opts.log.With(
zap.Uint64("epoch", prm.Epoch),
)}
- if ctx == nil {
+ if started {
log.Debug("announcement is already started")
- return nil
+ return ctx, nil
}
- return &announceContext{
+ return ctx, &announcer{
commonContext: commonContext{
epoch: prm.Epoch,
ctrl: c,
log: log,
- ctx: ctx,
},
}
}
@@ -176,7 +171,7 @@ type StopPrm struct {
Epoch uint64
}
-type stopContext struct {
+type reporter struct {
commonContext
}
@@ -188,31 +183,32 @@ type stopContext struct {
//
// Each call acquires a report context for an Epoch parameter.
// At the very end of the operation, the context is released.
-func (c *Controller) Stop(prm StopPrm) {
- execCtx := c.acquireReport(prm)
- if execCtx == nil {
+func (c *Controller) Stop(ctx context.Context, prm StopPrm) {
+ var reporter *reporter
+ ctx, reporter = c.acquireReport(ctx, prm)
+ if reporter == nil {
return
}
// finally stop and free reporting
- defer execCtx.freeReport()
+ defer reporter.freeReport()
// interrupt announcement
- execCtx.freeAnnouncement()
+ reporter.freeAnnouncement()
// report the estimations
- execCtx.report()
+ reporter.report(ctx)
}
-func (c *Controller) acquireReport(prm StopPrm) *stopContext {
- var ctx context.Context
+func (c *Controller) acquireReport(ctx context.Context, prm StopPrm) (context.Context, *reporter) {
+ started := true
c.reportMtx.Lock()
-
{
if cancel := c.mReportCtx[prm.Epoch]; cancel == nil {
- ctx, cancel = context.WithCancel(context.Background())
+ ctx, cancel = context.WithCancel(ctx)
c.mReportCtx[prm.Epoch] = cancel
+ started = false
}
}
@@ -222,12 +218,12 @@ func (c *Controller) acquireReport(prm StopPrm) *stopContext {
zap.Uint64("epoch", prm.Epoch),
)}
- if ctx == nil {
+ if started {
log.Debug("report is already started")
- return nil
+ return ctx, nil
}
- return &stopContext{
+ return ctx, &reporter{
commonContext: commonContext{
epoch: prm.Epoch,
ctrl: c,
@@ -261,7 +257,7 @@ func (c *commonContext) freeReport() {
}
}
-func (c *stopContext) report() {
+func (c *reporter) report(ctx context.Context) {
var (
localIterator Iterator
err error
@@ -301,7 +297,7 @@ func (c *stopContext) report() {
}
// finish writing
- err = resultWriter.Close(c.ctx)
+ err = resultWriter.Close(ctx)
if err != nil {
c.log.Debug("could not finish writing load estimations",
zap.String("error", err.Error()),
diff --git a/pkg/services/container/announcement/load/controller/calls_test.go b/pkg/services/container/announcement/load/controller/calls_test.go
index 4a791f4bd..6ca24e869 100644
--- a/pkg/services/container/announcement/load/controller/calls_test.go
+++ b/pkg/services/container/announcement/load/controller/calls_test.go
@@ -143,12 +143,12 @@ func TestSimpleScenario(t *testing.T) {
// start both controllers
go func() {
- ctrlN1.Start(startPrm)
+ ctrlN1.Start(context.Background(), startPrm)
wg.Done()
}()
go func() {
- ctrlN2.Start(startPrm)
+ ctrlN2.Start(context.Background(), startPrm)
wg.Done()
}()
@@ -161,12 +161,12 @@ func TestSimpleScenario(t *testing.T) {
// stop both controllers
go func() {
- ctrlN1.Stop(stopPrm)
+ ctrlN1.Stop(context.Background(), stopPrm)
wg.Done()
}()
go func() {
- ctrlN2.Stop(stopPrm)
+ ctrlN2.Stop(context.Background(), stopPrm)
wg.Done()
}()
From 1bf21dbb473d319308cf999a5fe5d2688f27fee6 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 31 Mar 2023 14:49:40 +0300
Subject: [PATCH 0042/1943] [#193] getsvc: Resolve context linters
Resolve containedctx and contextcheck linters.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/assemble.go | 10 ++++------
pkg/services/object/get/container.go | 8 ++++----
pkg/services/object/get/exec.go | 21 +++++++--------------
pkg/services/object/get/get.go | 18 ++++++++----------
pkg/services/object/get/get_test.go | 2 +-
pkg/services/object/get/local.go | 5 +++--
pkg/services/object/get/remote.go | 5 ++---
pkg/services/object/get/service.go | 4 +++-
pkg/services/object/get/util.go | 14 +++++++-------
9 files changed, 39 insertions(+), 48 deletions(-)
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index ebae18eb5..db71df6a4 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -10,7 +10,7 @@ import (
"go.uber.org/zap"
)
-func (exec *execCtx) assemble() {
+func (exec *execCtx) assemble(ctx context.Context) {
if !exec.canAssemble() {
exec.log.Debug("can not assemble the object")
return
@@ -49,7 +49,7 @@ func (exec *execCtx) assemble() {
zap.Uint64("range_length", exec.ctxRange().GetLength()),
)
- obj, err := assembler.Assemble(exec.context(), exec.prm.objWriter)
+ obj, err := assembler.Assemble(ctx, exec.prm.objWriter)
if err != nil {
exec.log.Warn("failed to assemble splitted object",
zap.Error(err),
@@ -107,8 +107,7 @@ func (exec *execCtx) HeadObject(ctx context.Context, id oid.ID) (*objectSDK.Obje
w := NewSimpleObjectWriter()
prm.SetHeaderWriter(w)
- //nolint: contextcheck
- err := exec.svc.Head(exec.context(), prm)
+ err := exec.svc.Head(ctx, prm)
if err != nil {
return nil, err
@@ -128,8 +127,7 @@ func (exec *execCtx) GetObject(ctx context.Context, id oid.ID, rng *objectSDK.Ra
p.addr.SetContainer(exec.containerID())
p.addr.SetObject(id)
- //nolint: contextcheck
- statusError := exec.svc.get(exec.context(), p.commonPrm, withPayloadRange(rng))
+ statusError := exec.svc.get(ctx, p.commonPrm, withPayloadRange(rng))
if statusError.err != nil {
return nil, statusError.err
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index 882861129..cfb538d38 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -7,7 +7,7 @@ import (
"go.uber.org/zap"
)
-func (exec *execCtx) executeOnContainer() {
+func (exec *execCtx) executeOnContainer(ctx context.Context) {
if exec.isLocal() {
exec.log.Debug("return result directly")
return
@@ -26,7 +26,7 @@ func (exec *execCtx) executeOnContainer() {
}
for {
- if exec.processCurrentEpoch() {
+ if exec.processCurrentEpoch(ctx) {
break
}
@@ -42,7 +42,7 @@ func (exec *execCtx) executeOnContainer() {
}
}
-func (exec *execCtx) processCurrentEpoch() bool {
+func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
exec.log.Debug("process epoch",
zap.Uint64("number", exec.curProcEpoch),
)
@@ -52,7 +52,7 @@ func (exec *execCtx) processCurrentEpoch() bool {
return true
}
- ctx, cancel := context.WithCancel(exec.context())
+ ctx, cancel := context.WithCancel(ctx)
defer cancel()
exec.status = statusUndefined
diff --git a/pkg/services/object/get/exec.go b/pkg/services/object/get/exec.go
index 9858b32b2..2ba014574 100644
--- a/pkg/services/object/get/exec.go
+++ b/pkg/services/object/get/exec.go
@@ -19,12 +19,9 @@ type statusError struct {
err error
}
-// nolint: containedctx
type execCtx struct {
svc *Service
- ctx context.Context
-
prm RangePrm
statusError
@@ -80,10 +77,6 @@ func (exec *execCtx) setLogger(l *logger.Logger) {
)}
}
-func (exec execCtx) context() context.Context {
- return exec.ctx
-}
-
func (exec execCtx) isLocal() bool {
return exec.prm.common.LocalOnly()
}
@@ -217,13 +210,13 @@ func mergeSplitInfo(dst, src *objectSDK.SplitInfo) {
}
}
-func (exec *execCtx) writeCollectedHeader() bool {
+func (exec *execCtx) writeCollectedHeader(ctx context.Context) bool {
if exec.ctxRange() != nil {
return true
}
err := exec.prm.objWriter.WriteHeader(
- exec.context(),
+ ctx,
exec.collectedObject.CutPayload(),
)
@@ -243,12 +236,12 @@ func (exec *execCtx) writeCollectedHeader() bool {
return exec.status == statusOK
}
-func (exec *execCtx) writeObjectPayload(obj *objectSDK.Object) bool {
+func (exec *execCtx) writeObjectPayload(ctx context.Context, obj *objectSDK.Object) bool {
if exec.headOnly() {
return true
}
- err := exec.prm.objWriter.WriteChunk(exec.context(), obj.Payload())
+ err := exec.prm.objWriter.WriteChunk(ctx, obj.Payload())
switch {
default:
@@ -266,9 +259,9 @@ func (exec *execCtx) writeObjectPayload(obj *objectSDK.Object) bool {
return err == nil
}
-func (exec *execCtx) writeCollectedObject() {
- if ok := exec.writeCollectedHeader(); ok {
- exec.writeObjectPayload(exec.collectedObject)
+func (exec *execCtx) writeCollectedObject(ctx context.Context) {
+ if ok := exec.writeCollectedHeader(ctx); ok {
+ exec.writeObjectPayload(ctx, exec.collectedObject)
}
}
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index cdb2d96fd..0f5983e99 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -65,7 +65,6 @@ func (s *Service) Head(ctx context.Context, prm HeadPrm) error {
func (s *Service) get(ctx context.Context, prm commonPrm, opts ...execOption) statusError {
exec := &execCtx{
svc: s,
- ctx: ctx,
prm: RangePrm{
commonPrm: prm,
},
@@ -78,22 +77,21 @@ func (s *Service) get(ctx context.Context, prm commonPrm, opts ...execOption) st
exec.setLogger(s.log)
- //nolint: contextcheck
- exec.execute()
+ exec.execute(ctx)
return exec.statusError
}
-func (exec *execCtx) execute() {
+func (exec *execCtx) execute(ctx context.Context) {
exec.log.Debug("serving request...")
// perform local operation
- exec.executeLocal()
+ exec.executeLocal(ctx)
- exec.analyzeStatus(true)
+ exec.analyzeStatus(ctx, true)
}
-func (exec *execCtx) analyzeStatus(execCnr bool) {
+func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
switch exec.status {
case statusOK:
@@ -102,7 +100,7 @@ func (exec *execCtx) analyzeStatus(execCnr bool) {
exec.log.Debug("requested object was marked as removed")
case statusVIRTUAL:
exec.log.Debug("requested object is virtual")
- exec.assemble()
+ exec.assemble(ctx)
case statusOutOfRange:
exec.log.Debug("requested range is out of object bounds")
default:
@@ -111,8 +109,8 @@ func (exec *execCtx) analyzeStatus(execCnr bool) {
)
if execCnr {
- exec.executeOnContainer()
- exec.analyzeStatus(false)
+ exec.executeOnContainer(ctx)
+ exec.analyzeStatus(ctx, false)
}
}
}
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 36a0e4976..3d1a95cbb 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -117,7 +117,7 @@ func newTestClient() *testClient {
}
}
-func (c *testClient) getObject(exec *execCtx, _ client.NodeInfo) (*objectSDK.Object, error) {
+func (c *testClient) getObject(ctx context.Context, exec *execCtx, _ client.NodeInfo) (*objectSDK.Object, error) {
v, ok := c.results[exec.address().EncodeToString()]
if !ok {
var errNotFound apistatus.ObjectNotFound
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index f526af4e6..a6a77729c 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -1,6 +1,7 @@
package getsvc
import (
+ "context"
"errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -8,7 +9,7 @@ import (
"go.uber.org/zap"
)
-func (exec *execCtx) executeLocal() {
+func (exec *execCtx) executeLocal(ctx context.Context) {
var err error
exec.collectedObject, err = exec.svc.localStorage.get(exec)
@@ -28,7 +29,7 @@ func (exec *execCtx) executeLocal() {
case err == nil:
exec.status = statusOK
exec.err = nil
- exec.writeCollectedObject()
+ exec.writeCollectedObject(ctx)
case errors.As(err, &errRemoved):
exec.status = statusINHUMED
exec.err = errRemoved
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index fbfb01bcd..1532bade0 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -18,7 +18,7 @@ func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool
return true
}
- obj, err := client.getObject(exec, info)
+ obj, err := client.getObject(ctx, exec, info)
var errSplitInfo *objectSDK.SplitInfoError
var errRemoved *apistatus.ObjectAlreadyRemoved
@@ -43,8 +43,7 @@ func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool
// has already been streamed to the requesting party
if obj != nil {
exec.collectedObject = obj
- //nolint: contextcheck
- exec.writeCollectedObject()
+ exec.writeCollectedObject(ctx)
}
case errors.As(err, &errRemoved):
exec.status = statusINHUMED
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index e69ab4f0f..dfa3b48ac 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -1,6 +1,8 @@
package getsvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
@@ -22,7 +24,7 @@ type Service struct {
type Option func(*cfg)
type getClient interface {
- getObject(*execCtx, client.NodeInfo) (*object.Object, error)
+ getObject(context.Context, *execCtx, client.NodeInfo) (*object.Object, error)
}
type cfg struct {
diff --git a/pkg/services/object/get/util.go b/pkg/services/object/get/util.go
index d647b07f6..08c738280 100644
--- a/pkg/services/object/get/util.go
+++ b/pkg/services/object/get/util.go
@@ -88,7 +88,7 @@ func (c *clientCacheWrapper) get(info coreclient.NodeInfo) (getClient, error) {
}
// nolint: funlen
-func (c *clientWrapper) getObject(exec *execCtx, info coreclient.NodeInfo) (*object.Object, error) {
+func (c *clientWrapper) getObject(ctx context.Context, exec *execCtx, info coreclient.NodeInfo) (*object.Object, error) {
if exec.isForwardingEnabled() {
return exec.prm.forwarder(info, c.client)
}
@@ -101,7 +101,7 @@ func (c *clientWrapper) getObject(exec *execCtx, info coreclient.NodeInfo) (*obj
if exec.headOnly() {
var prm internalclient.HeadObjectPrm
- prm.SetContext(exec.context())
+ prm.SetContext(ctx)
prm.SetClient(c.client)
prm.SetTTL(exec.prm.common.TTL())
prm.SetNetmapEpoch(exec.curProcEpoch)
@@ -127,7 +127,7 @@ func (c *clientWrapper) getObject(exec *execCtx, info coreclient.NodeInfo) (*obj
if rng := exec.ctxRange(); rng != nil {
var prm internalclient.PayloadRangePrm
- prm.SetContext(exec.context())
+ prm.SetContext(ctx)
prm.SetClient(c.client)
prm.SetTTL(exec.prm.common.TTL())
prm.SetNetmapEpoch(exec.curProcEpoch)
@@ -148,7 +148,7 @@ func (c *clientWrapper) getObject(exec *execCtx, info coreclient.NodeInfo) (*obj
if errors.As(err, &errAccessDenied) {
// Current spec allows other storage node to deny access,
// fallback to GET here.
- obj, err := c.get(exec, key)
+ obj, err := c.get(ctx, exec, key)
if err != nil {
return nil, err
}
@@ -169,13 +169,13 @@ func (c *clientWrapper) getObject(exec *execCtx, info coreclient.NodeInfo) (*obj
return payloadOnlyObject(res.PayloadRange()), nil
}
- return c.get(exec, key)
+ return c.get(ctx, exec, key)
}
-func (c *clientWrapper) get(exec *execCtx, key *ecdsa.PrivateKey) (*object.Object, error) {
+func (c *clientWrapper) get(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey) (*object.Object, error) {
var prm internalclient.GetObjectPrm
- prm.SetContext(exec.context())
+ prm.SetContext(ctx)
prm.SetClient(c.client)
prm.SetTTL(exec.prm.common.TTL())
prm.SetNetmapEpoch(exec.curProcEpoch)
From 91ead04fa47ca2827e5f234b037766eb81b149ec Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 31 Mar 2023 15:44:18 +0300
Subject: [PATCH 0043/1943] [#193] getsvc: Resolve funlen linter
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/util.go | 137 +++++++++++++++++---------------
1 file changed, 72 insertions(+), 65 deletions(-)
diff --git a/pkg/services/object/get/util.go b/pkg/services/object/get/util.go
index 08c738280..a68232c9a 100644
--- a/pkg/services/object/get/util.go
+++ b/pkg/services/object/get/util.go
@@ -87,7 +87,6 @@ func (c *clientCacheWrapper) get(info coreclient.NodeInfo) (getClient, error) {
}, nil
}
-// nolint: funlen
func (c *clientWrapper) getObject(ctx context.Context, exec *execCtx, info coreclient.NodeInfo) (*object.Object, error) {
if exec.isForwardingEnabled() {
return exec.prm.forwarder(info, c.client)
@@ -99,79 +98,87 @@ func (c *clientWrapper) getObject(ctx context.Context, exec *execCtx, info corec
}
if exec.headOnly() {
- var prm internalclient.HeadObjectPrm
-
- prm.SetContext(ctx)
- prm.SetClient(c.client)
- prm.SetTTL(exec.prm.common.TTL())
- prm.SetNetmapEpoch(exec.curProcEpoch)
- prm.SetAddress(exec.address())
- prm.SetPrivateKey(key)
- prm.SetSessionToken(exec.prm.common.SessionToken())
- prm.SetBearerToken(exec.prm.common.BearerToken())
- prm.SetXHeaders(exec.prm.common.XHeaders())
-
- if exec.isRaw() {
- prm.SetRawFlag()
- }
-
- res, err := internalclient.HeadObject(prm)
- if err != nil {
- return nil, err
- }
-
- return res.Header(), nil
+ return c.getHeadOnly(ctx, exec, key)
}
// we don't specify payload writer because we accumulate
// the object locally (even huge).
if rng := exec.ctxRange(); rng != nil {
- var prm internalclient.PayloadRangePrm
-
- prm.SetContext(ctx)
- prm.SetClient(c.client)
- prm.SetTTL(exec.prm.common.TTL())
- prm.SetNetmapEpoch(exec.curProcEpoch)
- prm.SetAddress(exec.address())
- prm.SetPrivateKey(key)
- prm.SetSessionToken(exec.prm.common.SessionToken())
- prm.SetBearerToken(exec.prm.common.BearerToken())
- prm.SetXHeaders(exec.prm.common.XHeaders())
- prm.SetRange(rng)
-
- if exec.isRaw() {
- prm.SetRawFlag()
- }
-
- res, err := internalclient.PayloadRange(prm)
- if err != nil {
- var errAccessDenied *apistatus.ObjectAccessDenied
- if errors.As(err, &errAccessDenied) {
- // Current spec allows other storage node to deny access,
- // fallback to GET here.
- obj, err := c.get(ctx, exec, key)
- if err != nil {
- return nil, err
- }
-
- payload := obj.Payload()
- from := rng.GetOffset()
- to := from + rng.GetLength()
-
- if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to {
- return nil, new(apistatus.ObjectOutOfRange)
- }
-
- return payloadOnlyObject(payload[from:to]), nil
- }
- return nil, err
- }
-
- return payloadOnlyObject(res.PayloadRange()), nil
+ // Current spec allows other storage node to deny access,
+ // fallback to GET here.
+ return c.getRange(ctx, exec, key, rng)
}
return c.get(ctx, exec, key)
}
+func (c *clientWrapper) getRange(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey, rng *object.Range) (*object.Object, error) {
+ var prm internalclient.PayloadRangePrm
+
+ prm.SetContext(ctx)
+ prm.SetClient(c.client)
+ prm.SetTTL(exec.prm.common.TTL())
+ prm.SetNetmapEpoch(exec.curProcEpoch)
+ prm.SetAddress(exec.address())
+ prm.SetPrivateKey(key)
+ prm.SetSessionToken(exec.prm.common.SessionToken())
+ prm.SetBearerToken(exec.prm.common.BearerToken())
+ prm.SetXHeaders(exec.prm.common.XHeaders())
+ prm.SetRange(rng)
+
+ if exec.isRaw() {
+ prm.SetRawFlag()
+ }
+
+ res, err := internalclient.PayloadRange(prm)
+ if err != nil {
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ if errors.As(err, &errAccessDenied) {
+ obj, err := c.get(ctx, exec, key)
+ if err != nil {
+ return nil, err
+ }
+
+ payload := obj.Payload()
+ from := rng.GetOffset()
+ to := from + rng.GetLength()
+
+ if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to {
+ return nil, new(apistatus.ObjectOutOfRange)
+ }
+
+ return payloadOnlyObject(payload[from:to]), nil
+ }
+ return nil, err
+ }
+
+ return payloadOnlyObject(res.PayloadRange()), nil
+}
+
+func (c *clientWrapper) getHeadOnly(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey) (*object.Object, error) {
+ var prm internalclient.HeadObjectPrm
+
+ prm.SetContext(ctx)
+ prm.SetClient(c.client)
+ prm.SetTTL(exec.prm.common.TTL())
+ prm.SetNetmapEpoch(exec.curProcEpoch)
+ prm.SetAddress(exec.address())
+ prm.SetPrivateKey(key)
+ prm.SetSessionToken(exec.prm.common.SessionToken())
+ prm.SetBearerToken(exec.prm.common.BearerToken())
+ prm.SetXHeaders(exec.prm.common.XHeaders())
+
+ if exec.isRaw() {
+ prm.SetRawFlag()
+ }
+
+ res, err := internalclient.HeadObject(prm)
+ if err != nil {
+ return nil, err
+ }
+
+ return res.Header(), nil
+}
+
func (c *clientWrapper) get(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey) (*object.Object, error) {
var prm internalclient.GetObjectPrm
From f8898932164b3b1affa5178f11b3a11f521b621c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 31 Mar 2023 16:30:46 +0300
Subject: [PATCH 0044/1943] [#193] getsvc: Refactor head param creation
Resolve funlen linter for toHeadPrm method.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/v2/head_forwarder.go | 180 +++++++++++++++++++
pkg/services/object/get/v2/util.go | 138 +-------------
2 files changed, 188 insertions(+), 130 deletions(-)
create mode 100644 pkg/services/object/get/v2/head_forwarder.go
diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go
new file mode 100644
index 000000000..b38da7131
--- /dev/null
+++ b/pkg/services/object/get/v2/head_forwarder.go
@@ -0,0 +1,180 @@
+package getsvc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+// headRequestForwarder forwards a Head request to other container nodes
+// and converts the remote response into an SDK object. It is created per
+// served request; OnceResign guarantees the forwarded request is composed
+// and re-signed only once even when several nodes are tried.
+type headRequestForwarder struct {
+	Request    *objectV2.HeadRequest  // original request being forwarded
+	Response   *objectV2.HeadResponse // response being served to the client
+	OnceResign *sync.Once             // one-time re-signing of Request
+	ObjectAddr oid.Address            // address of the requested object
+	KeyStorage *util.KeyStorage       // source of the node's signing key
+}
+
+// forward sends the Head request to the remote node at addr via c,
+// verifies the response against pubkey and converts its header part into
+// an SDK object carrying the header (and ID signature for the full-header
+// case) with the requested object ID set.
+//
+// The forwarding request is composed and re-signed at most once across
+// all invocations (OnceResign); if the closure already ran in an earlier
+// call, err below stays nil, so a signing failure is reported only by the
+// invocation that performed the signing.
+func (f *headRequestForwarder) forward(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
+	var err error
+
+	key, err := f.KeyStorage.GetKey(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// once compose and resign forwarding request
+	f.OnceResign.Do(func() {
+		// compose meta header of the local server
+		metaHdr := new(session.RequestMetaHeader)
+		metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1)
+		// TODO: #1165 think how to set the other fields
+		metaHdr.SetOrigin(f.Request.GetMetaHeader())
+		writeCurrentVersion(metaHdr)
+
+		f.Request.SetMetaHeader(metaHdr)
+
+		err = signature.SignServiceMessage(key, f.Request)
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	headResp, err := f.sendHeadRequest(ctx, addr, c)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := f.verifyResponse(headResp, pubkey); err != nil {
+		return nil, err
+	}
+
+	var (
+		hdr   *objectV2.Header
+		idSig *refs.Signature
+	)
+
+	// translate the received header part; a SplitInfo part is surfaced as
+	// a SplitInfoError so the caller can reassemble the object
+	switch v := headResp.GetBody().GetHeaderPart().(type) {
+	case nil:
+		return nil, fmt.Errorf("unexpected header type %T", v)
+	case *objectV2.ShortHeader:
+		if hdr, err = f.getHeaderFromShortHeader(v); err != nil {
+			return nil, err
+		}
+	case *objectV2.HeaderWithSignature:
+		if hdr, idSig, err = f.getHeaderAndSignature(v); err != nil {
+			return nil, err
+		}
+	case *objectV2.SplitInfo:
+		si := object.NewSplitInfoFromV2(v)
+		return nil, object.NewSplitInfoError(si)
+	}
+
+	objv2 := new(objectV2.Object)
+	objv2.SetHeader(hdr)
+	objv2.SetSignature(idSig)
+
+	obj := object.NewFromV2(objv2)
+	obj.SetID(f.ObjectAddr.Object())
+
+	return obj, nil
+}
+
+// getHeaderFromShortHeader expands the received short header sh into a
+// full objectV2.Header. A short header is only valid when the request
+// asked for the main header part (MainOnly); otherwise an error is
+// returned.
+// NOTE(review): the %T arguments of the error message look swapped — a
+// ShortHeader was received while a full header was expected. Kept as in
+// the pre-refactoring code; confirm the intended wording.
+func (f *headRequestForwarder) getHeaderFromShortHeader(sh *objectV2.ShortHeader) (*objectV2.Header, error) {
+	if !f.Request.GetBody().GetMainOnly() {
+		return nil, fmt.Errorf("wrong header part type: expected %T, received %T",
+			(*objectV2.ShortHeader)(nil), (*objectV2.HeaderWithSignature)(nil),
+		)
+	}
+
+	// copy every field the short header carries into a full header
+	hdr := new(objectV2.Header)
+	hdr.SetPayloadLength(sh.GetPayloadLength())
+	hdr.SetVersion(sh.GetVersion())
+	hdr.SetOwnerID(sh.GetOwnerID())
+	hdr.SetObjectType(sh.GetObjectType())
+	hdr.SetCreationEpoch(sh.GetCreationEpoch())
+	hdr.SetPayloadHash(sh.GetPayloadHash())
+	hdr.SetHomomorphicHash(sh.GetHomomorphicHash())
+	return hdr, nil
+}
+
+// getHeaderAndSignature extracts the header and the object ID signature
+// from hdrWithSig and verifies that the signature covers the requested
+// object ID (f.ObjectAddr.Object()). A full header is only valid when the
+// request did not ask for MainOnly.
+// NOTE(review): as in getHeaderFromShortHeader, the %T arguments of the
+// first error look swapped relative to what was expected vs received;
+// kept as in the pre-refactoring code.
+func (f *headRequestForwarder) getHeaderAndSignature(hdrWithSig *objectV2.HeaderWithSignature) (*objectV2.Header, *refs.Signature, error) {
+	if f.Request.GetBody().GetMainOnly() {
+		return nil, nil, fmt.Errorf("wrong header part type: expected %T, received %T",
+			(*objectV2.HeaderWithSignature)(nil), (*objectV2.ShortHeader)(nil),
+		)
+	}
+
+	if hdrWithSig == nil {
+		return nil, nil, errors.New("nil object part")
+	}
+
+	hdr := hdrWithSig.GetHeader()
+	idSig := hdrWithSig.GetSignature()
+
+	if idSig == nil {
+		// TODO(@cthulhu-rider): #1387 use "const" error
+		return nil, nil, errors.New("missing signature")
+	}
+
+	// the signature is checked against the binary form of the object ID
+	binID, err := f.ObjectAddr.Object().Marshal()
+	if err != nil {
+		return nil, nil, fmt.Errorf("marshal ID: %w", err)
+	}
+
+	var sig frostfscrypto.Signature
+	if err := sig.ReadFromV2(*idSig); err != nil {
+		return nil, nil, fmt.Errorf("can't read signature: %w", err)
+	}
+
+	if !sig.Verify(binID) {
+		return nil, nil, errors.New("invalid object ID signature")
+	}
+
+	return hdr, idSig, nil
+}
+
+// sendHeadRequest performs the Head RPC against the node at addr using
+// the raw client for that address and returns the remote response. The
+// transport error, if any, is wrapped with context.
+func (f *headRequestForwarder) sendHeadRequest(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*objectV2.HeadResponse, error) {
+	var headResp *objectV2.HeadResponse
+	err := c.RawForAddress(addr, func(cli *rpcclient.Client) error {
+		var e error
+		headResp, e = rpc.HeadObject(cli, f.Request, rpcclient.WithContext(ctx))
+		return e
+	})
+	if err != nil {
+		return nil, fmt.Errorf("sending the request failed: %w", err)
+	}
+	return headResp, nil
+}
+
+// verifyResponse checks the response key against pubkey and verifies the
+// service-message signature of the received Head response, then checks a
+// status for a failure code.
+// NOTE(review): the status is read from f.Response (the response being
+// served locally), not from headResp received from the remote node. This
+// mirrors the pre-refactoring code exactly — confirm it is intentional
+// and not a copy of the wrong variable.
+func (f *headRequestForwarder) verifyResponse(headResp *objectV2.HeadResponse, pubkey []byte) error {
+	// verify response key
+	if err := internal.VerifyResponseKeyV2(pubkey, headResp); err != nil {
+		return err
+	}
+
+	// verify response structure
+	if err := signature.VerifyServiceMessage(headResp); err != nil {
+		return fmt.Errorf("response verification failed: %w", err)
+	}
+
+	if err := checkStatus(f.Response.GetMetaHeader().GetStatus()); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index a871714a1..c659f4e7c 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -24,7 +24,6 @@ import (
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
versionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
@@ -426,7 +425,6 @@ func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *object.Object)
return nil
}
-// nolint: funlen
func (s *Service) toHeadPrm(ctx context.Context, req *objectV2.HeadRequest, resp *objectV2.HeadResponse) (*getsvc.HeadPrm, error) {
body := req.GetBody()
@@ -442,8 +440,6 @@ func (s *Service) toHeadPrm(ctx context.Context, req *objectV2.HeadRequest, resp
return nil, fmt.Errorf("invalid object address: %w", err)
}
- meta := req.GetMetaHeader()
-
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return nil, err
@@ -463,134 +459,16 @@ func (s *Service) toHeadPrm(ctx context.Context, req *objectV2.HeadRequest, resp
return p, nil
}
- var onceResign sync.Once
+ forwarder := &headRequestForwarder{
+ Request: req,
+ Response: resp,
+ OnceResign: &sync.Once{},
+ ObjectAddr: objAddr,
+ KeyStorage: s.keyStorage,
+ }
p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- var err error
-
- key, err := s.keyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
-
- // once compose and resign forwarding request
- onceResign.Do(func() {
- // compose meta header of the local server
- metaHdr := new(session.RequestMetaHeader)
- metaHdr.SetTTL(meta.GetTTL() - 1)
- // TODO: #1165 think how to set the other fields
- metaHdr.SetOrigin(meta)
- writeCurrentVersion(metaHdr)
-
- req.SetMetaHeader(metaHdr)
-
- err = signature.SignServiceMessage(key, req)
- })
-
- if err != nil {
- return nil, err
- }
-
- // code below is copy-pasted from c.GetObjectHeader implementation,
- // perhaps it is worth highlighting the utility function in frostfs-api-go
-
- // send Head request
- var headResp *objectV2.HeadResponse
- err = c.RawForAddress(addr, func(cli *rpcclient.Client) error {
- headResp, err = rpc.HeadObject(cli, req, rpcclient.WithContext(ctx))
- return err
- })
- if err != nil {
- return nil, fmt.Errorf("sending the request failed: %w", err)
- }
-
- // verify response key
- if err = internal.VerifyResponseKeyV2(pubkey, headResp); err != nil {
- return nil, err
- }
-
- // verify response structure
- if err := signature.VerifyServiceMessage(headResp); err != nil {
- return nil, fmt.Errorf("response verification failed: %w", err)
- }
-
- if err = checkStatus(resp.GetMetaHeader().GetStatus()); err != nil {
- return nil, err
- }
-
- var (
- hdr *objectV2.Header
- idSig *refs.Signature
- )
-
- switch v := headResp.GetBody().GetHeaderPart().(type) {
- case nil:
- return nil, fmt.Errorf("unexpected header type %T", v)
- case *objectV2.ShortHeader:
- if !body.GetMainOnly() {
- return nil, fmt.Errorf("wrong header part type: expected %T, received %T",
- (*objectV2.ShortHeader)(nil), (*objectV2.HeaderWithSignature)(nil),
- )
- }
-
- h := v
-
- hdr = new(objectV2.Header)
- hdr.SetPayloadLength(h.GetPayloadLength())
- hdr.SetVersion(h.GetVersion())
- hdr.SetOwnerID(h.GetOwnerID())
- hdr.SetObjectType(h.GetObjectType())
- hdr.SetCreationEpoch(h.GetCreationEpoch())
- hdr.SetPayloadHash(h.GetPayloadHash())
- hdr.SetHomomorphicHash(h.GetHomomorphicHash())
- case *objectV2.HeaderWithSignature:
- if body.GetMainOnly() {
- return nil, fmt.Errorf("wrong header part type: expected %T, received %T",
- (*objectV2.HeaderWithSignature)(nil), (*objectV2.ShortHeader)(nil),
- )
- }
-
- hdrWithSig := v
- if hdrWithSig == nil {
- return nil, errors.New("nil object part")
- }
-
- hdr = hdrWithSig.GetHeader()
- idSig = hdrWithSig.GetSignature()
-
- if idSig == nil {
- // TODO(@cthulhu-rider): #1387 use "const" error
- return nil, errors.New("missing signature")
- }
-
- binID, err := objAddr.Object().Marshal()
- if err != nil {
- return nil, fmt.Errorf("marshal ID: %w", err)
- }
-
- var sig frostfscrypto.Signature
- if err := sig.ReadFromV2(*idSig); err != nil {
- return nil, fmt.Errorf("can't read signature: %w", err)
- }
-
- if !sig.Verify(binID) {
- return nil, errors.New("invalid object ID signature")
- }
- case *objectV2.SplitInfo:
- si := object.NewSplitInfoFromV2(v)
-
- return nil, object.NewSplitInfoError(si)
- }
-
- objv2 := new(objectV2.Object)
- objv2.SetHeader(hdr)
- objv2.SetSignature(idSig)
-
- obj := object.NewFromV2(objv2)
- obj.SetID(objAddr.Object())
-
- // convert the object
- return obj, nil
+ return forwarder.forward(ctx, addr, c, pubkey)
}))
return p, nil
From b0786d2e5c5eeadf0a3859f17ef6c6c7a70d05be Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 31 Mar 2023 17:38:38 +0300
Subject: [PATCH 0045/1943] [#193] getsvc: Refactor get params creation
Resolve funlen linter for toPrm function.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/v2/get_forwarder.go | 171 ++++++++++++++++++++
pkg/services/object/get/v2/util.go | 138 ++--------------
2 files changed, 180 insertions(+), 129 deletions(-)
create mode 100644 pkg/services/object/get/v2/get_forwarder.go
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
new file mode 100644
index 000000000..b0ba47523
--- /dev/null
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -0,0 +1,171 @@
+package getsvc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
+ internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+)
+
+// getRequestForwarder forwards a Get request to other container nodes and
+// relays the streamed response (header first, then payload chunks) to the
+// local client stream. OnceResign guarantees one-time re-signing of the
+// request; OnceHeaderSending guarantees the header is written to Stream
+// only once even if several nodes are tried; GlobalProgress counts payload
+// bytes already delivered across all attempts so retries skip them.
+type getRequestForwarder struct {
+	OnceResign        *sync.Once
+	OnceHeaderSending *sync.Once
+	GlobalProgress    int
+	KeyStorage        *util.KeyStorage
+	Request           *objectV2.GetRequest
+	Stream            *streamObjectWriter
+}
+
+// forward re-signs the Get request (once), opens a response stream to the
+// node at addr and relays the stream to the local client. The object
+// itself is always returned as nil: the payload is delivered through
+// f.Stream, not via the return value.
+//
+// Note: if OnceResign already fired in an earlier call, the closure is
+// skipped and err stays nil, so a signing failure is reported only by the
+// invocation that performed the signing.
+func (f *getRequestForwarder) forward(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
+	key, err := f.KeyStorage.GetKey(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// once compose and resign forwarding request
+	f.OnceResign.Do(func() {
+		// compose meta header of the local server
+		metaHdr := new(session.RequestMetaHeader)
+		metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1)
+		// TODO: #1165 think how to set the other fields
+		metaHdr.SetOrigin(f.Request.GetMetaHeader())
+		writeCurrentVersion(metaHdr)
+		f.Request.SetMetaHeader(metaHdr)
+		err = signature.SignServiceMessage(key, f.Request)
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	getStream, err := f.openStream(ctx, addr, c)
+	if err != nil {
+		return nil, err
+	}
+	return nil, f.readStream(ctx, c, getStream, pubkey)
+}
+
+// verifyResponse checks a single streamed Get response message: the
+// response key against pubkey, the service-message signature, and the
+// status carried in the response meta header.
+func (f *getRequestForwarder) verifyResponse(resp *objectV2.GetResponse, pubkey []byte) error {
+	// verify response key
+	if err := internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
+		return err
+	}
+
+	// verify response structure
+	if err := signature.VerifyServiceMessage(resp); err != nil {
+		return fmt.Errorf("response verification failed: %w", err)
+	}
+
+	if err := checkStatus(resp.GetMetaHeader().GetStatus()); err != nil {
+		return err
+	}
+	return nil
+}
+
+// writeHeader converts the init part v into an SDK object (ID, signature
+// and header only) and writes it to the local client stream. The write is
+// guarded by OnceHeaderSending so retries against other nodes do not send
+// the header twice; if an earlier call already wrote it, err stays nil.
+func (f *getRequestForwarder) writeHeader(ctx context.Context, v *objectV2.GetObjectPartInit) error {
+	obj := new(objectV2.Object)
+
+	obj.SetObjectID(v.GetObjectID())
+	obj.SetSignature(v.GetSignature())
+	obj.SetHeader(v.GetHeader())
+
+	var err error
+	f.OnceHeaderSending.Do(func() {
+		err = f.Stream.WriteHeader(ctx, object.NewFromV2(obj))
+	})
+	if err != nil {
+		return fmt.Errorf("could not write object header in Get forwarder: %w", err)
+	}
+	return nil
+}
+
+// openStream opens a Get response stream to the node at addr using the
+// raw client for that address. The transport error, if any, is wrapped
+// with context.
+func (f *getRequestForwarder) openStream(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*rpc.GetResponseReader, error) {
+	var getStream *rpc.GetResponseReader
+	err := c.RawForAddress(addr, func(cli *rpcclient.Client) error {
+		var e error
+		getStream, e = rpc.GetObject(cli, f.Request, rpcclient.WithContext(ctx))
+		return e
+	})
+	if err != nil {
+		return nil, fmt.Errorf("stream opening failed: %w", err)
+	}
+	return getStream, nil
+}
+
+// readStream drains getStream, verifying each message and relaying it to
+// the local client stream. Protocol checks enforced here:
+//   - the first message must be the header part (errWrongMessageSeq
+//     otherwise), and EOF before any header is io.ErrUnexpectedEOF;
+//   - chunks already delivered in earlier attempts are skipped using
+//     GlobalProgress vs the per-stream localProgress (via chunkToSend);
+//   - a SplitInfo part aborts with SplitInfoError for reassembly.
+//
+// Read errors are reported to the client cache via ReportError before
+// being returned.
+func (f *getRequestForwarder) readStream(ctx context.Context, c client.MultiAddressClient, getStream *rpc.GetResponseReader, pubkey []byte) error {
+	var (
+		headWas       bool
+		resp          = new(objectV2.GetResponse)
+		localProgress int
+	)
+
+	for {
+		// receive message from server stream
+		err := getStream.Read(resp)
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				if !headWas {
+					return io.ErrUnexpectedEOF
+				}
+
+				break
+			}
+
+			internalclient.ReportError(c, err)
+			return fmt.Errorf("reading the response failed: %w", err)
+		}
+
+		if err := f.verifyResponse(resp, pubkey); err != nil {
+			return err
+		}
+
+		switch v := resp.GetBody().GetObjectPart().(type) {
+		default:
+			return fmt.Errorf("unexpected object part %T", v)
+		case *objectV2.GetObjectPartInit:
+			if headWas {
+				return errWrongMessageSeq
+			}
+			headWas = true
+			if err := f.writeHeader(ctx, v); err != nil {
+				return err
+			}
+		case *objectV2.GetObjectPartChunk:
+			if !headWas {
+				return errWrongMessageSeq
+			}
+
+			origChunk := v.GetChunk()
+
+			// empty result means this chunk was fully delivered already
+			chunk := chunkToSend(f.GlobalProgress, localProgress, origChunk)
+			if len(chunk) == 0 {
+				localProgress += len(origChunk)
+				continue
+			}
+
+			if err = f.Stream.WriteChunk(ctx, chunk); err != nil {
+				return fmt.Errorf("could not write object chunk in Get forwarder: %w", err)
+			}
+
+			localProgress += len(origChunk)
+			f.GlobalProgress += len(chunk)
+		case *objectV2.SplitInfo:
+			si := object.NewSplitInfoFromV2(v)
+			return object.NewSplitInfoError(si)
+		}
+	}
+	return nil
+}
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index c659f4e7c..3a9cf3b07 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -32,7 +32,6 @@ import (
var errWrongMessageSeq = errors.New("incorrect message sequence")
-// nolint: funlen, gocognit
func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStream) (*getsvc.Prm, error) {
body := req.GetBody()
@@ -48,8 +47,6 @@ func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStre
return nil, fmt.Errorf("invalid object address: %w", err)
}
- meta := req.GetMetaHeader()
-
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return nil, err
@@ -65,134 +62,17 @@ func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStre
p.SetObjectWriter(streamWrapper)
if !commonPrm.LocalOnly() {
- var onceResign sync.Once
-
- var onceHeaderSending sync.Once
- var globalProgress int
+ forwarder := &getRequestForwarder{
+ OnceResign: &sync.Once{},
+ OnceHeaderSending: &sync.Once{},
+ GlobalProgress: 0,
+ KeyStorage: s.keyStorage,
+ Request: req,
+ Stream: streamWrapper,
+ }
p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- var err error
-
- key, err := s.keyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
-
- // once compose and resign forwarding request
- onceResign.Do(func() {
- // compose meta header of the local server
- metaHdr := new(session.RequestMetaHeader)
- metaHdr.SetTTL(meta.GetTTL() - 1)
- // TODO: #1165 think how to set the other fields
- metaHdr.SetOrigin(meta)
- writeCurrentVersion(metaHdr)
-
- req.SetMetaHeader(metaHdr)
-
- err = signature.SignServiceMessage(key, req)
- })
-
- if err != nil {
- return nil, err
- }
-
- // code below is copy-pasted from c.GetObject implementation,
- // perhaps it is worth highlighting the utility function in frostfs-api-go
-
- // open stream
- var getStream *rpc.GetResponseReader
- err = c.RawForAddress(addr, func(cli *rpcclient.Client) error {
- getStream, err = rpc.GetObject(cli, req, rpcclient.WithContext(stream.Context()))
- return err
- })
- if err != nil {
- return nil, fmt.Errorf("stream opening failed: %w", err)
- }
-
- var (
- headWas bool
- resp = new(objectV2.GetResponse)
- localProgress int
- )
-
- for {
- // receive message from server stream
- err := getStream.Read(resp)
- if err != nil {
- if errors.Is(err, io.EOF) {
- if !headWas {
- return nil, io.ErrUnexpectedEOF
- }
-
- break
- }
-
- internalclient.ReportError(c, err)
- return nil, fmt.Errorf("reading the response failed: %w", err)
- }
-
- // verify response key
- if err = internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
- return nil, err
- }
-
- // verify response structure
- if err := signature.VerifyServiceMessage(resp); err != nil {
- return nil, fmt.Errorf("response verification failed: %w", err)
- }
-
- if err = checkStatus(resp.GetMetaHeader().GetStatus()); err != nil {
- return nil, err
- }
-
- switch v := resp.GetBody().GetObjectPart().(type) {
- default:
- return nil, fmt.Errorf("unexpected object part %T", v)
- case *objectV2.GetObjectPartInit:
- if headWas {
- return nil, errWrongMessageSeq
- }
-
- headWas = true
-
- obj := new(objectV2.Object)
-
- obj.SetObjectID(v.GetObjectID())
- obj.SetSignature(v.GetSignature())
- obj.SetHeader(v.GetHeader())
-
- onceHeaderSending.Do(func() {
- err = streamWrapper.WriteHeader(stream.Context(), object.NewFromV2(obj))
- })
- if err != nil {
- return nil, fmt.Errorf("could not write object header in Get forwarder: %w", err)
- }
- case *objectV2.GetObjectPartChunk:
- if !headWas {
- return nil, errWrongMessageSeq
- }
-
- origChunk := v.GetChunk()
-
- chunk := chunkToSend(globalProgress, localProgress, origChunk)
- if len(chunk) == 0 {
- localProgress += len(origChunk)
- continue
- }
-
- if err = streamWrapper.WriteChunk(stream.Context(), chunk); err != nil {
- return nil, fmt.Errorf("could not write object chunk in Get forwarder: %w", err)
- }
-
- localProgress += len(origChunk)
- globalProgress += len(chunk)
- case *objectV2.SplitInfo:
- si := object.NewSplitInfoFromV2(v)
- return nil, object.NewSplitInfoError(si)
- }
- }
-
- return nil, nil
+ return forwarder.forward(stream.Context(), addr, c, pubkey)
}))
}
From 6c7b708a98a7e95fbe8777a1abda75eedf589254 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 31 Mar 2023 17:41:42 +0300
Subject: [PATCH 0046/1943] [#193] getsvc: Refactor get range params creation
Resolve funlen linter for toRangePrm function.
Signed-off-by: Dmitrii Stepanov
---
.../object/get/v2/get_range_forwarder.go | 137 ++++++++++++++++++
pkg/services/object/get/v2/util.go | 110 +-------------
2 files changed, 144 insertions(+), 103 deletions(-)
create mode 100644 pkg/services/object/get/v2/get_range_forwarder.go
diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go
new file mode 100644
index 000000000..a9526f714
--- /dev/null
+++ b/pkg/services/object/get/v2/get_range_forwarder.go
@@ -0,0 +1,137 @@
+package getsvc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
+ rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
+ internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+)
+
+// getRangeRequestForwarder forwards a GetRange request to other container
+// nodes and relays the streamed payload range chunks to the local client
+// stream. OnceResign guarantees one-time re-signing of the request;
+// GlobalProgress counts range bytes already delivered across all attempts
+// so retries skip them.
+type getRangeRequestForwarder struct {
+	OnceResign     *sync.Once
+	GlobalProgress int
+	KeyStorage     *util.KeyStorage
+	Request        *objectV2.GetRangeRequest
+	Stream         *streamObjectRangeWriter
+}
+
+// forward re-signs the GetRange request (once), opens a payload range
+// stream to the node at addr and relays it to the local client. The
+// object return value is always nil: data is delivered through f.Stream.
+//
+// Note: if OnceResign already fired in an earlier call, the closure is
+// skipped and err stays nil, so a signing failure is reported only by the
+// invocation that performed the signing.
+func (f *getRangeRequestForwarder) forward(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
+	key, err := f.KeyStorage.GetKey(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// once compose and resign forwarding request
+	f.OnceResign.Do(func() {
+		// compose meta header of the local server
+		metaHdr := new(session.RequestMetaHeader)
+		metaHdr.SetTTL(f.Request.GetMetaHeader().GetTTL() - 1)
+		// TODO: #1165 think how to set the other fields
+		metaHdr.SetOrigin(f.Request.GetMetaHeader())
+		writeCurrentVersion(metaHdr)
+
+		f.Request.SetMetaHeader(metaHdr)
+
+		err = signature.SignServiceMessage(key, f.Request)
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	rangeStream, err := f.openStream(ctx, addr, c)
+	if err != nil {
+		return nil, err
+	}
+
+	return nil, f.readStream(ctx, rangeStream, c, pubkey)
+}
+
+// verifyResponse checks a single streamed GetRange response message: the
+// response key against pubkey, the service-message signature, and the
+// status carried in the response meta header.
+func (f *getRangeRequestForwarder) verifyResponse(resp *objectV2.GetRangeResponse, pubkey []byte) error {
+	// verify response key
+	if err := internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
+		return err
+	}
+
+	// verify response structure
+	if err := signature.VerifyServiceMessage(resp); err != nil {
+		return fmt.Errorf("could not verify %T: %w", resp, err)
+	}
+
+	if err := checkStatus(resp.GetMetaHeader().GetStatus()); err != nil {
+		return err
+	}
+	return nil
+}
+
+// openStream opens a payload range response stream to the node at addr
+// using the raw client for that address. The transport error, if any, is
+// wrapped with context.
+func (f *getRangeRequestForwarder) openStream(ctx context.Context, addr network.Address, c client.MultiAddressClient) (*rpc.ObjectRangeResponseReader, error) {
+	// open stream
+	var rangeStream *rpc.ObjectRangeResponseReader
+	err := c.RawForAddress(addr, func(cli *rpcclient.Client) error {
+		var e error
+		rangeStream, e = rpc.GetObjectRange(cli, f.Request, rpcclient.WithContext(ctx))
+		return e
+	})
+	if err != nil {
+		return nil, fmt.Errorf("could not create Get payload range stream: %w", err)
+	}
+	return rangeStream, nil
+}
+
+// readStream drains rangeStream until EOF, verifying each message and
+// relaying payload chunks to the local client stream. Chunks already
+// delivered in earlier attempts are skipped using GlobalProgress vs the
+// per-stream localProgress (via chunkToSend); a SplitInfo part aborts
+// with SplitInfoError for reassembly. Read errors are reported to the
+// client cache via ReportError before being returned.
+func (f *getRangeRequestForwarder) readStream(ctx context.Context, rangeStream *rpc.ObjectRangeResponseReader, c client.MultiAddressClient, pubkey []byte) error {
+	resp := new(objectV2.GetRangeResponse)
+	var localProgress int
+
+	for {
+		// receive message from server stream
+		err := rangeStream.Read(resp)
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			internalclient.ReportError(c, err)
+			return fmt.Errorf("reading the response failed: %w", err)
+		}
+
+		if err := f.verifyResponse(resp, pubkey); err != nil {
+			return err
+		}
+
+		switch v := resp.GetBody().GetRangePart().(type) {
+		case nil:
+			return fmt.Errorf("unexpected range type %T", v)
+		case *objectV2.GetRangePartChunk:
+			origChunk := v.GetChunk()
+
+			// empty result means this chunk was fully delivered already
+			chunk := chunkToSend(f.GlobalProgress, localProgress, origChunk)
+			if len(chunk) == 0 {
+				localProgress += len(origChunk)
+				continue
+			}
+
+			if err = f.Stream.WriteChunk(ctx, chunk); err != nil {
+				return fmt.Errorf("could not write object chunk in GetRange forwarder: %w", err)
+			}
+
+			localProgress += len(origChunk)
+			f.GlobalProgress += len(chunk)
+		case *objectV2.SplitInfo:
+			si := object.NewSplitInfoFromV2(v)
+			return object.NewSplitInfoError(si)
+		}
+	}
+	return nil
+}
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index 3a9cf3b07..dffa0d9b1 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -6,22 +6,16 @@ import (
"errors"
"fmt"
"hash"
- "io"
"sync"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
- rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/status"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -79,7 +73,6 @@ func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStre
return p, nil
}
-// nolint: funlen, gocognit
func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) (*getsvc.RangePrm, error) {
body := req.GetBody()
@@ -95,8 +88,6 @@ func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.Get
return nil, fmt.Errorf("invalid object address: %w", err)
}
- meta := req.GetMetaHeader()
-
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return nil, err
@@ -118,103 +109,16 @@ func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.Get
}
if !commonPrm.LocalOnly() {
- var onceResign sync.Once
- var globalProgress int
-
- key, err := s.keyStorage.GetKey(nil)
- if err != nil {
- return nil, err
+ forwarder := &getRangeRequestForwarder{
+ OnceResign: &sync.Once{},
+ GlobalProgress: 0,
+ KeyStorage: s.keyStorage,
+ Request: req,
+ Stream: streamWrapper,
}
p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- var err error
-
- // once compose and resign forwarding request
- onceResign.Do(func() {
- // compose meta header of the local server
- metaHdr := new(session.RequestMetaHeader)
- metaHdr.SetTTL(meta.GetTTL() - 1)
- // TODO: #1165 think how to set the other fields
- metaHdr.SetOrigin(meta)
- writeCurrentVersion(metaHdr)
-
- req.SetMetaHeader(metaHdr)
-
- err = signature.SignServiceMessage(key, req)
- })
-
- if err != nil {
- return nil, err
- }
-
- // code below is copy-pasted from c.ObjectPayloadRangeData implementation,
- // perhaps it is worth highlighting the utility function in frostfs-api-go
-
- // open stream
- var rangeStream *rpc.ObjectRangeResponseReader
- err = c.RawForAddress(addr, func(cli *rpcclient.Client) error {
- rangeStream, err = rpc.GetObjectRange(cli, req, rpcclient.WithContext(stream.Context()))
- return err
- })
- if err != nil {
- return nil, fmt.Errorf("could not create Get payload range stream: %w", err)
- }
-
- resp := new(objectV2.GetRangeResponse)
- var localProgress int
-
- for {
- // receive message from server stream
- err := rangeStream.Read(resp)
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
-
- internalclient.ReportError(c, err)
- return nil, fmt.Errorf("reading the response failed: %w", err)
- }
-
- // verify response key
- if err = internal.VerifyResponseKeyV2(pubkey, resp); err != nil {
- return nil, err
- }
-
- // verify response structure
- if err := signature.VerifyServiceMessage(resp); err != nil {
- return nil, fmt.Errorf("could not verify %T: %w", resp, err)
- }
-
- if err = checkStatus(resp.GetMetaHeader().GetStatus()); err != nil {
- return nil, err
- }
-
- switch v := resp.GetBody().GetRangePart().(type) {
- case nil:
- return nil, fmt.Errorf("unexpected range type %T", v)
- case *objectV2.GetRangePartChunk:
- origChunk := v.GetChunk()
-
- chunk := chunkToSend(globalProgress, localProgress, origChunk)
- if len(chunk) == 0 {
- localProgress += len(origChunk)
- continue
- }
-
- if err = streamWrapper.WriteChunk(stream.Context(), chunk); err != nil {
- return nil, fmt.Errorf("could not write object chunk in GetRange forwarder: %w", err)
- }
-
- localProgress += len(origChunk)
- globalProgress += len(chunk)
- case *objectV2.SplitInfo:
- si := object.NewSplitInfoFromV2(v)
-
- return nil, object.NewSplitInfoError(si)
- }
- }
-
- return nil, nil
+ return forwarder.forward(stream.Context(), addr, c, pubkey)
}))
}
From 89924071cd2f06631af0bfac88f32fcfab10a34d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 11:17:59 +0300
Subject: [PATCH 0047/1943] [#193] getsvc: Edit request forwarder signature
Pass context to forwarder direct, without closure.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/prm.go | 2 +-
pkg/services/object/get/util.go | 2 +-
pkg/services/object/get/v2/get_forwarder.go | 2 +-
.../object/get/v2/get_range_forwarder.go | 2 +-
pkg/services/object/get/v2/head_forwarder.go | 2 +-
pkg/services/object/get/v2/util.go | 18 ++++++------------
6 files changed, 11 insertions(+), 17 deletions(-)
diff --git a/pkg/services/object/get/prm.go b/pkg/services/object/get/prm.go
index 88848264e..7a0f1e062 100644
--- a/pkg/services/object/get/prm.go
+++ b/pkg/services/object/get/prm.go
@@ -59,7 +59,7 @@ type RangeHashPrm struct {
salt []byte
}
-type RequestForwarder func(coreclient.NodeInfo, coreclient.MultiAddressClient) (*object.Object, error)
+type RequestForwarder func(context.Context, coreclient.NodeInfo, coreclient.MultiAddressClient) (*object.Object, error)
// HeadPrm groups parameters of Head service call.
type HeadPrm struct {
diff --git a/pkg/services/object/get/util.go b/pkg/services/object/get/util.go
index a68232c9a..7986d05c0 100644
--- a/pkg/services/object/get/util.go
+++ b/pkg/services/object/get/util.go
@@ -89,7 +89,7 @@ func (c *clientCacheWrapper) get(info coreclient.NodeInfo) (getClient, error) {
func (c *clientWrapper) getObject(ctx context.Context, exec *execCtx, info coreclient.NodeInfo) (*object.Object, error) {
if exec.isForwardingEnabled() {
- return exec.prm.forwarder(info, c.client)
+ return exec.prm.forwarder(ctx, info, c.client)
}
key, err := exec.key()
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
index b0ba47523..7314cceb5 100644
--- a/pkg/services/object/get/v2/get_forwarder.go
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -29,7 +29,7 @@ type getRequestForwarder struct {
Stream *streamObjectWriter
}
-func (f *getRequestForwarder) forward(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
+func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
key, err := f.KeyStorage.GetKey(nil)
if err != nil {
return nil, err
diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go
index a9526f714..8fa4351d4 100644
--- a/pkg/services/object/get/v2/get_range_forwarder.go
+++ b/pkg/services/object/get/v2/get_range_forwarder.go
@@ -28,7 +28,7 @@ type getRangeRequestForwarder struct {
Stream *streamObjectRangeWriter
}
-func (f *getRangeRequestForwarder) forward(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
+func (f *getRangeRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
key, err := f.KeyStorage.GetKey(nil)
if err != nil {
return nil, err
diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go
index b38da7131..e0b58a35d 100644
--- a/pkg/services/object/get/v2/head_forwarder.go
+++ b/pkg/services/object/get/v2/head_forwarder.go
@@ -29,7 +29,7 @@ type headRequestForwarder struct {
KeyStorage *util.KeyStorage
}
-func (f *headRequestForwarder) forward(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
+func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
var err error
key, err := f.KeyStorage.GetKey(nil)
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index dffa0d9b1..75228e2b1 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -65,9 +65,7 @@ func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStre
Stream: streamWrapper,
}
- p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- return forwarder.forward(stream.Context(), addr, c, pubkey)
- }))
+ p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode))
}
return p, nil
@@ -117,9 +115,7 @@ func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.Get
Stream: streamWrapper,
}
- p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- return forwarder.forward(stream.Context(), addr, c, pubkey)
- }))
+ p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode))
}
return p, nil
@@ -251,9 +247,7 @@ func (s *Service) toHeadPrm(ctx context.Context, req *objectV2.HeadRequest, resp
KeyStorage: s.keyStorage,
}
- p.SetRequestForwarder(groupAddressRequestForwarder(func(addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- return forwarder.forward(ctx, addr, c, pubkey)
- }))
+ p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode))
return p, nil
}
@@ -321,8 +315,8 @@ func toShortObjectHeader(hdr *object.Object) objectV2.GetHeaderPart {
return sh
}
-func groupAddressRequestForwarder(f func(network.Address, client.MultiAddressClient, []byte) (*object.Object, error)) getsvc.RequestForwarder {
- return func(info client.NodeInfo, c client.MultiAddressClient) (*object.Object, error) {
+func groupAddressRequestForwarder(f func(context.Context, network.Address, client.MultiAddressClient, []byte) (*object.Object, error)) getsvc.RequestForwarder {
+ return func(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) (*object.Object, error) {
var (
firstErr error
res *object.Object
@@ -343,7 +337,7 @@ func groupAddressRequestForwarder(f func(network.Address, client.MultiAddressCli
// would be nice to log otherwise
}()
- res, err = f(addr, c, key)
+ res, err = f(ctx, addr, c, key)
return
})
From c58ab0c3693078ecf527abf0b2955b0e5e021a6b Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 13:20:57 +0300
Subject: [PATCH 0048/1943] [#193] getsvc: Reduce private key requests
Get private key only once for request forwarding.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/v2/get_forwarder.go | 11 ++++------
.../object/get/v2/get_range_forwarder.go | 11 ++++------
pkg/services/object/get/v2/head_forwarder.go | 11 +++-------
pkg/services/object/get/v2/util.go | 21 ++++++++++++++++---
4 files changed, 29 insertions(+), 25 deletions(-)
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
index 7314cceb5..330a0642f 100644
--- a/pkg/services/object/get/v2/get_forwarder.go
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -2,6 +2,7 @@ package getsvc
import (
"context"
+ "crypto/ecdsa"
"errors"
"fmt"
"io"
@@ -16,7 +17,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
@@ -24,16 +24,13 @@ type getRequestForwarder struct {
OnceResign *sync.Once
OnceHeaderSending *sync.Once
GlobalProgress int
- KeyStorage *util.KeyStorage
+ Key *ecdsa.PrivateKey
Request *objectV2.GetRequest
Stream *streamObjectWriter
}
func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- key, err := f.KeyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
+ var err error
// once compose and resign forwarding request
f.OnceResign.Do(func() {
@@ -44,7 +41,7 @@ func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr net
metaHdr.SetOrigin(f.Request.GetMetaHeader())
writeCurrentVersion(metaHdr)
f.Request.SetMetaHeader(metaHdr)
- err = signature.SignServiceMessage(key, f.Request)
+ err = signature.SignServiceMessage(f.Key, f.Request)
})
if err != nil {
diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go
index 8fa4351d4..5893f8de3 100644
--- a/pkg/services/object/get/v2/get_range_forwarder.go
+++ b/pkg/services/object/get/v2/get_range_forwarder.go
@@ -2,6 +2,7 @@ package getsvc
import (
"context"
+ "crypto/ecdsa"
"errors"
"fmt"
"io"
@@ -16,23 +17,19 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
type getRangeRequestForwarder struct {
OnceResign *sync.Once
GlobalProgress int
- KeyStorage *util.KeyStorage
+ Key *ecdsa.PrivateKey
Request *objectV2.GetRangeRequest
Stream *streamObjectRangeWriter
}
func (f *getRangeRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
- key, err := f.KeyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
+ var err error
// once compose and resign forwarding request
f.OnceResign.Do(func() {
@@ -45,7 +42,7 @@ func (f *getRangeRequestForwarder) forwardRequestToNode(ctx context.Context, add
f.Request.SetMetaHeader(metaHdr)
- err = signature.SignServiceMessage(key, f.Request)
+ err = signature.SignServiceMessage(f.Key, f.Request)
})
if err != nil {
diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go
index e0b58a35d..45c0174fd 100644
--- a/pkg/services/object/get/v2/head_forwarder.go
+++ b/pkg/services/object/get/v2/head_forwarder.go
@@ -2,6 +2,7 @@ package getsvc
import (
"context"
+ "crypto/ecdsa"
"errors"
"fmt"
"sync"
@@ -15,7 +16,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -26,17 +26,12 @@ type headRequestForwarder struct {
Response *objectV2.HeadResponse
OnceResign *sync.Once
ObjectAddr oid.Address
- KeyStorage *util.KeyStorage
+ Key *ecdsa.PrivateKey
}
func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
var err error
- key, err := f.KeyStorage.GetKey(nil)
- if err != nil {
- return nil, err
- }
-
// once compose and resign forwarding request
f.OnceResign.Do(func() {
// compose meta header of the local server
@@ -48,7 +43,7 @@ func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr ne
f.Request.SetMetaHeader(metaHdr)
- err = signature.SignServiceMessage(key, f.Request)
+ err = signature.SignServiceMessage(f.Key, f.Request)
})
if err != nil {
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index 75228e2b1..3a50a6ca5 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -56,11 +56,16 @@ func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStre
p.SetObjectWriter(streamWrapper)
if !commonPrm.LocalOnly() {
+ key, err := s.keyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+
forwarder := &getRequestForwarder{
OnceResign: &sync.Once{},
OnceHeaderSending: &sync.Once{},
GlobalProgress: 0,
- KeyStorage: s.keyStorage,
+ Key: key,
Request: req,
Stream: streamWrapper,
}
@@ -107,10 +112,15 @@ func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.Get
}
if !commonPrm.LocalOnly() {
+ key, err := s.keyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+
forwarder := &getRangeRequestForwarder{
OnceResign: &sync.Once{},
GlobalProgress: 0,
- KeyStorage: s.keyStorage,
+ Key: key,
Request: req,
Stream: streamWrapper,
}
@@ -239,12 +249,17 @@ func (s *Service) toHeadPrm(ctx context.Context, req *objectV2.HeadRequest, resp
return p, nil
}
+ key, err := s.keyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+
forwarder := &headRequestForwarder{
Request: req,
Response: resp,
OnceResign: &sync.Once{},
ObjectAddr: objAddr,
- KeyStorage: s.keyStorage,
+ Key: key,
}
p.SetRequestForwarder(groupAddressRequestForwarder(forwarder.forwardRequestToNode))
From ab891517de1a1a34c565bb3c59e0e1cf4eac6515 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Wed, 5 Apr 2023 18:47:11 +0300
Subject: [PATCH 0049/1943] [#116] node: Fix bug with extra generated files in
TestReload
* Create testNewEngine in engineWithShards without default opts
Signed-off-by: Airat Arifullin a.arifullin@yadro.com
---
pkg/local_object_storage/engine/control_test.go | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index 2c44eb169..12771340b 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -25,6 +25,7 @@ import (
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
"go.uber.org/atomic"
+ "go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
@@ -321,8 +322,9 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
currShards := make([]string, 0, num)
te := testNewEngine(t).
- setShardsNumAdditionalOpts(t, num, func(id int) []shard.Option {
+ setShardsNumOpts(t, num, func(id int) []shard.Option {
return []shard.Option{
+ shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(
blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
shard.WithMetaBaseOptions(
From 9e2df4b7c73884368b2e6a06c5a55ff51d59f65a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 16:11:56 +0300
Subject: [PATCH 0050/1943] [#203] node: Fix double imports
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/object/head.go | 3 +--
pkg/innerring/initialization.go | 15 +++++++--------
pkg/local_object_storage/metabase/db_test.go | 5 ++---
pkg/local_object_storage/metabase/put.go | 9 ++++-----
pkg/local_object_storage/pilorama/forest.go | 19 +++++++++----------
pkg/local_object_storage/shard/gc_test.go | 5 ++---
pkg/morph/client/container/get.go | 3 +--
pkg/services/object/get/util.go | 3 +--
.../object_manager/tombstone/source/source.go | 3 +--
pkg/services/tree/sync.go | 5 ++---
10 files changed, 30 insertions(+), 40 deletions(-)
diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go
index fc2e7c9c6..139563e24 100644
--- a/cmd/frostfs-cli/modules/object/head.go
+++ b/cmd/frostfs-cli/modules/object/head.go
@@ -15,7 +15,6 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- oidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
)
@@ -113,7 +112,7 @@ func marshalHeader(cmd *cobra.Command, hdr *object.Object) ([]byte, error) {
}
}
-func printObjectID(cmd *cobra.Command, recv func() (oidSDK.ID, bool)) {
+func printObjectID(cmd *cobra.Command, recv func() (oid.ID, bool)) {
var strID string
id, ok := recv()
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 30b8d43a0..8db6328a2 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -25,7 +25,6 @@ import (
auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit"
balanceClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
- cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
frostfsClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -229,7 +228,7 @@ func (s *Server) createSettlementProcessor(clientCache *ClientCache, cnrClient *
// create settlement processor dependencies
settlementDeps := settlementDeps{
log: s.log,
- cnrSrc: cntClient.AsContainerSource(cnrClient),
+ cnrSrc: container.AsContainerSource(cnrClient),
auditClient: s.auditClient,
nmClient: s.netmapClient,
clientCache: clientCache,
@@ -545,7 +544,7 @@ func (s *Server) initGRPCServer(cfg *viper.Viper) error {
}
type serverMorphClients struct {
- CnrClient *cntClient.Client
+ CnrClient *container.Client
FrostFSIDClient *frostfsid.Client
FrostFSClient *frostfsClient.Client
MorphSubnetClient *morphsubnet.Client
@@ -564,21 +563,21 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) {
}
// form morph container client's options
- morphCnrOpts := make([]cntClient.Option, 0, 3)
+ morphCnrOpts := make([]container.Option, 0, 3)
morphCnrOpts = append(morphCnrOpts,
- cntClient.TryNotary(),
- cntClient.AsAlphabet(),
+ container.TryNotary(),
+ container.AsAlphabet(),
)
if s.sideNotaryConfig.disabled {
// in non-notary environments we customize fee for named container registration
// because it takes much more additional GAS than other operations.
morphCnrOpts = append(morphCnrOpts,
- cntClient.WithCustomFeeForNamedPut(s.feeConfig.NamedContainerRegistrationFee()),
+ container.WithCustomFeeForNamedPut(s.feeConfig.NamedContainerRegistrationFee()),
)
}
- result.CnrClient, err = cntClient.NewFromMorph(s.morphClient, s.contracts.container, fee, morphCnrOpts...)
+ result.CnrClient, err = container.NewFromMorph(s.morphClient, s.contracts.container, fee, morphCnrOpts...)
if err != nil {
return nil, err
}
diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go
index cdb90cc16..9ef7bf8bc 100644
--- a/pkg/local_object_storage/metabase/db_test.go
+++ b/pkg/local_object_storage/metabase/db_test.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
@@ -26,11 +25,11 @@ func (s epochState) CurrentEpoch() uint64 {
}
// saves "big" object in DB.
-func putBig(db *meta.DB, obj *object.Object) error {
+func putBig(db *meta.DB, obj *objectSDK.Object) error {
return metaPut(db, obj, nil)
}
-func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs object.SearchFilters, exp ...oid.Address) {
+func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) {
res, err := metaSelect(db, cnr, fs)
require.NoError(t, err)
require.Len(t, res, len(exp))
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 8e11c5d9c..b0fea6535 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -6,7 +6,6 @@ import (
"fmt"
gio "io"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
@@ -89,7 +88,7 @@ func (db *DB) put(tx *bbolt.Tx,
isParent := si != nil
- exists, err := db.exists(tx, object.AddressOf(obj), currEpoch)
+ exists, err := db.exists(tx, objectCore.AddressOf(obj), currEpoch)
if errors.As(err, &splitInfoError) {
exists = true // object exists, however it is virtual
@@ -111,14 +110,14 @@ func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *obje
// When storage engine moves objects between different sub-storages,
// it calls metabase.Put method with new storage ID, thus triggering this code.
if !isParent && id != nil {
- return updateStorageID(tx, object.AddressOf(obj), id)
+ return updateStorageID(tx, objectCore.AddressOf(obj), id)
}
// when storage already has last object in split hierarchy and there is
// a linking object to put (or vice versa), we should update split info
// with object ids of these objects
if isParent {
- return updateSplitInfo(tx, object.AddressOf(obj), si)
+ return updateSplitInfo(tx, objectCore.AddressOf(obj), si)
}
return nil
@@ -184,7 +183,7 @@ func putUniqueIndexes(
id []byte,
) error {
isParent := si != nil
- addr := object.AddressOf(obj)
+ addr := objectCore.AddressOf(obj)
cnr := addr.Container()
objKey := objectKey(addr.Object(), make([]byte, objectKeySize))
diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go
index 21209420a..fa2f1dcd2 100644
--- a/pkg/local_object_storage/pilorama/forest.go
+++ b/pkg/local_object_storage/pilorama/forest.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// memoryForest represents multiple replicating trees sharing a single storage.
@@ -120,7 +119,7 @@ func (f *memoryForest) Close() error {
}
// TreeGetByPath implements the Forest interface.
-func (f *memoryForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
+func (f *memoryForest) TreeGetByPath(cid cid.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
if !isAttributeInternal(attr) {
return nil, ErrNotPathAttribute
}
@@ -135,7 +134,7 @@ func (f *memoryForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string,
}
// TreeGetMeta implements the Forest interface.
-func (f *memoryForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) {
+func (f *memoryForest) TreeGetMeta(cid cid.ID, treeID string, nodeID Node) (Meta, Node, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -146,7 +145,7 @@ func (f *memoryForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (M
}
// TreeGetChildren implements the Forest interface.
-func (f *memoryForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) {
+func (f *memoryForest) TreeGetChildren(cid cid.ID, treeID string, nodeID Node) ([]uint64, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -164,7 +163,7 @@ func (f *memoryForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node
}
// TreeGetOpLog implements the pilorama.Forest interface.
-func (f *memoryForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error) {
+func (f *memoryForest) TreeGetOpLog(cid cid.ID, treeID string, height uint64) (Move, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -181,7 +180,7 @@ func (f *memoryForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64)
}
// TreeDrop implements the pilorama.Forest interface.
-func (f *memoryForest) TreeDrop(cid cidSDK.ID, treeID string) error {
+func (f *memoryForest) TreeDrop(cid cid.ID, treeID string) error {
cidStr := cid.String()
if treeID == "" {
for k := range f.treeMap {
@@ -201,7 +200,7 @@ func (f *memoryForest) TreeDrop(cid cidSDK.ID, treeID string) error {
}
// TreeList implements the pilorama.Forest interface.
-func (f *memoryForest) TreeList(cid cidSDK.ID) ([]string, error) {
+func (f *memoryForest) TreeList(cid cid.ID) ([]string, error) {
var res []string
cidStr := cid.EncodeToString()
@@ -218,14 +217,14 @@ func (f *memoryForest) TreeList(cid cidSDK.ID) ([]string, error) {
}
// TreeExists implements the pilorama.Forest interface.
-func (f *memoryForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
+func (f *memoryForest) TreeExists(cid cid.ID, treeID string) (bool, error) {
fullID := cid.EncodeToString() + "/" + treeID
_, ok := f.treeMap[fullID]
return ok, nil
}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
-func (f *memoryForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
+func (f *memoryForest) TreeUpdateLastSyncHeight(cid cid.ID, treeID string, height uint64) error {
fullID := cid.EncodeToString() + "/" + treeID
t, ok := f.treeMap[fullID]
if !ok {
@@ -236,7 +235,7 @@ func (f *memoryForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, he
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
-func (f *memoryForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+func (f *memoryForest) TreeLastSyncHeight(cid cid.ID, treeID string) (uint64, error) {
fullID := cid.EncodeToString() + "/" + treeID
t, ok := f.treeMap[fullID]
if !ok {
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index 245669ff1..e7aa3614e 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -17,7 +17,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/panjf2000/ants/v2"
@@ -42,7 +41,7 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) {
blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
blobovniczatree.WithBlobovniczaShallowDepth(2),
blobovniczatree.WithBlobovniczaShallowWidth(2)),
- Policy: func(_ *object.Object, data []byte) bool {
+ Policy: func(_ *objectSDK.Object, data []byte) bool {
return len(data) <= 1<<20
},
},
@@ -93,7 +92,7 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) {
lockExpirationAttr.SetValue("103")
lock := testutil.GenerateObjectWithCID(cnr)
- lock.SetType(object.TypeLock)
+ lock.SetType(objectSDK.TypeLock)
lock.SetAttributes(lockExpirationAttr)
lockID, _ := lock.ID()
diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go
index 8d68141a5..4775cd281 100644
--- a/pkg/morph/client/container/get.go
+++ b/pkg/morph/client/container/get.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -23,7 +22,7 @@ func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) {
// AsContainerSource provides container Source interface
// from Wrapper instance.
-func AsContainerSource(w *Client) core.Source {
+func AsContainerSource(w *Client) containercore.Source {
return (*containerSource)(w)
}
diff --git a/pkg/services/object/get/util.go b/pkg/services/object/get/util.go
index 7986d05c0..3bb68862b 100644
--- a/pkg/services/object/get/util.go
+++ b/pkg/services/object/get/util.go
@@ -9,7 +9,6 @@ import (
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- internal "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -196,7 +195,7 @@ func (c *clientWrapper) get(ctx context.Context, exec *execCtx, key *ecdsa.Priva
prm.SetRawFlag()
}
- res, err := internal.GetObject(prm)
+ res, err := internalclient.GetObject(prm)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go
index 8dd0dcabb..4d0a81cde 100644
--- a/pkg/services/object_manager/tombstone/source/source.go
+++ b/pkg/services/object_manager/tombstone/source/source.go
@@ -8,7 +8,6 @@ import (
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -59,7 +58,7 @@ func (h *headerWriter) WriteHeader(_ context.Context, o *objectSDK.Object) error
// Tombstone checks if the engine stores tombstone.
// Returns nil, nil if the tombstone has been removed
// or marked for removal.
-func (s Source) Tombstone(ctx context.Context, a oid.Address, _ uint64) (*object.Object, error) {
+func (s Source) Tombstone(ctx context.Context, a oid.Address, _ uint64) (*objectSDK.Object, error) {
var hr headerWriter
var headPrm getsvc.HeadPrm
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index d4ef7df5d..2d5c104b0 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -14,7 +14,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
@@ -123,7 +122,7 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string
return nil
}
-func (s *Service) synchronizeTree(ctx context.Context, cid cidSDK.ID, from uint64,
+func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
treeID string, nodes []netmapSDK.NodeInfo) uint64 {
s.log.Debug("synchronize tree",
zap.Stringer("cid", cid),
@@ -170,7 +169,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cidSDK.ID, from uint6
return newHeight
}
-func (s *Service) synchronizeSingle(ctx context.Context, cid cidSDK.ID, treeID string, height uint64, treeClient TreeServiceClient) (uint64, error) {
+func (s *Service) synchronizeSingle(ctx context.Context, cid cid.ID, treeID string, height uint64, treeClient TreeServiceClient) (uint64, error) {
rawCID := make([]byte, sha256.Size)
cid.Encode(rawCID)
From 68a2f36636fea51aaf62e699811641525fdc1e2c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 16:30:05 +0300
Subject: [PATCH 0051/1943] [#203] morph: Mark deprecated methods
Skip staticcheck for deprecated methods. Will be fixed soon.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-adm/internal/modules/morph/initialize_nns.go | 1 +
cmd/frostfs-adm/internal/modules/morph/initialize_register.go | 1 +
cmd/frostfs-adm/internal/modules/morph/initialize_test.go | 4 ++--
pkg/morph/client/notary.go | 4 ++++
4 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go
index b43c2da33..edb7d6de5 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go
@@ -286,6 +286,7 @@ func parseNNSResolveResult(res stackitem.Item) (util.Uint160, error) {
func nnsIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
switch ct := c.(type) {
case *rpcclient.Client:
+ //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
return ct.NNSIsAvailable(nnsHash, name)
default:
b, err := unwrap.Bool(invokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize_register.go
index 1bfda7b54..27e1590cf 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_register.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_register.go
@@ -118,6 +118,7 @@ var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
func (c *initializeContext) getCandidateRegisterPrice() (int64, error) {
switch ct := c.Client.(type) {
case *rpcclient.Client:
+ //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
return ct.GetCandidateRegisterPrice()
default:
neoHash := neo.Hash
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize_test.go
index 3c6adfc3c..39a35b12e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_test.go
@@ -101,11 +101,11 @@ func generateTestData(t *testing.T, dir string, size int) {
cfg := config.Config{}
cfg.ProtocolConfiguration.Magic = 12345
cfg.ProtocolConfiguration.ValidatorsCount = size
- cfg.ProtocolConfiguration.SecondsPerBlock = 1
+ cfg.ProtocolConfiguration.SecondsPerBlock = 1 //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
cfg.ProtocolConfiguration.StandbyCommittee = pubs // sorted by glagolic letters
cfg.ProtocolConfiguration.P2PSigExtensions = true
cfg.ProtocolConfiguration.VerifyTransactions = true
- cfg.ProtocolConfiguration.VerifyBlocks = true
+ cfg.ProtocolConfiguration.VerifyBlocks = true //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
data, err := yaml.Marshal(cfg)
require.NoError(t, err)
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 464d76d2a..9c7071e5a 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -423,6 +423,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
multiaddrAccount.PrivateKey().SignHashable(uint32(magicNumber), mainTx)...,
)
+ //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
resp, err := c.client.SignAndPushP2PNotaryRequest(mainTx,
[]byte{byte(opcode.RET)},
-1,
@@ -518,6 +519,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
}
// calculate notary fee
+ //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
notaryFee, err := c.client.CalculateNotaryFee(u8n)
if err != nil {
return err
@@ -525,6 +527,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
// add network fee for cosigners
//nolint:staticcheck // waits for neo-go v0.99.3 with notary actors
+ //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
err = c.client.AddNetworkFee(
mainTx,
notaryFee,
@@ -537,6 +540,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
// define witnesses
mainTx.Scripts = c.notaryWitnesses(invokedByAlpha, multiaddrAccount, mainTx)
+ //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
resp, err := c.client.SignAndPushP2PNotaryRequest(mainTx,
[]byte{byte(opcode.RET)},
-1,
From e21c5bea21ae820433006fcd8d4dfe1e18ea754a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 16:39:29 +0300
Subject: [PATCH 0052/1943] [#203] cli: Fix error message
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/container/delete.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-cli/modules/container/delete.go b/cmd/frostfs-cli/modules/container/delete.go
index 52ee5d4fb..308c7b942 100644
--- a/cmd/frostfs-cli/modules/container/delete.go
+++ b/cmd/frostfs-cli/modules/container/delete.go
@@ -77,8 +77,8 @@ Only owner of the container has a permission to remove container.`,
if len(res.IDList()) != 0 {
commonCmd.ExitOnErr(cmd, "",
- fmt.Errorf("Container wasn't removed because LOCK objects were found.\n"+
- "Use --%s flag to remove anyway.", commonflags.ForceFlag))
+ fmt.Errorf("container wasn't removed because LOCK objects were found, "+
+ "use --%s flag to remove anyway", commonflags.ForceFlag))
}
}
}
From bab11492adae90644d6b9ed2f1d440ed705fb449 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 16:53:55 +0300
Subject: [PATCH 0053/1943] [#203] node: Resolve never-used errors
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/delete_test.go | 9 +++++++--
pkg/local_object_storage/metabase/exists_test.go | 1 +
pkg/local_object_storage/pilorama/forest_test.go | 1 +
3 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index eae8b1c66..ee161a881 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -50,6 +50,11 @@ func TestDB_Delete(t *testing.T) {
err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts))
require.NoError(t, err)
+ ts = testutil.GenerateObjectWithCID(cnr)
+
+ err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts))
+ require.NoError(t, err)
+
// delete object
err = metaDelete(db, object.AddressOf(child))
require.NoError(t, err)
@@ -62,11 +67,11 @@ func TestDB_Delete(t *testing.T) {
// check if they marked as already removed
ok, err := metaExists(db, object.AddressOf(child))
- require.Error(t, apistatus.ObjectAlreadyRemoved{})
+ require.ErrorAs(t, err, &apistatus.ObjectAlreadyRemoved{})
require.False(t, ok)
ok, err = metaExists(db, object.AddressOf(parent))
- require.Error(t, apistatus.ObjectAlreadyRemoved{})
+ require.ErrorAs(t, err, &apistatus.ObjectAlreadyRemoved{})
require.False(t, ok)
}
diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go
index 66f8c2bb3..e344e9ee8 100644
--- a/pkg/local_object_storage/metabase/exists_test.go
+++ b/pkg/local_object_storage/metabase/exists_test.go
@@ -181,6 +181,7 @@ func TestDB_Exists(t *testing.T) {
require.ErrorIs(t, err, meta.ErrObjectIsExpired)
gotObj, err = metaExists(db, object.AddressOf(nonExp))
+ require.NoError(t, err)
require.True(t, gotObj)
})
})
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 8e6f12717..ebf801ef9 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -225,6 +225,7 @@ func testForestTreeDrop(t *testing.T, s Forest) {
}
}
list, err := s.TreeList(cid)
+ require.NoError(t, err)
require.NotEmpty(t, list)
require.NoError(t, s.TreeDrop(cid, ""))
From 8908798f59285d18cb52025dd5541dc29d8643db Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 16:54:57 +0300
Subject: [PATCH 0054/1943] [#203] node: Resolve unused vars
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/get_test.go | 2 --
pkg/local_object_storage/shard/metrics_test.go | 1 -
2 files changed, 3 deletions(-)
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index d647910d5..a242a099a 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -185,8 +185,6 @@ func BenchmarkGet(b *testing.B) {
}
}
-var obj *objectSDK.Object
-
func benchmarkGet(b *testing.B, numOfObj int) {
prepareDb := func(batchSize int) (*meta.DB, []oid.Address) {
db := newDB(b,
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index ba46881e2..18e97e259 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -70,7 +70,6 @@ func (m *metricsStore) AddToPayloadSize(size int64) {
const physical = "phy"
const logical = "logic"
-const readonly = "readonly"
func TestCounters(t *testing.T) {
dir := t.TempDir()
From f32f61df8759670277818db1de3e8bdd559b3e73 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 16:56:06 +0300
Subject: [PATCH 0055/1943] [#203] pilorama: Refactor tests
Do not pass 0 as channel capacity.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/pilorama/forest_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index ebf801ef9..bbd35246c 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -747,7 +747,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
actual := constructor(t, WithMaxBatchSize(batchSize))
wg := new(sync.WaitGroup)
- ch := make(chan *Move, 0)
+ ch := make(chan *Move)
for i := 0; i < batchSize; i++ {
wg.Add(1)
go func() {
From 4ec69cbbf81e26cbb4fda91dbe3592b83d10f5dc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 16:58:37 +0300
Subject: [PATCH 0056/1943] [#203] control: Fix test
Fix shard compare loop.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/control/service_test.go | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/pkg/services/control/service_test.go b/pkg/services/control/service_test.go
index f39a304f9..8c96e2b73 100644
--- a/pkg/services/control/service_test.go
+++ b/pkg/services/control/service_test.go
@@ -85,7 +85,9 @@ func equalListShardResponseBodies(b1, b2 *control.ListShardsResponse_Body) bool
info1 := b1.Shards[i].GetBlobstor()
info2 := b2.Shards[i].GetBlobstor()
- return compareBlobstorInfo(info1, info2)
+ if !compareBlobstorInfo(info1, info2) {
+ return false
+ }
}
for i := range b1.Shards {
From 90276953719d4cf74a94a2fc10d98f1fac0aa4d9 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Apr 2023 17:05:10 +0300
Subject: [PATCH 0057/1943] [#203] node: Add staticcheck target and pre-commit
Signed-off-by: Dmitrii Stepanov
---
.pre-commit-config.yaml | 5 +++++
Makefile | 7 +++++++
2 files changed, 12 insertions(+)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 169b7bf1c..db9930e91 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -43,3 +43,8 @@ repos:
pass_filenames: false
types: [go]
language: system
+
+ - repo: https://github.com/TekWizely/pre-commit-golang
+ rev: v1.0.0-rc.1
+ hooks:
+ - id: go-staticcheck-repo-mod
diff --git a/Makefile b/Makefile
index 242a7ed1c..7d0f8d9e2 100755
--- a/Makefile
+++ b/Makefile
@@ -128,10 +128,17 @@ test:
@echo "⇒ Running go test"
@go test ./...
+pre-commit-run:
+ @pre-commit run -a --hook-stage manual
+
# Run linters
lint:
@golangci-lint --timeout=5m run
+# Run staticcheck
+staticcheck:
+ @staticcheck ./...
+
# Run linters in Docker
docker/lint:
docker run --rm -t \
From 56282edf02ba3494c922fa6133e2671b3f8e59e2 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Wed, 5 Apr 2023 14:56:15 +0300
Subject: [PATCH 0058/1943] [#166] node: Parallelize background tree service
sync
* Run sync task for nodes in parallel within errgroup worker pool
Signed-off-by: Airat Arifullin a.arifullin@yadro.com
---
pkg/services/tree/sync.go | 72 +++++++++++++++++++++++++--------------
1 file changed, 47 insertions(+), 25 deletions(-)
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 2d5c104b0..32d088c01 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -17,6 +17,7 @@ import (
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
@@ -129,37 +130,58 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
zap.String("tree", treeID),
zap.Uint64("from", from))
- newHeight := uint64(math.MaxUint64)
- for _, n := range nodes {
- height := from
- n.IterateNetworkEndpoints(func(addr string) bool {
- var a network.Address
- if err := a.FromString(addr); err != nil {
- return false
- }
+ errGroup, egCtx := errgroup.WithContext(ctx)
+ const workersCount = 4
+ errGroup.SetLimit(workersCount)
- cc, err := grpc.DialContext(ctx, a.URIAddr(), grpc.WithTransportCredentials(insecure.NewCredentials()))
- if err != nil {
- // Failed to connect, try the next address.
- return false
- }
- defer cc.Close()
+ heights := make([]uint64, len(nodes))
+ for i, n := range nodes {
+ i := i
+ n := n
+ errGroup.Go(func() error {
+ height := from
+ n.IterateNetworkEndpoints(func(addr string) bool {
+ var a network.Address
+ if err := a.FromString(addr); err != nil {
+ return false
+ }
- treeClient := NewTreeServiceClient(cc)
- for {
- h, err := s.synchronizeSingle(ctx, cid, treeID, height, treeClient)
- if height < h {
- height = h
+ cc, err := grpc.DialContext(egCtx, a.URIAddr(), grpc.WithTransportCredentials(insecure.NewCredentials()))
+ if err != nil {
+ // Failed to connect, try the next address.
+ return false
}
- if err != nil || h <= height {
- // Error with the response, try the next node.
- return true
+ defer cc.Close()
+
+ treeClient := NewTreeServiceClient(cc)
+ for {
+ h, err := s.synchronizeSingle(egCtx, cid, treeID, height, treeClient)
+ if height < h {
+ height = h
+ }
+ if err != nil || h <= height {
+ // Error with the response, try the next node.
+ return true
+ }
}
+ })
+
+ if height <= from { // do not increase starting height on fail
+ heights[i] = from
+ return nil
}
+ heights[i] = height
+ return nil
})
- if height <= from { // do not increase starting height on fail
- newHeight = from
- } else if height < newHeight { // take minimum across all clients
+ }
+
+ if err := errGroup.Wait(); err != nil {
+ s.log.Warn("failed to run tree synchronization over all nodes", zap.Error(err))
+ }
+
+ newHeight := uint64(math.MaxUint64)
+ for _, height := range heights { // take minimum across all clients
+ if height < newHeight {
newHeight = height
}
}
From e815b19101d3619282e284acd7231b15f9357b0e Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 16:58:32 +0300
Subject: [PATCH 0059/1943] [#219] morph: Resolve containedctx linter
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/accounting.go | 6 +++--
cmd/frostfs-node/main.go | 4 ++--
cmd/frostfs-node/morph.go | 5 ++--
cmd/frostfs-node/netmap.go | 5 ++--
pkg/innerring/innerring.go | 2 +-
pkg/morph/client/audit/result_test.go | 3 ++-
pkg/morph/client/constructor.go | 29 ++++-------------------
pkg/morph/client/multi.go | 33 ++++++++++++++-------------
pkg/morph/client/notifications.go | 10 ++++----
9 files changed, 43 insertions(+), 54 deletions(-)
diff --git a/cmd/frostfs-node/accounting.go b/cmd/frostfs-node/accounting.go
index 26acc0437..6a35f37d0 100644
--- a/cmd/frostfs-node/accounting.go
+++ b/cmd/frostfs-node/accounting.go
@@ -1,6 +1,8 @@
package main
import (
+ "context"
+
accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
accountingTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/accounting/grpc"
@@ -8,9 +10,9 @@ import (
accounting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting/morph"
)
-func initAccountingService(c *cfg) {
+func initAccountingService(ctx context.Context, c *cfg) {
if c.cfgMorph.client == nil {
- initMorphComponents(c)
+ initMorphComponents(ctx, c)
}
balanceMorphWrapper, err := balance.NewFromMorph(c.cfgMorph.client, c.cfgAccounting.scriptHash, 0)
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index fdb003220..d5f711a51 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -95,8 +95,8 @@ func initApp(ctx context.Context, c *cfg) {
})
initAndLog(c, "gRPC", initGRPC)
- initAndLog(c, "netmap", initNetmapService)
- initAndLog(c, "accounting", initAccountingService)
+ initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) })
+ initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(c, "session", initSessionService)
initAndLog(c, "reputation", initReputationService)
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 2dfbe5c18..2db865ca3 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -27,7 +27,7 @@ const (
notaryDepositRetriesAmount = 300
)
-func initMorphComponents(c *cfg) {
+func initMorphComponents(ctx context.Context, c *cfg) {
var err error
addresses := morphconfig.RPCEndpoint(c.appCfg)
@@ -38,7 +38,8 @@ func initMorphComponents(c *cfg) {
addresses[i], addresses[j] = addresses[j], addresses[i]
})
- cli, err := client.New(c.key,
+ cli, err := client.New(ctx,
+ c.key,
client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)),
client.WithLogger(c.log),
client.WithEndpoints(addresses...),
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index 845649dee..d9b1c9208 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -2,6 +2,7 @@ package main
import (
"bytes"
+ "context"
"errors"
"fmt"
@@ -135,7 +136,7 @@ func (c *cfg) addressNum() int {
return 0
}
-func initNetmapService(c *cfg) {
+func initNetmapService(ctx context.Context, c *cfg) {
network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo)
c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes())
parseAttributes(c)
@@ -144,7 +145,7 @@ func initNetmapService(c *cfg) {
readSubnetCfg(c)
if c.cfgMorph.client == nil {
- initMorphComponents(c)
+ initMorphComponents(ctx, c)
}
initNetmapState(c)
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 3b42a5853..a91d2fd0d 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -464,8 +464,8 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
}
return client.New(
+ ctx,
p.key,
- client.WithContext(ctx),
client.WithLogger(p.log),
client.WithDialTimeout(p.cfg.GetDuration(p.name+".dial_timeout")),
client.WithSigner(p.sgn),
diff --git a/pkg/morph/client/audit/result_test.go b/pkg/morph/client/audit/result_test.go
index a0137e3b8..5ce1cc740 100644
--- a/pkg/morph/client/audit/result_test.go
+++ b/pkg/morph/client/audit/result_test.go
@@ -1,6 +1,7 @@
package audit
import (
+ "context"
"testing"
"time"
@@ -26,7 +27,7 @@ func TestAuditResults(t *testing.T) {
auditHash, err := util.Uint160DecodeStringLE(sAuditHash)
require.NoError(t, err)
- morphClient, err := client.New(key, client.WithEndpoints(client.Endpoint{Address: endpoint}))
+ morphClient, err := client.New(context.Background(), key, client.WithEndpoints(client.Endpoint{Address: endpoint}))
require.NoError(t, err)
auditClientWrapper, err := NewFromMorph(morphClient, auditHash, 0)
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index e4569ad0e..9ed275029 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -29,10 +29,7 @@ type Option func(*cfg)
type Callback func()
// groups the configurations with default values.
-// nolint: containedctx
type cfg struct {
- ctx context.Context // neo-go client context
-
dialTimeout time.Duration // client dial timeout
logger *logger.Logger // logging component
@@ -57,7 +54,6 @@ const (
func defaultConfig() *cfg {
return &cfg{
- ctx: context.Background(),
dialTimeout: defaultDialTimeout,
logger: &logger.Logger{Logger: zap.L()},
waitInterval: defaultWaitInterval,
@@ -84,7 +80,7 @@ func defaultConfig() *cfg {
// If desired option satisfies the default value, it can be omitted.
// If multiple options of the same config value are supplied,
// the option with the highest index in the arguments will be used.
-func New(key *keys.PrivateKey, opts ...Option) (*Client, error) {
+func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, error) {
if key == nil {
panic("empty private key")
}
@@ -142,20 +138,20 @@ func New(key *keys.PrivateKey, opts ...Option) (*Client, error) {
return nil, fmt.Errorf("could not create RPC actor: %w", err)
}
} else {
- cli.client, act, err = cli.newCli(cli.endpoints.list[0].Address)
+ cli.client, act, err = cli.newCli(ctx, cli.endpoints.list[0].Address)
if err != nil {
return nil, fmt.Errorf("could not create RPC client: %w", err)
}
}
cli.setActor(act)
- go cli.notificationLoop()
+ go cli.notificationLoop(ctx)
return cli, nil
}
-func (c *Client) newCli(endpoint string) (*rpcclient.WSClient, *actor.Actor, error) {
- cli, err := rpcclient.NewWS(c.cfg.ctx, endpoint, rpcclient.Options{
+func (c *Client) newCli(ctx context.Context, endpoint string) (*rpcclient.WSClient, *actor.Actor, error) {
+ cli, err := rpcclient.NewWS(ctx, endpoint, rpcclient.Options{
DialTimeout: c.cfg.dialTimeout,
})
if err != nil {
@@ -201,21 +197,6 @@ func newClientCache() cache {
}
}
-// WithContext returns a client constructor option that
-// specifies the neo-go client context.
-//
-// Ignores nil value. Has no effect if WithSingleClient
-// is provided.
-//
-// If option not provided, context.Background() is used.
-func WithContext(ctx context.Context) Option {
- return func(c *cfg) {
- if ctx != nil {
- c.ctx = ctx
- }
- }
-}
-
// WithDialTimeout returns a client constructor option
// that specifies neo-go client dial timeout duration.
//
diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go
index 54af56b21..5d736839a 100644
--- a/pkg/morph/client/multi.go
+++ b/pkg/morph/client/multi.go
@@ -1,6 +1,7 @@
package client
import (
+ "context"
"sort"
"time"
@@ -32,7 +33,7 @@ func (e *endpoints) init(ee []Endpoint) {
e.list = ee
}
-func (c *Client) switchRPC() bool {
+func (c *Client) switchRPC(ctx context.Context) bool {
c.switchLock.Lock()
defer c.switchLock.Unlock()
@@ -41,7 +42,7 @@ func (c *Client) switchRPC() bool {
// Iterate endpoints in the order of decreasing priority.
for c.endpoints.curr = range c.endpoints.list {
newEndpoint := c.endpoints.list[c.endpoints.curr].Address
- cli, act, err := c.newCli(newEndpoint)
+ cli, act, err := c.newCli(ctx, newEndpoint)
if err != nil {
c.logger.Warn("could not establish connection to the switched RPC node",
zap.String("endpoint", newEndpoint),
@@ -56,7 +57,7 @@ func (c *Client) switchRPC() bool {
c.logger.Info("connection to the new RPC node has been established",
zap.String("endpoint", newEndpoint))
- subs, ok := c.restoreSubscriptions(cli, newEndpoint, false)
+ subs, ok := c.restoreSubscriptions(ctx, cli, newEndpoint, false)
if !ok {
// new WS client does not allow
// restoring subscription, client
@@ -74,7 +75,7 @@ func (c *Client) switchRPC() bool {
if c.cfg.switchInterval != 0 && !c.switchIsActive.Load() &&
c.endpoints.list[c.endpoints.curr].Priority != c.endpoints.list[0].Priority {
c.switchIsActive.Store(true)
- go c.switchToMostPrioritized()
+ go c.switchToMostPrioritized(ctx)
}
return true
@@ -83,7 +84,7 @@ func (c *Client) switchRPC() bool {
return false
}
-func (c *Client) notificationLoop() {
+func (c *Client) notificationLoop(ctx context.Context) {
var e any
var ok bool
@@ -95,7 +96,7 @@ func (c *Client) notificationLoop() {
c.switchLock.RUnlock()
select {
- case <-c.cfg.ctx.Done():
+ case <-ctx.Done():
_ = c.UnsubscribeAll()
c.close()
@@ -111,17 +112,17 @@ func (c *Client) notificationLoop() {
}
if ok {
- c.routeEvent(e)
+ c.routeEvent(ctx, e)
continue
}
- if !c.reconnect() {
+ if !c.reconnect(ctx) {
return
}
}
}
-func (c *Client) routeEvent(e any) {
+func (c *Client) routeEvent(ctx context.Context, e any) {
typedNotification := rpcclient.Notification{Value: e}
switch e.(type) {
@@ -135,7 +136,7 @@ func (c *Client) routeEvent(e any) {
select {
case c.notifications <- typedNotification:
- case <-c.cfg.ctx.Done():
+ case <-ctx.Done():
_ = c.UnsubscribeAll()
c.close()
case <-c.closeChan:
@@ -144,7 +145,7 @@ func (c *Client) routeEvent(e any) {
}
}
-func (c *Client) reconnect() bool {
+func (c *Client) reconnect(ctx context.Context) bool {
if closeErr := c.client.GetError(); closeErr != nil {
c.logger.Warn("switching to the next RPC node",
zap.String("reason", closeErr.Error()),
@@ -156,7 +157,7 @@ func (c *Client) reconnect() bool {
return true
}
- if !c.switchRPC() {
+ if !c.switchRPC(ctx) {
c.logger.Error("could not establish connection to any RPC node")
// could not connect to all endpoints =>
@@ -173,7 +174,7 @@ func (c *Client) reconnect() bool {
return true
}
-func (c *Client) switchToMostPrioritized() {
+func (c *Client) switchToMostPrioritized(ctx context.Context) {
t := time.NewTicker(c.cfg.switchInterval)
defer t.Stop()
defer c.switchIsActive.Store(false)
@@ -181,7 +182,7 @@ func (c *Client) switchToMostPrioritized() {
mainLoop:
for {
select {
- case <-c.cfg.ctx.Done():
+ case <-ctx.Done():
return
case <-t.C:
c.switchLock.RLock()
@@ -207,7 +208,7 @@ mainLoop:
tryE := e.Address
- cli, act, err := c.newCli(tryE)
+ cli, act, err := c.newCli(ctx, tryE)
if err != nil {
c.logger.Warn("could not create client to the higher priority node",
zap.String("endpoint", tryE),
@@ -216,7 +217,7 @@ mainLoop:
continue
}
- if subs, ok := c.restoreSubscriptions(cli, tryE, true); ok {
+ if subs, ok := c.restoreSubscriptions(ctx, cli, tryE, true); ok {
c.switchLock.Lock()
// higher priority node could have been
diff --git a/pkg/morph/client/notifications.go b/pkg/morph/client/notifications.go
index ed0368700..300bab825 100644
--- a/pkg/morph/client/notifications.go
+++ b/pkg/morph/client/notifications.go
@@ -1,6 +1,8 @@
package client
import (
+ "context"
+
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/neorpc"
@@ -227,7 +229,7 @@ type subsInfo struct {
// one contains subscription information applied to the passed cli
// and receivers for the updated subscriptions.
// Does not change Client instance.
-func (c *Client) restoreSubscriptions(cli *rpcclient.WSClient, endpoint string, background bool) (si subsInfo, ok bool) {
+func (c *Client) restoreSubscriptions(ctx context.Context, cli *rpcclient.WSClient, endpoint string, background bool) (si subsInfo, ok bool) {
var (
err error
id string
@@ -240,7 +242,7 @@ func (c *Client) restoreSubscriptions(cli *rpcclient.WSClient, endpoint string,
notificationRcv := make(chan *state.ContainedNotificationEvent)
notaryReqRcv := make(chan *result.NotaryRequestEvent)
- c.startListen(stopCh, blockRcv, notificationRcv, notaryReqRcv, background)
+ c.startListen(ctx, stopCh, blockRcv, notificationRcv, notaryReqRcv, background)
if background {
c.switchLock.RLock()
@@ -304,7 +306,7 @@ func (c *Client) restoreSubscriptions(cli *rpcclient.WSClient, endpoint string,
return si, true
}
-func (c *Client) startListen(stopCh <-chan struct{}, blockRcv <-chan *block.Block,
+func (c *Client) startListen(ctx context.Context, stopCh <-chan struct{}, blockRcv <-chan *block.Block,
notificationRcv <-chan *state.ContainedNotificationEvent, notaryReqRcv <-chan *result.NotaryRequestEvent, background bool) {
// neo-go WS client says to _always_ read notifications
// from its channel. Subscribing to any notification
@@ -335,7 +337,7 @@ func (c *Client) startListen(stopCh <-chan struct{}, blockRcv <-chan *block.Bloc
continue
}
- c.routeEvent(e)
+ c.routeEvent(ctx, e)
}
}()
}
From 775179f82354b8c5985df77b427c1c49814ebb62 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 17:52:32 +0300
Subject: [PATCH 0060/1943] [#219] morph: Refactor notary invoke
Resolve funlen linter for Client.notaryInvoke method.
Signed-off-by: Dmitrii Stepanov
---
pkg/morph/client/notary.go | 114 +++++++++++++++++++++----------------
1 file changed, 65 insertions(+), 49 deletions(-)
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 9c7071e5a..96dca0319 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -16,6 +16,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/neorpc"
+ "github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/notary"
sc "github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -446,19 +447,12 @@ func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args
return c.notaryInvoke(true, true, designate, nonce, &vub, method, args...)
}
-// nolint: funlen
func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) error {
- alphabetList, err := c.notary.alphabetSource() // prepare arguments for test invocation
+ alphabetList, err := c.notary.alphabetSource()
if err != nil {
return err
}
- u8n := uint8(len(alphabetList))
-
- if !invokedByAlpha {
- u8n++
- }
-
cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee)
if err != nil {
return err
@@ -469,38 +463,77 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
return err
}
- // make test invocation of the method
- test, err := c.client.InvokeFunction(contract, method, params, cosigners)
+ test, err := c.makeTestInvocation(contract, method, params, cosigners)
if err != nil {
return err
}
- // check invocation state
- if test.State != HaltState {
- return wrapFrostFSError(¬HaltStateError{state: test.State, exception: test.FaultException})
- }
-
- // if test invocation failed, then return error
- if len(test.Script) == 0 {
- return wrapFrostFSError(errEmptyInvocationScript)
- }
-
- // after test invocation we build main multisig transaction
-
multiaddrAccount, err := c.notaryMultisigAccount(alphabetList, committee, invokedByAlpha)
if err != nil {
return err
}
- var until uint32
+ until, err := c.getUntilValue(vub)
+ if err != nil {
+ return err
+ }
+ mainTx, err := c.buildMainTx(invokedByAlpha, nonce, alphabetList, test, cosigners, multiaddrAccount, until)
+ if err != nil {
+ return err
+ }
+
+ //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
+ resp, err := c.client.SignAndPushP2PNotaryRequest(mainTx,
+ []byte{byte(opcode.RET)},
+ -1,
+ 0,
+ c.notary.fallbackTime,
+ c.acc)
+ if err != nil && !alreadyOnChainError(err) {
+ return err
+ }
+
+ c.logger.Debug("notary request invoked",
+ zap.String("method", method),
+ zap.Uint32("valid_until_block", until),
+ zap.Uint32("fallback_valid_for", c.notary.fallbackTime),
+ zap.Stringer("tx_hash", resp.Hash().Reverse()))
+
+ return nil
+}
+
+func (c *Client) makeTestInvocation(contract util.Uint160, method string, params []sc.Parameter, cosigners []transaction.Signer) (*result.Invoke, error) {
+ test, err := c.client.InvokeFunction(contract, method, params, cosigners)
+ if err != nil {
+ return nil, err
+ }
+
+ if test.State != HaltState {
+ return nil, wrapFrostFSError(¬HaltStateError{state: test.State, exception: test.FaultException})
+ }
+
+ if len(test.Script) == 0 {
+ return nil, wrapFrostFSError(errEmptyInvocationScript)
+ }
+ return test, nil
+}
+
+func (c *Client) getUntilValue(vub *uint32) (uint32, error) {
if vub != nil {
- until = *vub
- } else {
- until, err = c.notaryTxValidationLimit()
- if err != nil {
- return err
- }
+ return *vub, nil
+ }
+ return c.notaryTxValidationLimit()
+}
+
+func (c *Client) buildMainTx(invokedByAlpha bool, nonce uint32, alphabetList keys.PublicKeys, test *result.Invoke,
+ cosigners []transaction.Signer, multiaddrAccount *wallet.Account, until uint32) (*transaction.Transaction, error) {
+ // after test invocation we build main multisig transaction
+
+ u8n := uint8(len(alphabetList))
+
+ if !invokedByAlpha {
+ u8n++
}
// prepare main tx
@@ -522,7 +555,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
//lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
notaryFee, err := c.client.CalculateNotaryFee(u8n)
if err != nil {
- return err
+ return nil, err
}
// add network fee for cosigners
@@ -534,30 +567,13 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
c.notaryAccounts(invokedByAlpha, multiaddrAccount)...,
)
if err != nil {
- return err
+ return nil, err
}
// define witnesses
mainTx.Scripts = c.notaryWitnesses(invokedByAlpha, multiaddrAccount, mainTx)
- //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
- resp, err := c.client.SignAndPushP2PNotaryRequest(mainTx,
- []byte{byte(opcode.RET)},
- -1,
- 0,
- c.notary.fallbackTime,
- c.acc)
- if err != nil && !alreadyOnChainError(err) {
- return err
- }
-
- c.logger.Debug("notary request invoked",
- zap.String("method", method),
- zap.Uint32("valid_until_block", until),
- zap.Uint32("fallback_valid_for", c.notary.fallbackTime),
- zap.Stringer("tx_hash", resp.Hash().Reverse()))
-
- return nil
+ return mainTx, nil
}
func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, committee bool) ([]transaction.Signer, error) {
From d07e40d6fe0ce96ba4a6b6e709991db84bce368c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 18:11:56 +0300
Subject: [PATCH 0061/1943] [#219] morph: Refactor morph event listener
Resolve funlen and gocognit linters for listener.listenLoop method.
Signed-off-by: Dmitrii Stepanov
---
pkg/morph/event/listener.go | 54 ++++++++++++++++++++++---------------
1 file changed, 33 insertions(+), 21 deletions(-)
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index 0bc7e89f8..3de199328 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/neorpc/result"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -210,7 +211,6 @@ func (l *listener) subscribe(errCh chan error) {
}
}
-// nolint: funlen, gocognit
func (l *listener) listenLoop(ctx context.Context, intErr chan<- error, subErrCh chan error) {
chs := l.subscriber.NotificationChannels()
@@ -243,12 +243,7 @@ loop:
continue loop
}
- if err := l.pool.Submit(func() {
- l.parseAndHandleNotification(notifyEvent)
- }); err != nil {
- l.log.Warn("listener worker pool drained",
- zap.Int("capacity", l.pool.Cap()))
- }
+ l.handleNotifyEvent(notifyEvent)
case notaryEvent, ok := <-chs.NotaryRequestsCh:
if !ok {
l.log.Warn("stop event listener by notary channel")
@@ -262,12 +257,7 @@ loop:
continue loop
}
- if err := l.pool.Submit(func() {
- l.parseAndHandleNotary(notaryEvent)
- }); err != nil {
- l.log.Warn("listener worker pool drained",
- zap.Int("capacity", l.pool.Cap()))
- }
+ l.handleNotaryEvent(notaryEvent)
case b, ok := <-chs.BlockCh:
if !ok {
l.log.Warn("stop event listener by block channel")
@@ -281,18 +271,40 @@ loop:
continue loop
}
- if err := l.pool.Submit(func() {
- for i := range l.blockHandlers {
- l.blockHandlers[i](b)
- }
- }); err != nil {
- l.log.Warn("listener worker pool drained",
- zap.Int("capacity", l.pool.Cap()))
- }
+ l.handleBlockEvent(b)
}
}
}
+func (l *listener) handleBlockEvent(b *block.Block) {
+ if err := l.pool.Submit(func() {
+ for i := range l.blockHandlers {
+ l.blockHandlers[i](b)
+ }
+ }); err != nil {
+ l.log.Warn("listener worker pool drained",
+ zap.Int("capacity", l.pool.Cap()))
+ }
+}
+
+func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) {
+ if err := l.pool.Submit(func() {
+ l.parseAndHandleNotary(notaryEvent)
+ }); err != nil {
+ l.log.Warn("listener worker pool drained",
+ zap.Int("capacity", l.pool.Cap()))
+ }
+}
+
+func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEvent) {
+ if err := l.pool.Submit(func() {
+ l.parseAndHandleNotification(notifyEvent)
+ }); err != nil {
+ l.log.Warn("listener worker pool drained",
+ zap.Int("capacity", l.pool.Cap()))
+ }
+}
+
func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotificationEvent) {
log := l.log.With(
zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()),
From fe8773507342a753e475e4884fa40a592bfce128 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 18:21:14 +0300
Subject: [PATCH 0062/1943] [#219] morph: Refactor notary preparator
Resolve funlen linter for Preparator.Prepare method.
Signed-off-by: Dmitrii Stepanov
---
pkg/morph/event/notary_preparator.go | 103 ++++++++++++++-------------
1 file changed, 55 insertions(+), 48 deletions(-)
diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go
index f85b5b648..3d499fec5 100644
--- a/pkg/morph/event/notary_preparator.go
+++ b/pkg/morph/event/notary_preparator.go
@@ -103,55 +103,8 @@ func notaryPreparator(prm PreparatorPrm) NotaryPreparator {
// transaction is expected to be received one more time
// from the Notary service but already signed. This happens
// since every notary call is a new notary request in fact.
-//
-// nolint: funlen
func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
- // notary request's main tx is expected to have
- // three or four witnesses: one for proxy contract,
- // one for alphabet multisignature, one optional for
- // notary's invoker and one is for notary contract
- ln := len(nr.MainTransaction.Scripts)
- switch ln {
- case 3, 4:
- default:
- return nil, errUnexpectedWitnessAmount
- }
- invokerWitness := ln == 4
-
- // alphabet node should handle only notary requests
- // that have been sent unsigned(by storage nodes) =>
- // such main TXs should have dummy scripts as an
- // invocation script
- //
- // this check prevents notary flow recursion
- if !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) {
- return nil, ErrTXAlreadyHandled
- }
-
- currentAlphabet, err := p.alphaKeys()
- if err != nil {
- return nil, fmt.Errorf("could not fetch Alphabet public keys: %w", err)
- }
-
- err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet)
- if err != nil {
- return nil, err
- }
-
- // validate main TX's notary attribute
- err = p.validateAttributes(nr.MainTransaction.Attributes, currentAlphabet, invokerWitness)
- if err != nil {
- return nil, err
- }
-
- // validate main TX's witnesses
- err = p.validateWitnesses(nr.MainTransaction.Scripts, currentAlphabet, invokerWitness)
- if err != nil {
- return nil, err
- }
-
- // validate main TX expiration
- err = p.validateExpiration(nr.FallbackTransaction)
+ err := p.validateNotaryRequest(nr)
if err != nil {
return nil, err
}
@@ -219,6 +172,60 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) {
}, nil
}
+func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error {
+ // notary request's main tx is expected to have
+ // three or four witnesses: one for proxy contract,
+ // one for alphabet multisignature, one optional for
+ // notary's invoker and one is for notary contract
+ ln := len(nr.MainTransaction.Scripts)
+ switch ln {
+ case 3, 4:
+ default:
+ return errUnexpectedWitnessAmount
+ }
+ invokerWitness := ln == 4
+
+ // alphabet node should handle only notary requests
+ // that have been sent unsigned(by storage nodes) =>
+ // such main TXs should have dummy scripts as an
+ // invocation script
+ //
+ // this check prevents notary flow recursion
+ if !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) {
+ return ErrTXAlreadyHandled
+ }
+
+ currentAlphabet, err := p.alphaKeys()
+ if err != nil {
+ return fmt.Errorf("could not fetch Alphabet public keys: %w", err)
+ }
+
+ err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet)
+ if err != nil {
+ return err
+ }
+
+ // validate main TX's notary attribute
+ err = p.validateAttributes(nr.MainTransaction.Attributes, currentAlphabet, invokerWitness)
+ if err != nil {
+ return err
+ }
+
+ // validate main TX's witnesses
+ err = p.validateWitnesses(nr.MainTransaction.Scripts, currentAlphabet, invokerWitness)
+ if err != nil {
+ return err
+ }
+
+ // validate main TX expiration
+ err = p.validateExpiration(nr.FallbackTransaction)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (p Preparator) validateParameterOpcodes(ops []Op) error {
l := len(ops)
From 3dbff0a478d60423ccb5a5d84934bd977c50e913 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 6 Apr 2023 09:45:10 +0300
Subject: [PATCH 0063/1943] [#222] auditsvc: Resolve containedctx linter
Resolve containedctx linter for commonCommunicatorPrm type.
Signed-off-by: Dmitrii Stepanov
---
pkg/innerring/rpc.go | 8 ++++----
pkg/services/audit/auditor/context.go | 7 ++-----
pkg/services/audit/auditor/pdp.go | 3 +--
pkg/services/audit/auditor/pop.go | 3 +--
pkg/services/audit/auditor/por.go | 3 +--
5 files changed, 9 insertions(+), 15 deletions(-)
diff --git a/pkg/innerring/rpc.go b/pkg/innerring/rpc.go
index 665e94232..c58e0ddc4 100644
--- a/pkg/innerring/rpc.go
+++ b/pkg/innerring/rpc.go
@@ -131,7 +131,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne
}
// GetHeader requests node from the container under audit to return object header by id.
-func (c *ClientCache) GetHeader(prm auditor.GetHeaderPrm) (*object.Object, error) {
+func (c *ClientCache) GetHeader(ctx context.Context, prm auditor.GetHeaderPrm) (*object.Object, error) {
var objAddress oid.Address
objAddress.SetContainer(prm.CID)
objAddress.SetObject(prm.OID)
@@ -148,7 +148,7 @@ func (c *ClientCache) GetHeader(prm auditor.GetHeaderPrm) (*object.Object, error
return nil, fmt.Errorf("can't setup remote connection with %s: %w", info.AddressGroup(), err)
}
- cctx, cancel := context.WithTimeout(prm.Context, c.headTimeout)
+ cctx, cancel := context.WithTimeout(ctx, c.headTimeout)
var obj *object.Object
@@ -169,7 +169,7 @@ func (c *ClientCache) GetHeader(prm auditor.GetHeaderPrm) (*object.Object, error
// GetRangeHash requests node from the container under audit to return Tillich-Zemor hash of the
// payload range of the object with specified identifier.
-func (c *ClientCache) GetRangeHash(prm auditor.GetRangeHashPrm) ([]byte, error) {
+func (c *ClientCache) GetRangeHash(ctx context.Context, prm auditor.GetRangeHashPrm) ([]byte, error) {
var objAddress oid.Address
objAddress.SetContainer(prm.CID)
objAddress.SetObject(prm.OID)
@@ -186,7 +186,7 @@ func (c *ClientCache) GetRangeHash(prm auditor.GetRangeHashPrm) ([]byte, error)
return nil, fmt.Errorf("can't setup remote connection with %s: %w", info.AddressGroup(), err)
}
- cctx, cancel := context.WithTimeout(prm.Context, c.rangeTimeout)
+ cctx, cancel := context.WithTimeout(ctx, c.rangeTimeout)
h, err := frostfsapiclient.HashObjectRange(cctx, cli, objAddress, prm.Range)
diff --git a/pkg/services/audit/auditor/context.go b/pkg/services/audit/auditor/context.go
index 4a5cbb457..f2778fd65 100644
--- a/pkg/services/audit/auditor/context.go
+++ b/pkg/services/audit/auditor/context.go
@@ -81,10 +81,7 @@ type ContextPrm struct {
pdpWorkerPool, porWorkerPool util.WorkerPool
}
-// nolint: containedctx
type commonCommunicatorPrm struct {
- Context context.Context
-
Node netmap.NodeInfo
OID oid.ID
@@ -109,11 +106,11 @@ type GetRangeHashPrm struct {
// component of communication with container nodes.
type ContainerCommunicator interface {
// GetHeader must return object header from the container node.
- GetHeader(GetHeaderPrm) (*object.Object, error)
+ GetHeader(context.Context, GetHeaderPrm) (*object.Object, error)
// GetRangeHash must return homomorphic Tillich-Zemor hash of payload range of the
// object stored in container node.
- GetRangeHash(GetRangeHashPrm) ([]byte, error)
+ GetRangeHash(context.Context, GetRangeHashPrm) ([]byte, error)
}
// NewContext creates, initializes and returns Context.
diff --git a/pkg/services/audit/auditor/pdp.go b/pkg/services/audit/auditor/pdp.go
index beb2fdcf8..13b50e498 100644
--- a/pkg/services/audit/auditor/pdp.go
+++ b/pkg/services/audit/auditor/pdp.go
@@ -118,7 +118,6 @@ func (c *Context) collectHashes(p *gamePair) {
rand.Shuffle(len(order), func(i, j int) { order[i], order[j] = order[j], order[i] })
var getRangeHashPrm GetRangeHashPrm
- getRangeHashPrm.Context = c.task.AuditContext()
getRangeHashPrm.CID = c.task.ContainerID()
getRangeHashPrm.OID = p.id
getRangeHashPrm.Node = n
@@ -138,7 +137,7 @@ func (c *Context) collectHashes(p *gamePair) {
getRangeHashPrm.Range = rngs[i]
- h, err := c.cnrCom.GetRangeHash(getRangeHashPrm)
+ h, err := c.cnrCom.GetRangeHash(c.task.AuditContext(), getRangeHashPrm)
if err != nil {
c.log.Debug("could not get payload range hash",
zap.Stringer("id", p.id),
diff --git a/pkg/services/audit/auditor/pop.go b/pkg/services/audit/auditor/pop.go
index f8a16cb0a..45afa7937 100644
--- a/pkg/services/audit/auditor/pop.go
+++ b/pkg/services/audit/auditor/pop.go
@@ -49,7 +49,6 @@ func (c *Context) processObjectPlacement(id oid.ID, nodes []netmap.NodeInfo, rep
)
var getHeaderPrm GetHeaderPrm
- getHeaderPrm.Context = c.task.AuditContext()
getHeaderPrm.OID = id
getHeaderPrm.CID = c.task.ContainerID()
getHeaderPrm.NodeIsRelay = false
@@ -58,7 +57,7 @@ func (c *Context) processObjectPlacement(id oid.ID, nodes []netmap.NodeInfo, rep
getHeaderPrm.Node = nodes[i]
// try to get object header from node
- hdr, err := c.cnrCom.GetHeader(getHeaderPrm)
+ hdr, err := c.cnrCom.GetHeader(c.task.AuditContext(), getHeaderPrm)
if err != nil {
c.log.Debug("could not get object header from candidate",
zap.Stringer("id", id),
diff --git a/pkg/services/audit/auditor/por.go b/pkg/services/audit/auditor/por.go
index 1922213e5..ff322d6e1 100644
--- a/pkg/services/audit/auditor/por.go
+++ b/pkg/services/audit/auditor/por.go
@@ -48,7 +48,6 @@ func (c *Context) checkStorageGroupPoR(sgID oid.ID, sg storagegroupSDK.StorageGr
)
var getHeaderPrm GetHeaderPrm
- getHeaderPrm.Context = c.task.AuditContext()
getHeaderPrm.CID = c.task.ContainerID()
getHeaderPrm.NodeIsRelay = true
@@ -81,7 +80,7 @@ func (c *Context) checkStorageGroupPoR(sgID oid.ID, sg storagegroupSDK.StorageGr
getHeaderPrm.Node = flat[j]
- hdr, err := c.cnrCom.GetHeader(getHeaderPrm)
+ hdr, err := c.cnrCom.GetHeader(c.task.AuditContext(), getHeaderPrm)
if err != nil {
c.log.Debug("can't head object",
zap.String("remote_node", netmap.StringifyPublicKey(flat[j])),
From e8d340287f34073a08ba88a33a833ef290709bd9 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 6 Apr 2023 11:36:25 +0300
Subject: [PATCH 0064/1943] [#222] auditsvc: Refactor audit task
Resolve containedctx linter. Cancel task by listen cancel.
Signed-off-by: Dmitrii Stepanov
---
pkg/innerring/processors/audit/process.go | 22 +++++++++++----------
pkg/innerring/processors/audit/processor.go | 2 +-
pkg/services/audit/auditor/context.go | 4 +---
pkg/services/audit/auditor/exec.go | 10 ++++++----
pkg/services/audit/auditor/pdp.go | 17 ++++++++--------
pkg/services/audit/auditor/pop.go | 14 +++++++------
pkg/services/audit/auditor/por.go | 9 +++++----
pkg/services/audit/task.go | 20 +++++++------------
pkg/services/audit/taskmanager/listen.go | 21 +++++++++++++++-----
pkg/services/audit/taskmanager/push.go | 5 +----
10 files changed, 66 insertions(+), 58 deletions(-)
diff --git a/pkg/innerring/processors/audit/process.go b/pkg/innerring/processors/audit/process.go
index 7f148e57e..ecfc407be 100644
--- a/pkg/innerring/processors/audit/process.go
+++ b/pkg/innerring/processors/audit/process.go
@@ -46,15 +46,21 @@ func (ap *Processor) processStartAudit(epoch uint64) {
return
}
- var auditCtx context.Context
- auditCtx, ap.prevAuditCanceler = context.WithCancel(context.Background())
+ cancelChannel := make(chan struct{})
+ ap.prevAuditCanceler = func() {
+ select {
+ case <-cancelChannel: // already closed
+ default:
+ close(cancelChannel)
+ }
+ }
pivot := make([]byte, sha256.Size)
- ap.startAuditTasksOnContainers(auditCtx, containers, log, pivot, nm, epoch)
+ ap.startAuditTasksOnContainers(cancelChannel, containers, log, pivot, nm, epoch)
}
-func (ap *Processor) startAuditTasksOnContainers(ctx context.Context, containers []cid.ID, log *zap.Logger, pivot []byte, nm *netmap.NetMap, epoch uint64) {
+func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{}, containers []cid.ID, log *zap.Logger, pivot []byte, nm *netmap.NetMap, epoch uint64) {
for i := range containers {
cnr, err := cntClient.Get(ap.containerClient, containers[i]) // get container structure
if err != nil {
@@ -107,18 +113,14 @@ func (ap *Processor) startAuditTasksOnContainers(ctx context.Context, containers
epoch: epoch,
rep: ap.reporter,
}).
- WithAuditContext(ctx).
+ WithCancelChannel(cancelChannel).
WithContainerID(containers[i]).
WithStorageGroupList(storageGroups).
WithContainerStructure(cnr.Value).
WithContainerNodes(nodes).
WithNetworkMap(nm)
- if err := ap.taskManager.PushTask(auditTask); err != nil {
- ap.log.Error("could not push audit task",
- zap.String("error", err.Error()),
- )
- }
+ ap.taskManager.PushTask(auditTask)
}
}
diff --git a/pkg/innerring/processors/audit/processor.go b/pkg/innerring/processors/audit/processor.go
index cb514b165..31e8a8c55 100644
--- a/pkg/innerring/processors/audit/processor.go
+++ b/pkg/innerring/processors/audit/processor.go
@@ -24,7 +24,7 @@ type (
}
TaskManager interface {
- PushTask(*audit.Task) error
+ PushTask(*audit.Task)
// Must skip all tasks planned for execution and
// return their number.
diff --git a/pkg/services/audit/auditor/context.go b/pkg/services/audit/auditor/context.go
index f2778fd65..bf720c330 100644
--- a/pkg/services/audit/auditor/context.go
+++ b/pkg/services/audit/auditor/context.go
@@ -194,9 +194,7 @@ func (c *Context) init() {
)}
}
-func (c *Context) expired() bool {
- ctx := c.task.AuditContext()
-
+func (c *Context) expired(ctx context.Context) bool {
select {
case <-ctx.Done():
c.log.Debug("audit context is done",
diff --git a/pkg/services/audit/auditor/exec.go b/pkg/services/audit/auditor/exec.go
index ceb6556e2..e603818b8 100644
--- a/pkg/services/audit/auditor/exec.go
+++ b/pkg/services/audit/auditor/exec.go
@@ -1,16 +1,18 @@
package auditor
import (
+ "context"
"fmt"
)
// Execute audits container data.
-func (c *Context) Execute() {
+func (c *Context) Execute(ctx context.Context, onCompleted func()) {
+ defer onCompleted()
c.init()
checks := []struct {
name string
- exec func()
+ exec func(context.Context)
}{
{name: "PoR", exec: c.executePoR},
{name: "PoP", exec: c.executePoP},
@@ -20,11 +22,11 @@ func (c *Context) Execute() {
for i := range checks {
c.log.Debug(fmt.Sprintf("executing %s check...", checks[i].name))
- if c.expired() {
+ if c.expired(ctx) {
break
}
- checks[i].exec()
+ checks[i].exec(ctx)
if i == len(checks)-1 {
c.complete()
diff --git a/pkg/services/audit/auditor/pdp.go b/pkg/services/audit/auditor/pdp.go
index 13b50e498..8a184eb7e 100644
--- a/pkg/services/audit/auditor/pdp.go
+++ b/pkg/services/audit/auditor/pdp.go
@@ -2,6 +2,7 @@ package auditor
import (
"bytes"
+ "context"
"sync"
"time"
@@ -13,12 +14,12 @@ import (
"go.uber.org/zap"
)
-func (c *Context) executePDP() {
- c.processPairs()
+func (c *Context) executePDP(ctx context.Context) {
+ c.processPairs(ctx)
c.writePairsResult()
}
-func (c *Context) processPairs() {
+func (c *Context) processPairs(ctx context.Context) {
wg := new(sync.WaitGroup)
for i := range c.pairs {
@@ -26,7 +27,7 @@ func (c *Context) processPairs() {
wg.Add(1)
if err := c.pdpWorkerPool.Submit(func() {
- c.processPair(p)
+ c.processPair(ctx, p)
wg.Done()
}); err != nil {
wg.Done()
@@ -37,9 +38,9 @@ func (c *Context) processPairs() {
c.pdpWorkerPool.Release()
}
-func (c *Context) processPair(p *gamePair) {
+func (c *Context) processPair(ctx context.Context, p *gamePair) {
c.distributeRanges(p)
- c.collectHashes(p)
+ c.collectHashes(ctx, p)
c.analyzeHashes(p)
}
@@ -106,7 +107,7 @@ func (c *Context) splitPayload(id oid.ID) []uint64 {
return notches
}
-func (c *Context) collectHashes(p *gamePair) {
+func (c *Context) collectHashes(ctx context.Context, p *gamePair) {
fn := func(n netmap.NodeInfo, rngs []*object.Range) [][]byte {
// Here we randomize the order a bit: the hypothesis is that this
// makes it harder for an unscrupulous node to come up with a
@@ -137,7 +138,7 @@ func (c *Context) collectHashes(p *gamePair) {
getRangeHashPrm.Range = rngs[i]
- h, err := c.cnrCom.GetRangeHash(c.task.AuditContext(), getRangeHashPrm)
+ h, err := c.cnrCom.GetRangeHash(ctx, getRangeHashPrm)
if err != nil {
c.log.Debug("could not get payload range hash",
zap.Stringer("id", p.id),
diff --git a/pkg/services/audit/auditor/pop.go b/pkg/services/audit/auditor/pop.go
index 45afa7937..32b837794 100644
--- a/pkg/services/audit/auditor/pop.go
+++ b/pkg/services/audit/auditor/pop.go
@@ -1,6 +1,8 @@
package auditor
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
@@ -12,8 +14,8 @@ const (
minGamePayloadSize = hashRangeNumber * tz.Size
)
-func (c *Context) executePoP() {
- c.buildCoverage()
+func (c *Context) executePoP(ctx context.Context) {
+ c.buildCoverage(ctx)
c.report.SetPlacementCounters(
c.counters.hit,
@@ -22,13 +24,13 @@ func (c *Context) executePoP() {
)
}
-func (c *Context) buildCoverage() {
+func (c *Context) buildCoverage(ctx context.Context) {
policy := c.task.ContainerStructure().PlacementPolicy()
// select random member from another storage group
// and process all placement vectors
c.iterateSGMembersPlacementRand(func(id oid.ID, ind int, nodes []netmap.NodeInfo) bool {
- c.processObjectPlacement(id, nodes, policy.ReplicaNumberByIndex(ind))
+ c.processObjectPlacement(ctx, id, nodes, policy.ReplicaNumberByIndex(ind))
return c.containerCovered()
})
}
@@ -38,7 +40,7 @@ func (c *Context) containerCovered() bool {
return c.cnrNodesNum <= len(c.pairedNodes)
}
-func (c *Context) processObjectPlacement(id oid.ID, nodes []netmap.NodeInfo, replicas uint32) {
+func (c *Context) processObjectPlacement(ctx context.Context, id oid.ID, nodes []netmap.NodeInfo, replicas uint32) {
var (
ok uint32
optimal bool
@@ -57,7 +59,7 @@ func (c *Context) processObjectPlacement(id oid.ID, nodes []netmap.NodeInfo, rep
getHeaderPrm.Node = nodes[i]
// try to get object header from node
- hdr, err := c.cnrCom.GetHeader(c.task.AuditContext(), getHeaderPrm)
+ hdr, err := c.cnrCom.GetHeader(ctx, getHeaderPrm)
if err != nil {
c.log.Debug("could not get object header from candidate",
zap.Stringer("id", id),
diff --git a/pkg/services/audit/auditor/por.go b/pkg/services/audit/auditor/por.go
index ff322d6e1..6011217f8 100644
--- a/pkg/services/audit/auditor/por.go
+++ b/pkg/services/audit/auditor/por.go
@@ -2,6 +2,7 @@ package auditor
import (
"bytes"
+ "context"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
@@ -14,7 +15,7 @@ import (
"go.uber.org/zap"
)
-func (c *Context) executePoR() {
+func (c *Context) executePoR(ctx context.Context) {
wg := new(sync.WaitGroup)
sgs := c.task.StorageGroupList()
@@ -22,7 +23,7 @@ func (c *Context) executePoR() {
wg.Add(1)
if err := c.porWorkerPool.Submit(func() {
- c.checkStorageGroupPoR(sg.ID(), sg.StorageGroup())
+ c.checkStorageGroupPoR(ctx, sg.ID(), sg.StorageGroup())
wg.Done()
}); err != nil {
wg.Done()
@@ -36,7 +37,7 @@ func (c *Context) executePoR() {
}
// nolint: funlen
-func (c *Context) checkStorageGroupPoR(sgID oid.ID, sg storagegroupSDK.StorageGroup) {
+func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg storagegroupSDK.StorageGroup) {
members := sg.Members()
c.updateSGInfo(sgID, members)
@@ -80,7 +81,7 @@ func (c *Context) checkStorageGroupPoR(sgID oid.ID, sg storagegroupSDK.StorageGr
getHeaderPrm.Node = flat[j]
- hdr, err := c.cnrCom.GetHeader(c.task.AuditContext(), getHeaderPrm)
+ hdr, err := c.cnrCom.GetHeader(ctx, getHeaderPrm)
if err != nil {
c.log.Debug("can't head object",
zap.String("remote_node", netmap.StringifyPublicKey(flat[j])),
diff --git a/pkg/services/audit/task.go b/pkg/services/audit/task.go
index 35932a69e..3de5ac2c6 100644
--- a/pkg/services/audit/task.go
+++ b/pkg/services/audit/task.go
@@ -1,8 +1,6 @@
package audit
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -10,11 +8,10 @@ import (
)
// Task groups groups the container audit parameters.
-// nolint: containedctx
type Task struct {
- reporter Reporter
+ cancelCh <-chan struct{}
- auditContext context.Context
+ reporter Reporter
idCnr cid.ID
@@ -41,18 +38,15 @@ func (t *Task) Reporter() Reporter {
return t.reporter
}
-// WithAuditContext sets context of the audit of the current epoch.
-func (t *Task) WithAuditContext(ctx context.Context) *Task {
- if t != nil {
- t.auditContext = ctx
+func (t *Task) WithCancelChannel(ch <-chan struct{}) *Task {
+ if ch != nil {
+ t.cancelCh = ch
}
-
return t
}
-// AuditContext returns context of the audit of the current epoch.
-func (t *Task) AuditContext() context.Context {
- return t.auditContext
+func (t *Task) CancelChannel() <-chan struct{} {
+ return t.cancelCh
}
// WithContainerID sets identifier of the container under audit.
diff --git a/pkg/services/audit/taskmanager/listen.go b/pkg/services/audit/taskmanager/listen.go
index 4e8a3df68..a16052e13 100644
--- a/pkg/services/audit/taskmanager/listen.go
+++ b/pkg/services/audit/taskmanager/listen.go
@@ -33,18 +33,28 @@ func (m *Manager) Listen(ctx context.Context) {
return
}
- m.handleTask(task)
+ tCtx, tCancel := context.WithCancel(ctx) // cancel task in case of listen cancel
+ go func() {
+ select {
+ case <-tCtx.Done(): // listen cancelled or task completed
+ return
+ case <-task.CancelChannel(): // new epoch
+ tCancel()
+ }
+ }()
+
+ m.handleTask(tCtx, task, tCancel)
}
}
}
-func (m *Manager) handleTask(task *audit.Task) {
+func (m *Manager) handleTask(ctx context.Context, task *audit.Task, onCompleted func()) {
pdpPool, err := m.pdpPoolGenerator()
if err != nil {
m.log.Error("could not generate PDP worker pool",
zap.String("error", err.Error()),
)
-
+ onCompleted()
return
}
@@ -53,7 +63,7 @@ func (m *Manager) handleTask(task *audit.Task) {
m.log.Error("could not generate PoR worker pool",
zap.String("error", err.Error()),
)
-
+ onCompleted()
return
}
@@ -61,9 +71,10 @@ func (m *Manager) handleTask(task *audit.Task) {
WithPDPWorkerPool(pdpPool).
WithPoRWorkerPool(porPool)
- if err := m.workerPool.Submit(auditContext.Execute); err != nil {
+ if err := m.workerPool.Submit(func() { auditContext.Execute(ctx, onCompleted) }); err != nil {
// may be we should report it
m.log.Warn("could not submit audit task")
+ onCompleted()
}
}
diff --git a/pkg/services/audit/taskmanager/push.go b/pkg/services/audit/taskmanager/push.go
index 13f8fd12d..805897dbf 100644
--- a/pkg/services/audit/taskmanager/push.go
+++ b/pkg/services/audit/taskmanager/push.go
@@ -5,9 +5,6 @@ import (
)
// PushTask adds a task to the queue for processing.
-//
-// Returns error if task was not added to the queue.
-func (m *Manager) PushTask(t *audit.Task) error {
+func (m *Manager) PushTask(t *audit.Task) {
m.ch <- t
- return nil
}
From e2f13d03d7c25b7399e817642a9532eff0bd8be2 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 6 Apr 2023 12:18:58 +0300
Subject: [PATCH 0065/1943] [#222] auditsvc: Refactor PoR audit
Resolve funlen linter for Context.checkStorageGroupPoR method.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/audit/auditor/por.go | 43 +++++++++++++++++++------------
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/pkg/services/audit/auditor/por.go b/pkg/services/audit/auditor/por.go
index 6011217f8..aebc25c68 100644
--- a/pkg/services/audit/auditor/por.go
+++ b/pkg/services/audit/auditor/por.go
@@ -36,7 +36,6 @@ func (c *Context) executePoR(ctx context.Context) {
c.report.SetPoRCounters(c.porRequests.Load(), c.porRetries.Load())
}
-// nolint: funlen
func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg storagegroupSDK.StorageGroup) {
members := sg.Members()
c.updateSGInfo(sgID, members)
@@ -55,22 +54,11 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor
homomorphicHashingEnabled := !containerSDK.IsHomomorphicHashingDisabled(c.task.ContainerStructure())
for i := range members {
- objectPlacement, err := c.buildPlacement(members[i])
- if err != nil {
- c.log.Info("can't build placement for storage group member",
- zap.Stringer("sg", sgID),
- zap.String("member_id", members[i].String()),
- )
-
+ flat, ok := c.getShuffledNodes(members[i], sgID)
+ if !ok {
continue
}
- flat := placement.FlattenNodes(objectPlacement)
-
- rand.Shuffle(len(flat), func(i, j int) {
- flat[i], flat[j] = flat[j], flat[i]
- })
-
getHeaderPrm.OID = members[i]
for j := range flat {
@@ -126,8 +114,12 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor
cs, _ := sg.ValidationDataHash()
tzCheck := !homomorphicHashingEnabled || bytes.Equal(tzHash, cs.Value())
+ c.writeCheckReport(sizeCheck, tzCheck, sgID, sg, totalSize)
+}
+
+func (c *Context) writeCheckReport(sizeCheck, tzCheck bool, sgID oid.ID, sg storagegroupSDK.StorageGroup, totalSize uint64) {
if sizeCheck && tzCheck {
- c.report.PassedPoR(sgID) // write report
+ c.report.PassedPoR(sgID)
} else {
if !sizeCheck {
c.log.Debug("storage group size check failed",
@@ -139,6 +131,25 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor
c.log.Debug("storage group tz hash check failed")
}
- c.report.FailedPoR(sgID) // write report
+ c.report.FailedPoR(sgID)
}
}
+
+func (c *Context) getShuffledNodes(member oid.ID, sgID oid.ID) ([]netmap.NodeInfo, bool) {
+ objectPlacement, err := c.buildPlacement(member)
+ if err != nil {
+ c.log.Info("can't build placement for storage group member",
+ zap.Stringer("sg", sgID),
+ zap.String("member_id", member.String()),
+ )
+
+ return nil, false
+ }
+
+ flat := placement.FlattenNodes(objectPlacement)
+
+ rand.Shuffle(len(flat), func(i, j int) {
+ flat[i], flat[j] = flat[j], flat[i]
+ })
+ return flat, true
+}
From 469e8a6e59a26a24614eefc8f50f641e452ad616 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 4 Apr 2023 17:47:00 +0300
Subject: [PATCH 0066/1943] [#212] reputationsvc: Resolve containedctx linter
Signed-off-by: Dmitrii Stepanov
---
.../reputation/eigentrust/controller/calls.go | 18 ++----------------
.../eigentrust/controller/controller.go | 4 ++--
.../reputation/eigentrust/controller/deps.go | 6 ------
3 files changed, 4 insertions(+), 24 deletions(-)
diff --git a/pkg/services/reputation/eigentrust/controller/calls.go b/pkg/services/reputation/eigentrust/controller/calls.go
index 28228706c..faf953aed 100644
--- a/pkg/services/reputation/eigentrust/controller/calls.go
+++ b/pkg/services/reputation/eigentrust/controller/calls.go
@@ -1,8 +1,6 @@
package eigentrustctrl
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
"go.uber.org/zap"
)
@@ -12,10 +10,7 @@ type ContinuePrm struct {
Epoch uint64
}
-// nolint: containedctx
type iterContext struct {
- context.Context
-
eigentrust.EpochIteration
iterationNumber uint32
@@ -26,12 +21,6 @@ func (x iterContext) Last() bool {
return x.last
}
-type iterContextCancel struct {
- iterContext
-
- cancel context.CancelFunc
-}
-
// Continue moves the global reputation calculator to the next iteration.
func (c *Controller) Continue(prm ContinuePrm) {
c.mtx.Lock()
@@ -39,10 +28,9 @@ func (c *Controller) Continue(prm ContinuePrm) {
{
iterCtx, ok := c.mCtx[prm.Epoch]
if !ok {
- iterCtx = new(iterContextCancel)
+ iterCtx = new(iterContext)
c.mCtx[prm.Epoch] = iterCtx
- iterCtx.Context, iterCtx.cancel = context.WithCancel(context.Background())
iterCtx.EpochIteration.SetEpoch(prm.Epoch)
iterations, err := c.prm.IterationsProvider.EigenTrustIterations()
@@ -53,14 +41,12 @@ func (c *Controller) Continue(prm ContinuePrm) {
} else {
iterCtx.iterationNumber = uint32(iterations)
}
- } else {
- iterCtx.cancel()
}
iterCtx.last = iterCtx.I() == iterCtx.iterationNumber-1
err := c.prm.WorkerPool.Submit(func() {
- c.prm.DaughtersTrustCalculator.Calculate(iterCtx.iterContext)
+ c.prm.DaughtersTrustCalculator.Calculate(iterCtx)
// iteration++
iterCtx.Increment()
diff --git a/pkg/services/reputation/eigentrust/controller/controller.go b/pkg/services/reputation/eigentrust/controller/controller.go
index fe9150bcf..a6d0d4a82 100644
--- a/pkg/services/reputation/eigentrust/controller/controller.go
+++ b/pkg/services/reputation/eigentrust/controller/controller.go
@@ -47,7 +47,7 @@ type Controller struct {
opts *options
mtx sync.Mutex
- mCtx map[uint64]*iterContextCancel
+ mCtx map[uint64]*iterContext
}
const invalidPrmValFmt = "invalid parameter %s (%T):%v"
@@ -81,6 +81,6 @@ func New(prm Prm, opts ...Option) *Controller {
return &Controller{
prm: prm,
opts: o,
- mCtx: make(map[uint64]*iterContextCancel),
+ mCtx: make(map[uint64]*iterContext),
}
}
diff --git a/pkg/services/reputation/eigentrust/controller/deps.go b/pkg/services/reputation/eigentrust/controller/deps.go
index 8c4752657..2aeafb61a 100644
--- a/pkg/services/reputation/eigentrust/controller/deps.go
+++ b/pkg/services/reputation/eigentrust/controller/deps.go
@@ -1,14 +1,8 @@
package eigentrustctrl
-import (
- "context"
-)
-
// IterationContext is a context of the i-th
// stage of iterative EigenTrust algorithm.
type IterationContext interface {
- context.Context
-
// Must return epoch number to select the values
// for global trust calculation.
Epoch() uint64
From 7ebbfa3358b0c8e61a36548b29741ef0732be366 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 10:44:40 +0300
Subject: [PATCH 0067/1943] [#212] reputationsvc: Resolve linters and rename
Resolved containedctx linter warnings.
Renamed context structs and interfaces to more understandable names.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/main.go | 2 +-
cmd/frostfs-node/reputation.go | 37 ++++++------
cmd/frostfs-node/reputation/common/remote.go | 11 ++--
cmd/frostfs-node/reputation/common/util.go | 13 ++--
.../reputation/intermediate/calculator.go | 11 ++--
.../reputation/intermediate/consumers.go | 30 +++++-----
.../reputation/intermediate/contract.go | 2 +-
.../reputation/intermediate/daughters.go | 16 ++---
.../reputation/intermediate/remote.go | 32 +++++-----
.../reputation/intermediate/storage.go | 6 +-
.../reputation/internal/client/client.go | 18 ++----
cmd/frostfs-node/reputation/local/remote.go | 16 ++---
cmd/frostfs-node/reputation/local/storage.go | 10 ++--
pkg/services/reputation/common/deps.go | 13 ++--
.../reputation/common/router/calls.go | 53 +++++++++--------
.../reputation/eigentrust/calculator/calls.go | 59 +++++++++----------
.../reputation/eigentrust/calculator/deps.go | 22 +++----
.../reputation/eigentrust/controller/calls.go | 6 +-
.../reputation/eigentrust/controller/deps.go | 4 +-
.../reputation/eigentrust/iteration.go | 17 +-----
.../reputation/local/controller/calls.go | 53 ++++++++---------
.../reputation/local/controller/controller.go | 4 +-
.../reputation/local/controller/deps.go | 2 +-
.../reputation/local/controller/util.go | 4 +-
24 files changed, 203 insertions(+), 238 deletions(-)
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index d5f711a51..cddedabe9 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -99,7 +99,7 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(c, "session", initSessionService)
- initAndLog(c, "reputation", initReputationService)
+ initAndLog(c, "reputation", func(c *cfg) { initReputationService(ctx, c) })
initAndLog(c, "notification", initNotifications)
initAndLog(c, "object", initObjectService)
initAndLog(c, "tree", initTreeService)
diff --git a/cmd/frostfs-node/reputation.go b/cmd/frostfs-node/reputation.go
index 7b43443c2..a96bd066e 100644
--- a/cmd/frostfs-node/reputation.go
+++ b/cmd/frostfs-node/reputation.go
@@ -33,7 +33,7 @@ import (
"go.uber.org/zap"
)
-func initReputationService(c *cfg) {
+func initReputationService(ctx context.Context, c *cfg) {
wrap, err := repClient.NewFromMorph(c.cfgMorph.client, c.cfgReputation.scriptHash, 0, repClient.TryNotary())
fatalOnErr(err)
@@ -73,7 +73,7 @@ func initReputationService(c *cfg) {
c.cfgReputation.localTrustCtrl = createLocalTrustController(c, localTrustLogger, localKey, localTrustRouter)
- addReputationReportHandler(c)
+ addReputationReportHandler(ctx, c)
server := grpcreputation.New(
reputationrpc.NewSignService(
@@ -98,10 +98,10 @@ func initReputationService(c *cfg) {
// initialize eigen trust block timer
newEigenTrustIterTimer(c)
- addEigenTrustEpochHandler(c, eigenTrustController)
+ addEigenTrustEpochHandler(ctx, c, eigenTrustController)
}
-func addReputationReportHandler(c *cfg) {
+func addReputationReportHandler(ctx context.Context, c *cfg) {
addNewEpochAsyncNotificationHandler(
c,
func(ev event.Event) {
@@ -112,12 +112,12 @@ func addReputationReportHandler(c *cfg) {
// report collected values from previous epoch
reportPrm.SetEpoch(ev.(netmap.NewEpoch).EpochNumber() - 1)
- c.cfgReputation.localTrustCtrl.Report(reportPrm)
+ c.cfgReputation.localTrustCtrl.Report(ctx, reportPrm)
},
)
}
-func addEigenTrustEpochHandler(c *cfg, eigenTrustController *eigentrustctrl.Controller) {
+func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController *eigentrustctrl.Controller) {
addNewEpochAsyncNotificationHandler(
c,
func(e event.Event) {
@@ -138,7 +138,7 @@ func addEigenTrustEpochHandler(c *cfg, eigenTrustController *eigentrustctrl.Cont
}
epochTimer, err := ticker.NewIterationsTicker(duration, iterations, func() {
- eigenTrustController.Continue(
+ eigenTrustController.Continue(ctx,
eigentrustctrl.ContinuePrm{
Epoch: epoch - 1,
},
@@ -286,8 +286,8 @@ func createLocalTrustController(c *cfg, localTrustLogger *logger.Logger, localKe
type reputationServer struct {
*cfg
log *logger.Logger
- localRouter reputationcommon.WriterProvider
- intermediateRouter reputationcommon.WriterProvider
+ localRouter *reputationrouter.Router
+ intermediateRouter *reputationrouter.Router
routeBuilder reputationrouter.Builder
}
@@ -297,18 +297,17 @@ func (s *reputationServer) AnnounceLocalTrust(ctx context.Context, req *v2reputa
body := req.GetBody()
- eCtx := &common.EpochContext{
- Context: ctx,
- E: body.GetEpoch(),
+ ep := &common.EpochProvider{
+ E: body.GetEpoch(),
}
- w, err := s.localRouter.InitWriter(reputationrouter.NewRouteContext(eCtx, passedRoute))
+ w, err := s.localRouter.InitWriter(reputationrouter.NewRouteInfo(ep, passedRoute))
if err != nil {
return nil, fmt.Errorf("could not initialize local trust writer: %w", err)
}
for _, trust := range body.GetTrusts() {
- err = s.processLocalTrust(body.GetEpoch(), apiToLocalTrust(&trust, passedRoute[0].PublicKey()), passedRoute, w)
+ err = s.processLocalTrust(ctx, body.GetEpoch(), apiToLocalTrust(&trust, passedRoute[0].PublicKey()), passedRoute, w)
if err != nil {
return nil, fmt.Errorf("could not write one of local trusts: %w", err)
}
@@ -326,9 +325,9 @@ func (s *reputationServer) AnnounceIntermediateResult(ctx context.Context, req *
body := req.GetBody()
- eiCtx := eigentrust.NewIterContext(ctx, body.GetEpoch(), body.GetIteration())
+ ei := eigentrust.NewEpochIteration(body.GetEpoch(), body.GetIteration())
- w, err := s.intermediateRouter.InitWriter(reputationrouter.NewRouteContext(eiCtx, passedRoute))
+ w, err := s.intermediateRouter.InitWriter(reputationrouter.NewRouteInfo(ei, passedRoute))
if err != nil {
return nil, fmt.Errorf("could not initialize trust writer: %w", err)
}
@@ -337,7 +336,7 @@ func (s *reputationServer) AnnounceIntermediateResult(ctx context.Context, req *
trust := apiToLocalTrust(v2Trust.GetTrust(), v2Trust.GetTrustingPeer().GetPublicKey())
- err = w.Write(trust)
+ err = w.Write(ctx, trust)
if err != nil {
return nil, fmt.Errorf("could not write trust: %w", err)
}
@@ -348,14 +347,14 @@ func (s *reputationServer) AnnounceIntermediateResult(ctx context.Context, req *
return resp, nil
}
-func (s *reputationServer) processLocalTrust(epoch uint64, t reputation.Trust,
+func (s *reputationServer) processLocalTrust(ctx context.Context, epoch uint64, t reputation.Trust,
passedRoute []reputationcommon.ServerInfo, w reputationcommon.Writer) error {
err := reputationrouter.CheckRoute(s.routeBuilder, epoch, t, passedRoute)
if err != nil {
return fmt.Errorf("wrong route of reputation trust value: %w", err)
}
- return w.Write(t)
+ return w.Write(ctx, t)
}
// apiToLocalTrust converts v2 Trust to local reputation.Trust, adding trustingPeer.
diff --git a/cmd/frostfs-node/reputation/common/remote.go b/cmd/frostfs-node/reputation/common/remote.go
index 0fe0a7fd7..cd0a024a9 100644
--- a/cmd/frostfs-node/reputation/common/remote.go
+++ b/cmd/frostfs-node/reputation/common/remote.go
@@ -6,7 +6,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- reputationrouter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common/router"
trustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
)
@@ -22,12 +21,12 @@ type clientKeyRemoteProvider interface {
WithClient(client.Client) reputationcommon.WriterProvider
}
-// remoteTrustProvider is an implementation of reputation RemoteWriterProvider interface.
+// RemoteTrustProvider is an implementation of reputation RemoteWriterProvider interface.
// It caches clients, checks if it is the end of the route and checks either the current
// node is a remote target or not.
//
// remoteTrustProvider requires to be provided with clientKeyRemoteProvider.
-type remoteTrustProvider struct {
+type RemoteTrustProvider struct {
netmapKeys netmap.AnnouncedKeys
deadEndProvider reputationcommon.WriterProvider
clientCache clientCache
@@ -48,7 +47,7 @@ type RemoteProviderPrm struct {
Log *logger.Logger
}
-func NewRemoteTrustProvider(prm RemoteProviderPrm) reputationrouter.RemoteWriterProvider {
+func NewRemoteTrustProvider(prm RemoteProviderPrm) *RemoteTrustProvider {
switch {
case prm.NetmapKeys == nil:
PanicOnPrmValue("NetmapKeys", prm.NetmapKeys)
@@ -62,7 +61,7 @@ func NewRemoteTrustProvider(prm RemoteProviderPrm) reputationrouter.RemoteWriter
PanicOnPrmValue("Logger", prm.Log)
}
- return &remoteTrustProvider{
+ return &RemoteTrustProvider{
netmapKeys: prm.NetmapKeys,
deadEndProvider: prm.DeadEndProvider,
clientCache: prm.ClientCache,
@@ -71,7 +70,7 @@ func NewRemoteTrustProvider(prm RemoteProviderPrm) reputationrouter.RemoteWriter
}
}
-func (rtp *remoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) {
+func (rtp *RemoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) {
rtp.log.Debug("initializing remote writer provider")
if srv == nil {
diff --git a/cmd/frostfs-node/reputation/common/util.go b/cmd/frostfs-node/reputation/common/util.go
index 28351d0ce..443adb388 100644
--- a/cmd/frostfs-node/reputation/common/util.go
+++ b/cmd/frostfs-node/reputation/common/util.go
@@ -7,24 +7,21 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
)
-// EpochContext is a std context extended with epoch data.
-// nolint: containedctx
-type EpochContext struct {
- context.Context
+type EpochProvider struct {
E uint64
}
-func (ctx *EpochContext) Epoch() uint64 {
- return ctx.E
+func (ep *EpochProvider) Epoch() uint64 {
+ return ep.E
}
type NopReputationWriter struct{}
-func (NopReputationWriter) Write(reputation.Trust) error {
+func (NopReputationWriter) Write(context.Context, reputation.Trust) error {
return nil
}
-func (NopReputationWriter) Close() error {
+func (NopReputationWriter) Close(context.Context) error {
return nil
}
diff --git a/cmd/frostfs-node/reputation/intermediate/calculator.go b/cmd/frostfs-node/reputation/intermediate/calculator.go
index 8bc74324f..73dd12311 100644
--- a/cmd/frostfs-node/reputation/intermediate/calculator.go
+++ b/cmd/frostfs-node/reputation/intermediate/calculator.go
@@ -1,6 +1,7 @@
package intermediate
import (
+ "context"
"errors"
"fmt"
@@ -42,15 +43,15 @@ type DaughtersTrustCalculator struct {
}
// Calculate converts and passes values to the wrapped calculator.
-func (c *DaughtersTrustCalculator) Calculate(ctx eigentrustctrl.IterationContext) {
+func (c *DaughtersTrustCalculator) Calculate(ctx context.Context, iterCtx eigentrustctrl.IterationContext) {
calcPrm := eigencalc.CalculatePrm{}
epochIteration := eigentrust.EpochIteration{}
- epochIteration.SetEpoch(ctx.Epoch())
- epochIteration.SetI(ctx.I())
+ epochIteration.SetEpoch(iterCtx.Epoch())
+ epochIteration.SetI(iterCtx.I())
- calcPrm.SetLast(ctx.Last())
+ calcPrm.SetLast(iterCtx.Last())
calcPrm.SetEpochIteration(epochIteration)
- c.Calculator.Calculate(calcPrm)
+ c.Calculator.Calculate(ctx, calcPrm)
}
diff --git a/cmd/frostfs-node/reputation/intermediate/consumers.go b/cmd/frostfs-node/reputation/intermediate/consumers.go
index fb89c4396..33eab605b 100644
--- a/cmd/frostfs-node/reputation/intermediate/consumers.go
+++ b/cmd/frostfs-node/reputation/intermediate/consumers.go
@@ -1,6 +1,8 @@
package intermediate
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
@@ -23,41 +25,41 @@ type ConsumerStorageWriterProvider struct {
// that writes passed consumer's Trust values to the Consumer storage. After writing
// that, values can be used in eigenTrust algorithm's iterations.
type ConsumerTrustWriter struct {
- log *logger.Logger
- storage *consumerstorage.Storage
- eiCtx eigencalc.Context
+ log *logger.Logger
+ storage *consumerstorage.Storage
+ iterInfo eigencalc.EpochIterationInfo
}
-func (w *ConsumerTrustWriter) Write(t reputation.Trust) error {
+func (w *ConsumerTrustWriter) Write(_ context.Context, t reputation.Trust) error {
w.log.Debug("writing received consumer's trusts",
- zap.Uint64("epoch", w.eiCtx.Epoch()),
- zap.Uint32("iteration", w.eiCtx.I()),
+ zap.Uint64("epoch", w.iterInfo.Epoch()),
+ zap.Uint32("iteration", w.iterInfo.I()),
zap.Stringer("trusting_peer", t.TrustingPeer()),
zap.Stringer("trusted_peer", t.Peer()),
)
trust := eigentrust.IterationTrust{Trust: t}
- trust.SetEpoch(w.eiCtx.Epoch())
- trust.SetI(w.eiCtx.I())
+ trust.SetEpoch(w.iterInfo.Epoch())
+ trust.SetI(w.iterInfo.I())
w.storage.Put(trust)
return nil
}
-func (w *ConsumerTrustWriter) Close() error {
+func (w *ConsumerTrustWriter) Close(context.Context) error {
return nil
}
-func (s *ConsumerStorageWriterProvider) InitWriter(ctx reputationcommon.Context) (reputationcommon.Writer, error) {
- eiCtx, ok := ctx.(eigencalc.Context)
+func (s *ConsumerStorageWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
+ iterInfo, ok := ep.(eigencalc.EpochIterationInfo)
if !ok {
panic(ErrIncorrectContextPanicMsg)
}
return &ConsumerTrustWriter{
- log: s.Log,
- storage: s.Storage,
- eiCtx: eiCtx,
+ log: s.Log,
+ storage: s.Storage,
+ iterInfo: iterInfo,
}, nil
}
diff --git a/cmd/frostfs-node/reputation/intermediate/contract.go b/cmd/frostfs-node/reputation/intermediate/contract.go
index 7e6411329..6303b1219 100644
--- a/cmd/frostfs-node/reputation/intermediate/contract.go
+++ b/cmd/frostfs-node/reputation/intermediate/contract.go
@@ -51,7 +51,7 @@ type FinalWriterProvider struct {
}
func (fwp FinalWriterProvider) InitIntermediateWriter(
- _ eigentrustcalc.Context) (eigentrustcalc.IntermediateWriter, error) {
+ _ eigentrustcalc.EpochIterationInfo) (eigentrustcalc.IntermediateWriter, error) {
return &FinalWriter{
privatKey: fwp.prm.PrivatKey,
pubKey: fwp.prm.PubKey,
diff --git a/cmd/frostfs-node/reputation/intermediate/daughters.go b/cmd/frostfs-node/reputation/intermediate/daughters.go
index 641a0afe2..d72eead43 100644
--- a/cmd/frostfs-node/reputation/intermediate/daughters.go
+++ b/cmd/frostfs-node/reputation/intermediate/daughters.go
@@ -1,6 +1,8 @@
package intermediate
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
@@ -21,28 +23,28 @@ type DaughterStorageWriterProvider struct {
type DaughterTrustWriter struct {
log *logger.Logger
storage *daughters.Storage
- ctx reputationcommon.Context
+ ep reputationcommon.EpochProvider
}
-func (w *DaughterTrustWriter) Write(t reputation.Trust) error {
+func (w *DaughterTrustWriter) Write(_ context.Context, t reputation.Trust) error {
w.log.Debug("writing received daughter's trusts",
- zap.Uint64("epoch", w.ctx.Epoch()),
+ zap.Uint64("epoch", w.ep.Epoch()),
zap.Stringer("trusting_peer", t.TrustingPeer()),
zap.Stringer("trusted_peer", t.Peer()),
)
- w.storage.Put(w.ctx.Epoch(), t)
+ w.storage.Put(w.ep.Epoch(), t)
return nil
}
-func (w *DaughterTrustWriter) Close() error {
+func (w *DaughterTrustWriter) Close(context.Context) error {
return nil
}
-func (s *DaughterStorageWriterProvider) InitWriter(ctx reputationcommon.Context) (reputationcommon.Writer, error) {
+func (s *DaughterStorageWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
return &DaughterTrustWriter{
log: s.Log,
storage: s.Storage,
- ctx: ctx,
+ ep: ep,
}, nil
}
diff --git a/cmd/frostfs-node/reputation/intermediate/remote.go b/cmd/frostfs-node/reputation/intermediate/remote.go
index 224da9439..b1a218b94 100644
--- a/cmd/frostfs-node/reputation/intermediate/remote.go
+++ b/cmd/frostfs-node/reputation/intermediate/remote.go
@@ -1,6 +1,7 @@
package intermediate
import (
+ "context"
"crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
@@ -64,32 +65,32 @@ type TrustWriterProvider struct {
log *logger.Logger
}
-func (twp *TrustWriterProvider) InitWriter(ctx reputationcommon.Context) (reputationcommon.Writer, error) {
- eiContext, ok := ctx.(eigentrustcalc.Context)
+func (twp *TrustWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
+ iterInfo, ok := ep.(eigentrustcalc.EpochIterationInfo)
if !ok {
// TODO: #1164 think if this can be done without such limitation
panic(ErrIncorrectContextPanicMsg)
}
return &RemoteTrustWriter{
- eiCtx: eiContext,
- client: twp.client,
- key: twp.key,
- log: twp.log,
+ iterInfo: iterInfo,
+ client: twp.client,
+ key: twp.key,
+ log: twp.log,
}, nil
}
type RemoteTrustWriter struct {
- eiCtx eigentrustcalc.Context
- client coreclient.Client
- key *ecdsa.PrivateKey
- log *logger.Logger
+ iterInfo eigentrustcalc.EpochIterationInfo
+ client coreclient.Client
+ key *ecdsa.PrivateKey
+ log *logger.Logger
}
// Write sends a trust value to a remote node via ReputationService.AnnounceIntermediateResult RPC.
-func (rtp *RemoteTrustWriter) Write(t reputation.Trust) error {
- epoch := rtp.eiCtx.Epoch()
- i := rtp.eiCtx.I()
+func (rtp *RemoteTrustWriter) Write(ctx context.Context, t reputation.Trust) error {
+ epoch := rtp.iterInfo.Epoch()
+ i := rtp.iterInfo.I()
rtp.log.Debug("announcing trust",
zap.Uint64("epoch", epoch),
@@ -108,17 +109,16 @@ func (rtp *RemoteTrustWriter) Write(t reputation.Trust) error {
var p internalclient.AnnounceIntermediatePrm
- p.SetContext(rtp.eiCtx)
p.SetClient(rtp.client)
p.SetEpoch(epoch)
p.SetIteration(i)
p.SetTrust(apiPeerToPeerTrust)
- _, err := internalclient.AnnounceIntermediate(p)
+ _, err := internalclient.AnnounceIntermediate(ctx, p)
return err
}
-func (rtp *RemoteTrustWriter) Close() error {
+func (rtp *RemoteTrustWriter) Close(context.Context) error {
return nil
}
diff --git a/cmd/frostfs-node/reputation/intermediate/storage.go b/cmd/frostfs-node/reputation/intermediate/storage.go
index 0f614d9f2..db29ff92b 100644
--- a/cmd/frostfs-node/reputation/intermediate/storage.go
+++ b/cmd/frostfs-node/reputation/intermediate/storage.go
@@ -18,7 +18,7 @@ type DaughterTrustIteratorProvider struct {
// InitDaughterIterator returns an iterator over the received
// local trusts for ctx.Epoch() epoch from daughter p.
-func (ip *DaughterTrustIteratorProvider) InitDaughterIterator(ctx eigentrustcalc.Context,
+func (ip *DaughterTrustIteratorProvider) InitDaughterIterator(ctx eigentrustcalc.EpochIterationInfo,
p apireputation.PeerID) (eigentrustcalc.TrustIterator, error) {
epoch := ctx.Epoch()
@@ -34,7 +34,7 @@ func (ip *DaughterTrustIteratorProvider) InitDaughterIterator(ctx eigentrustcalc
// daughters of the current node(manager) and all local
// trusts received from them for ctx.Epoch() epoch.
func (ip *DaughterTrustIteratorProvider) InitAllDaughtersIterator(
- ctx eigentrustcalc.Context) (eigentrustcalc.PeerTrustsIterator, error) {
+ ctx eigentrustcalc.EpochIterationInfo) (eigentrustcalc.PeerTrustsIterator, error) {
epoch := ctx.Epoch()
iter, ok := ip.DaughterStorage.AllDaughterTrusts(epoch)
@@ -49,7 +49,7 @@ func (ip *DaughterTrustIteratorProvider) InitAllDaughtersIterator(
// of the current node(manager) and all their consumers' local
// trusts for ctx.Epoch() epoch and ctx.I() iteration.
func (ip *DaughterTrustIteratorProvider) InitConsumersIterator(
- ctx eigentrustcalc.Context) (eigentrustcalc.PeerTrustsIterator, error) {
+ ctx eigentrustcalc.EpochIterationInfo) (eigentrustcalc.PeerTrustsIterator, error) {
epoch, iter := ctx.Epoch(), ctx.I()
consumerIterator, ok := ip.ConsumerStorage.Consumers(epoch, iter)
diff --git a/cmd/frostfs-node/reputation/internal/client/client.go b/cmd/frostfs-node/reputation/internal/client/client.go
index 22fd21d4a..ff5131262 100644
--- a/cmd/frostfs-node/reputation/internal/client/client.go
+++ b/cmd/frostfs-node/reputation/internal/client/client.go
@@ -9,11 +9,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
)
-// nolint: containedctx
type commonPrm struct {
cli coreclient.Client
-
- ctx context.Context
}
// SetClient sets the base client for FrostFS API communication.
@@ -23,13 +20,6 @@ func (x *commonPrm) SetClient(cli coreclient.Client) {
x.cli = cli
}
-// SetContext sets context.Context for network communication.
-//
-// Required parameter.
-func (x *commonPrm) SetContext(ctx context.Context) {
- x.ctx = ctx
-}
-
// AnnounceLocalPrm groups parameters of AnnounceLocal operation.
type AnnounceLocalPrm struct {
commonPrm
@@ -55,10 +45,10 @@ type AnnounceLocalRes struct{}
// Client, context and key must be set.
//
// Returns any error which prevented the operation from completing correctly in error return.
-func AnnounceLocal(prm AnnounceLocalPrm) (res AnnounceLocalRes, err error) {
+func AnnounceLocal(ctx context.Context, prm AnnounceLocalPrm) (res AnnounceLocalRes, err error) {
var cliRes *client.ResAnnounceLocalTrust
- cliRes, err = prm.cli.AnnounceLocalTrust(prm.ctx, prm.cliPrm)
+ cliRes, err = prm.cli.AnnounceLocalTrust(ctx, prm.cliPrm)
if err == nil {
// pull out an error from status
err = apistatus.ErrFromStatus(cliRes.Status())
@@ -98,10 +88,10 @@ type AnnounceIntermediateRes struct{}
// Client, context and key must be set.
//
// Returns any error which prevented the operation from completing correctly in error return.
-func AnnounceIntermediate(prm AnnounceIntermediatePrm) (res AnnounceIntermediateRes, err error) {
+func AnnounceIntermediate(ctx context.Context, prm AnnounceIntermediatePrm) (res AnnounceIntermediateRes, err error) {
var cliRes *client.ResAnnounceIntermediateTrust
- cliRes, err = prm.cli.AnnounceIntermediateTrust(prm.ctx, prm.cliPrm)
+ cliRes, err = prm.cli.AnnounceIntermediateTrust(ctx, prm.cliPrm)
if err == nil {
// pull out an error from status
err = apistatus.ErrFromStatus(cliRes.Status())
diff --git a/cmd/frostfs-node/reputation/local/remote.go b/cmd/frostfs-node/reputation/local/remote.go
index 2fa93ff6f..3c929a9ca 100644
--- a/cmd/frostfs-node/reputation/local/remote.go
+++ b/cmd/frostfs-node/reputation/local/remote.go
@@ -1,6 +1,7 @@
package local
import (
+ "context"
"crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
@@ -63,9 +64,9 @@ type TrustWriterProvider struct {
log *logger.Logger
}
-func (twp *TrustWriterProvider) InitWriter(ctx reputationcommon.Context) (reputationcommon.Writer, error) {
+func (twp *TrustWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
return &RemoteTrustWriter{
- ctx: ctx,
+ ep: ep,
client: twp.client,
key: twp.key,
log: twp.log,
@@ -73,7 +74,7 @@ func (twp *TrustWriterProvider) InitWriter(ctx reputationcommon.Context) (reputa
}
type RemoteTrustWriter struct {
- ctx reputationcommon.Context
+ ep reputationcommon.EpochProvider
client coreclient.Client
key *ecdsa.PrivateKey
log *logger.Logger
@@ -81,7 +82,7 @@ type RemoteTrustWriter struct {
buf []reputationapi.Trust
}
-func (rtp *RemoteTrustWriter) Write(t reputation.Trust) error {
+func (rtp *RemoteTrustWriter) Write(_ context.Context, t reputation.Trust) error {
var apiTrust reputationapi.Trust
apiTrust.SetValue(t.Value().Float64())
@@ -92,8 +93,8 @@ func (rtp *RemoteTrustWriter) Write(t reputation.Trust) error {
return nil
}
-func (rtp *RemoteTrustWriter) Close() error {
- epoch := rtp.ctx.Epoch()
+func (rtp *RemoteTrustWriter) Close(ctx context.Context) error {
+ epoch := rtp.ep.Epoch()
rtp.log.Debug("announcing trusts",
zap.Uint64("epoch", epoch),
@@ -101,12 +102,11 @@ func (rtp *RemoteTrustWriter) Close() error {
var prm internalclient.AnnounceLocalPrm
- prm.SetContext(rtp.ctx)
prm.SetClient(rtp.client)
prm.SetEpoch(epoch)
prm.SetTrusts(rtp.buf)
- _, err := internalclient.AnnounceLocal(prm)
+ _, err := internalclient.AnnounceLocal(ctx, prm)
return err
}
diff --git a/cmd/frostfs-node/reputation/local/storage.go b/cmd/frostfs-node/reputation/local/storage.go
index 92d10dfee..861151871 100644
--- a/cmd/frostfs-node/reputation/local/storage.go
+++ b/cmd/frostfs-node/reputation/local/storage.go
@@ -24,8 +24,8 @@ type TrustStorage struct {
LocalKey []byte
}
-func (s *TrustStorage) InitIterator(ctx reputationcommon.Context) (trustcontroller.Iterator, error) {
- epoch := ctx.Epoch()
+func (s *TrustStorage) InitIterator(ep reputationcommon.EpochProvider) (trustcontroller.Iterator, error) {
+ epoch := ep.Epoch()
s.Log.Debug("initializing iterator over trusts",
zap.Uint64("epoch", epoch),
@@ -37,14 +37,14 @@ func (s *TrustStorage) InitIterator(ctx reputationcommon.Context) (trustcontroll
}
return &TrustIterator{
- ctx: ctx,
+ ep: ep,
storage: s,
epochStorage: epochStorage,
}, nil
}
type TrustIterator struct {
- ctx reputationcommon.Context
+ ep reputationcommon.EpochProvider
storage *TrustStorage
@@ -59,7 +59,7 @@ func (it *TrustIterator) Iterate(h reputation.TrustHandler) error {
}
}
- nm, err := it.storage.NmSrc.GetNetMapByEpoch(it.ctx.Epoch())
+ nm, err := it.storage.NmSrc.GetNetMapByEpoch(it.ep.Epoch())
if err != nil {
return err
}
diff --git a/pkg/services/reputation/common/deps.go b/pkg/services/reputation/common/deps.go
index ebb227b5e..3ea5aa88e 100644
--- a/pkg/services/reputation/common/deps.go
+++ b/pkg/services/reputation/common/deps.go
@@ -2,17 +2,12 @@ package common
import (
"context"
- "io"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
)
-// Context wraps stdlib context
-// with accompanying meta values.
-type Context interface {
- context.Context
-
+type EpochProvider interface {
// Must return epoch number to select the values.
Epoch() uint64
}
@@ -30,7 +25,7 @@ type Writer interface {
// Close operation.
//
// Write must not be called after Close.
- Write(reputation.Trust) error
+ Write(context.Context, reputation.Trust) error
// Close exits with method-providing Writer.
//
@@ -38,7 +33,7 @@ type Writer interface {
// the Close's return.
//
// Methods must not be called after Close.
- io.Closer
+ Close(context.Context) error
}
// WriterProvider is a group of methods provided
@@ -52,7 +47,7 @@ type WriterProvider interface {
//
// Implementations can have different logic for different
// contexts, so specific ones may document their own behavior.
- InitWriter(Context) (Writer, error)
+ InitWriter(EpochProvider) (Writer, error)
}
// ManagerBuilder defines an interface for providing a list
diff --git a/pkg/services/reputation/common/router/calls.go b/pkg/services/reputation/common/router/calls.go
index 75cdf56ea..a177f6a2b 100644
--- a/pkg/services/reputation/common/router/calls.go
+++ b/pkg/services/reputation/common/router/calls.go
@@ -1,6 +1,7 @@
package router
import (
+ "context"
"encoding/hex"
"sync"
@@ -9,27 +10,27 @@ import (
"go.uber.org/zap"
)
-// routeContext wraps context with additional passed
+// RouteInfo wraps epoch provider with additional passed
// route data. It is only used inside Router and is
// not passed in any external methods.
-type routeContext struct {
- common.Context
+type RouteInfo struct {
+ common.EpochProvider
passedRoute []common.ServerInfo
}
-// NewRouteContext wraps the main context of value passing with its traversal route and epoch.
-func NewRouteContext(ctx common.Context, passed []common.ServerInfo) common.Context {
- return &routeContext{
- Context: ctx,
- passedRoute: passed,
+// NewRouteInfo wraps the main context of value passing with its traversal route and epoch.
+func NewRouteInfo(ep common.EpochProvider, passed []common.ServerInfo) *RouteInfo {
+ return &RouteInfo{
+ EpochProvider: ep,
+ passedRoute: passed,
}
}
type trustWriter struct {
router *Router
- routeCtx *routeContext
+ routeInfo *RouteInfo
routeMtx sync.RWMutex
mServers map[string]common.Writer
@@ -37,7 +38,7 @@ type trustWriter struct {
// InitWriter initializes and returns Writer that sends each value to its next route point.
//
-// If ctx was created by NewRouteContext, then the traversed route is taken into account,
+// If ep was created by NewRouteInfo, then the traversed route is taken into account,
// and the value will be sent to its continuation. Otherwise, the route will be laid
// from scratch and the value will be sent to its primary point.
//
@@ -49,31 +50,31 @@ type trustWriter struct {
// runtime and never returns an error.
//
// Always returns nil error.
-func (r *Router) InitWriter(ctx common.Context) (common.Writer, error) {
+func (r *Router) InitWriter(ep common.EpochProvider) (common.Writer, error) {
var (
- routeCtx *routeContext
- ok bool
+ routeInfo *RouteInfo
+ ok bool
)
- if routeCtx, ok = ctx.(*routeContext); !ok {
- routeCtx = &routeContext{
- Context: ctx,
- passedRoute: []common.ServerInfo{r.localSrvInfo},
+ if routeInfo, ok = ep.(*RouteInfo); !ok {
+ routeInfo = &RouteInfo{
+ EpochProvider: ep,
+ passedRoute: []common.ServerInfo{r.localSrvInfo},
}
}
return &trustWriter{
- router: r,
- routeCtx: routeCtx,
- mServers: make(map[string]common.Writer),
+ router: r,
+ routeInfo: routeInfo,
+ mServers: make(map[string]common.Writer),
}, nil
}
-func (w *trustWriter) Write(t reputation.Trust) error {
+func (w *trustWriter) Write(ctx context.Context, t reputation.Trust) error {
w.routeMtx.Lock()
defer w.routeMtx.Unlock()
- route, err := w.router.routeBuilder.NextStage(w.routeCtx.Epoch(), t, w.routeCtx.passedRoute)
+ route, err := w.router.routeBuilder.NextStage(w.routeInfo.Epoch(), t, w.routeInfo.passedRoute)
if err != nil {
return err
} else if len(route) == 0 {
@@ -99,7 +100,7 @@ func (w *trustWriter) Write(t reputation.Trust) error {
}
// init writer with original context wrapped in routeContext
- remoteWriter, err = provider.InitWriter(w.routeCtx.Context)
+ remoteWriter, err = provider.InitWriter(w.routeInfo.EpochProvider)
if err != nil {
w.router.log.Debug("could not initialize writer",
zap.String("error", err.Error()),
@@ -111,7 +112,7 @@ func (w *trustWriter) Write(t reputation.Trust) error {
w.mServers[key] = remoteWriter
}
- err := remoteWriter.Write(t)
+ err := remoteWriter.Write(ctx, t)
if err != nil {
w.router.log.Debug("could not write the value",
zap.String("error", err.Error()),
@@ -122,9 +123,9 @@ func (w *trustWriter) Write(t reputation.Trust) error {
return nil
}
-func (w *trustWriter) Close() error {
+func (w *trustWriter) Close(ctx context.Context) error {
for key, wRemote := range w.mServers {
- err := wRemote.Close()
+ err := wRemote.Close(ctx)
if err != nil {
w.router.log.Debug("could not close remote server writer",
zap.String("key", key),
diff --git a/pkg/services/reputation/eigentrust/calculator/calls.go b/pkg/services/reputation/eigentrust/calculator/calls.go
index 23e418729..2cc789780 100644
--- a/pkg/services/reputation/eigentrust/calculator/calls.go
+++ b/pkg/services/reputation/eigentrust/calculator/calls.go
@@ -23,7 +23,7 @@ func (p *CalculatePrm) SetEpochIteration(ei eigentrust.EpochIteration) {
p.ei = ei
}
-func (c *Calculator) Calculate(prm CalculatePrm) {
+func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
alpha, err := c.prm.AlphaProvider.EigenTrustAlpha()
if err != nil {
c.opts.log.Debug(
@@ -36,28 +36,25 @@ func (c *Calculator) Calculate(prm CalculatePrm) {
c.alpha = reputation.TrustValueFromFloat64(alpha)
c.beta = reputation.TrustValueFromFloat64(1 - alpha)
- ctx := eigentrust.IterContext{
- Context: context.Background(),
- EpochIteration: prm.ei,
- }
+ epochIteration := prm.ei
- iter := ctx.I()
+ iter := epochIteration.I()
log := c.opts.log.With(
- zap.Uint64("epoch", ctx.Epoch()),
+ zap.Uint64("epoch", epochIteration.Epoch()),
zap.Uint32("iteration", iter),
)
if iter == 0 {
- c.sendInitialValues(ctx)
+ c.sendInitialValues(ctx, epochIteration)
return
}
// decrement iteration number to select the values collected
// on the previous stage
- ctx.SetI(iter - 1)
+ epochIteration.SetI(iter - 1)
- consumersIter, err := c.prm.DaughterTrustSource.InitConsumersIterator(ctx)
+ consumersIter, err := c.prm.DaughterTrustSource.InitConsumersIterator(epochIteration)
if err != nil {
log.Debug("consumers trust iterator's init failure",
zap.String("error", err.Error()),
@@ -67,13 +64,13 @@ func (c *Calculator) Calculate(prm CalculatePrm) {
}
// continue with initial iteration number
- ctx.SetI(iter)
+ epochIteration.SetI(iter)
err = consumersIter.Iterate(func(daughter apireputation.PeerID, iter TrustIterator) error {
err := c.prm.WorkerPool.Submit(func() {
- c.iterateDaughter(iterDaughterPrm{
+ c.iterateDaughter(ctx, iterDaughterPrm{
lastIter: prm.last,
- ctx: ctx,
+ ei: epochIteration,
id: daughter,
consumersIter: iter,
})
@@ -97,7 +94,7 @@ func (c *Calculator) Calculate(prm CalculatePrm) {
type iterDaughterPrm struct {
lastIter bool
- ctx Context
+ ei EpochIterationInfo
id apireputation.PeerID
@@ -105,7 +102,7 @@ type iterDaughterPrm struct {
}
// nolint: funlen
-func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
+func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
initTrust, err := c.prm.InitialTrustSource.InitialTrust(p.id)
if err != nil {
c.opts.log.Debug("get initial trust failure",
@@ -116,7 +113,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
return
}
- daughterIter, err := c.prm.DaughterTrustSource.InitDaughterIterator(p.ctx, p.id)
+ daughterIter, err := c.prm.DaughterTrustSource.InitDaughterIterator(p.ei, p.id)
if err != nil {
c.opts.log.Debug("daughter trust iterator's init failure",
zap.String("error", err.Error()),
@@ -130,8 +127,8 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
err = p.consumersIter.Iterate(func(trust reputation.Trust) error {
if !p.lastIter {
select {
- case <-p.ctx.Done():
- return p.ctx.Err()
+ case <-ctx.Done():
+ return ctx.Err()
default:
}
}
@@ -155,12 +152,12 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
var intermediateTrust eigentrust.IterationTrust
- intermediateTrust.SetEpoch(p.ctx.Epoch())
+ intermediateTrust.SetEpoch(p.ei.Epoch())
intermediateTrust.SetPeer(p.id)
- intermediateTrust.SetI(p.ctx.I())
+ intermediateTrust.SetI(p.ei.I())
if p.lastIter {
- finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ctx)
+ finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ei)
if err != nil {
c.opts.log.Debug("init writer failure",
zap.String("error", err.Error()),
@@ -180,7 +177,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
return
}
} else {
- intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ctx)
+ intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ei)
if err != nil {
c.opts.log.Debug("init writer failure",
zap.String("error", err.Error()),
@@ -191,8 +188,8 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
err = daughterIter.Iterate(func(trust reputation.Trust) error {
select {
- case <-p.ctx.Done():
- return p.ctx.Err()
+ case <-ctx.Done():
+ return ctx.Err()
default:
}
@@ -201,7 +198,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
trust.SetValue(val)
- err := intermediateWriter.Write(trust)
+ err := intermediateWriter.Write(ctx, trust)
if err != nil {
c.opts.log.Debug("write value failure",
zap.String("error", err.Error()),
@@ -216,7 +213,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
)
}
- err = intermediateWriter.Close()
+ err = intermediateWriter.Close(ctx)
if err != nil {
c.opts.log.Error(
"could not close writer",
@@ -226,8 +223,8 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
}
}
-func (c *Calculator) sendInitialValues(ctx Context) {
- daughterIter, err := c.prm.DaughterTrustSource.InitAllDaughtersIterator(ctx)
+func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochIterationInfo) {
+ daughterIter, err := c.prm.DaughterTrustSource.InitAllDaughtersIterator(epochInfo)
if err != nil {
c.opts.log.Debug("all daughters trust iterator's init failure",
zap.String("error", err.Error()),
@@ -236,7 +233,7 @@ func (c *Calculator) sendInitialValues(ctx Context) {
return
}
- intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(ctx)
+ intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(epochInfo)
if err != nil {
c.opts.log.Debug("init writer failure",
zap.String("error", err.Error()),
@@ -263,7 +260,7 @@ func (c *Calculator) sendInitialValues(ctx Context) {
initTrust.Mul(trust.Value())
trust.SetValue(initTrust)
- err = intermediateWriter.Write(trust)
+ err = intermediateWriter.Write(ctx, trust)
if err != nil {
c.opts.log.Debug("write value failure",
zap.String("error", err.Error()),
@@ -281,7 +278,7 @@ func (c *Calculator) sendInitialValues(ctx Context) {
)
}
- err = intermediateWriter.Close()
+ err = intermediateWriter.Close(ctx)
if err != nil {
c.opts.log.Debug("could not close writer",
zap.String("error", err.Error()),
diff --git a/pkg/services/reputation/eigentrust/calculator/deps.go b/pkg/services/reputation/eigentrust/calculator/deps.go
index 66d3fd301..a22d1df76 100644
--- a/pkg/services/reputation/eigentrust/calculator/deps.go
+++ b/pkg/services/reputation/eigentrust/calculator/deps.go
@@ -1,16 +1,12 @@
package eigentrustcalc
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
)
-type Context interface {
- context.Context
-
+type EpochIterationInfo interface {
// Must return epoch number to select the values
// for global trust calculation.
Epoch() uint64
@@ -43,19 +39,19 @@ type PeerTrustsIterator interface {
type DaughterTrustIteratorProvider interface {
// InitDaughterIterator must init TrustIterator
// that iterates over received local trusts from
- // daughter p for ctx.Epoch() epoch.
- InitDaughterIterator(ctx Context, p apireputation.PeerID) (TrustIterator, error)
+ // daughter p for epochInfo.Epoch() epoch.
+ InitDaughterIterator(epochInfo EpochIterationInfo, p apireputation.PeerID) (TrustIterator, error)
// InitAllDaughtersIterator must init PeerTrustsIterator
// that must iterate over all daughters of the current
// node(manager) and all trusts received from them for
- // ctx.Epoch() epoch.
- InitAllDaughtersIterator(ctx Context) (PeerTrustsIterator, error)
+ // epochInfo.Epoch() epoch.
+ InitAllDaughtersIterator(epochInfo EpochIterationInfo) (PeerTrustsIterator, error)
// InitConsumersIterator must init PeerTrustsIterator
// that must iterate over all daughters of the current
// node(manager) and their consumers' trusts received
- // from other managers for ctx.Epoch() epoch and
- // ctx.I() iteration.
- InitConsumersIterator(Context) (PeerTrustsIterator, error)
+ // from other managers for epochInfo.Epoch() epoch and
+ // epochInfo.I() iteration.
+ InitConsumersIterator(EpochIterationInfo) (PeerTrustsIterator, error)
}
// IntermediateWriter must write intermediate result to contract.
@@ -68,7 +64,7 @@ type IntermediateWriter interface {
// IntermediateWriterProvider must provide ready-to-work
// IntermediateWriter.
type IntermediateWriterProvider interface {
- InitIntermediateWriter(Context) (IntermediateWriter, error)
+ InitIntermediateWriter(EpochIterationInfo) (IntermediateWriter, error)
}
// AlphaProvider must provide information about required
diff --git a/pkg/services/reputation/eigentrust/controller/calls.go b/pkg/services/reputation/eigentrust/controller/calls.go
index faf953aed..1753a430b 100644
--- a/pkg/services/reputation/eigentrust/controller/calls.go
+++ b/pkg/services/reputation/eigentrust/controller/calls.go
@@ -1,6 +1,8 @@
package eigentrustctrl
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
"go.uber.org/zap"
)
@@ -22,7 +24,7 @@ func (x iterContext) Last() bool {
}
// Continue moves the global reputation calculator to the next iteration.
-func (c *Controller) Continue(prm ContinuePrm) {
+func (c *Controller) Continue(ctx context.Context, prm ContinuePrm) {
c.mtx.Lock()
{
@@ -46,7 +48,7 @@ func (c *Controller) Continue(prm ContinuePrm) {
iterCtx.last = iterCtx.I() == iterCtx.iterationNumber-1
err := c.prm.WorkerPool.Submit(func() {
- c.prm.DaughtersTrustCalculator.Calculate(iterCtx)
+ c.prm.DaughtersTrustCalculator.Calculate(ctx, iterCtx)
// iteration++
iterCtx.Increment()
diff --git a/pkg/services/reputation/eigentrust/controller/deps.go b/pkg/services/reputation/eigentrust/controller/deps.go
index 2aeafb61a..c068f7cc4 100644
--- a/pkg/services/reputation/eigentrust/controller/deps.go
+++ b/pkg/services/reputation/eigentrust/controller/deps.go
@@ -1,5 +1,7 @@
package eigentrustctrl
+import "context"
+
// IterationContext is a context of the i-th
// stage of iterative EigenTrust algorithm.
type IterationContext interface {
@@ -25,7 +27,7 @@ type DaughtersTrustCalculator interface {
// http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Ch.5.1.
//
// Execution should be interrupted if ctx.Last().
- Calculate(ctx IterationContext)
+ Calculate(ctx context.Context, iter IterationContext)
}
// IterationsProvider must provide information about numbers
diff --git a/pkg/services/reputation/eigentrust/iteration.go b/pkg/services/reputation/eigentrust/iteration.go
index b06064f78..e4793f044 100644
--- a/pkg/services/reputation/eigentrust/iteration.go
+++ b/pkg/services/reputation/eigentrust/iteration.go
@@ -1,8 +1,6 @@
package eigentrust
import (
- "context"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
)
@@ -36,22 +34,11 @@ type IterationTrust struct {
reputation.Trust
}
-// IterContext aggregates context and data required for
-// iterations.
-// nolint: containedctx
-type IterContext struct {
- context.Context
- EpochIteration
-}
-
-func NewIterContext(ctx context.Context, epoch uint64, iter uint32) *IterContext {
+func NewEpochIteration(epoch uint64, iter uint32) *EpochIteration {
ei := EpochIteration{}
ei.SetI(iter)
ei.SetEpoch(epoch)
- return &IterContext{
- Context: ctx,
- EpochIteration: ei,
- }
+ return &ei
}
diff --git a/pkg/services/reputation/local/controller/calls.go b/pkg/services/reputation/local/controller/calls.go
index 98815492c..80fa772d6 100644
--- a/pkg/services/reputation/local/controller/calls.go
+++ b/pkg/services/reputation/local/controller/calls.go
@@ -27,80 +27,75 @@ func (p *ReportPrm) SetEpoch(e uint64) {
//
// Each call acquires a report context for an Epoch parameter.
// At the very end of the operation, the context is released.
-func (c *Controller) Report(prm ReportPrm) {
+func (c *Controller) Report(ctx context.Context, prm ReportPrm) {
// acquire report
- reportCtx := c.acquireReport(prm.epoch)
- if reportCtx == nil {
+ rCtx, reporter := c.acquireReporter(ctx, prm.epoch)
+ if reporter == nil {
return
}
// report local trust values
- reportCtx.report()
+ reporter.report(rCtx)
// finally stop and free the report
- c.freeReport(prm.epoch, reportCtx.log)
+ c.freeReport(prm.epoch, reporter.log)
}
-type reportContext struct {
+type reporter struct {
epoch uint64
ctrl *Controller
log *logger.Logger
- ctx common.Context
+ ep common.EpochProvider
}
-// nolint: containedctx
-type iteratorContext struct {
- context.Context
-
+type epochProvider struct {
epoch uint64
}
-func (c iteratorContext) Epoch() uint64 {
+func (c epochProvider) Epoch() uint64 {
return c.epoch
}
-func (c *Controller) acquireReport(epoch uint64) *reportContext {
- var ctx context.Context
+func (c *Controller) acquireReporter(ctx context.Context, epoch uint64) (context.Context, *reporter) {
+ started := true
c.mtx.Lock()
-
{
if cancel := c.mCtx[epoch]; cancel == nil {
- ctx, cancel = context.WithCancel(context.Background())
+ ctx, cancel = context.WithCancel(ctx)
c.mCtx[epoch] = cancel
+ started = false
}
}
-
c.mtx.Unlock()
log := &logger.Logger{Logger: c.opts.log.With(
zap.Uint64("epoch", epoch),
)}
- if ctx == nil {
+ if started {
log.Debug("report is already started")
- return nil
+ return ctx, nil
}
- return &reportContext{
+ return ctx, &reporter{
epoch: epoch,
ctrl: c,
log: log,
- ctx: &iteratorContext{
- Context: ctx,
- epoch: epoch,
+ ep: &epochProvider{
+ epoch: epoch,
},
}
}
-func (c *reportContext) report() {
+func (c *reporter) report(ctx context.Context) {
c.log.Debug("starting to report local trust values")
// initialize iterator over locally collected values
- iterator, err := c.ctrl.prm.LocalTrustSource.InitIterator(c.ctx)
+ iterator, err := c.ctrl.prm.LocalTrustSource.InitIterator(c.ep)
if err != nil {
c.log.Debug("could not initialize iterator over local trust values",
zap.String("error", err.Error()),
@@ -110,7 +105,7 @@ func (c *reportContext) report() {
}
// initialize target of local trust values
- targetWriter, err := c.ctrl.prm.LocalTrustTarget.InitWriter(c.ctx)
+ targetWriter, err := c.ctrl.prm.LocalTrustTarget.InitWriter(c.ep)
if err != nil {
c.log.Debug("could not initialize local trust target",
zap.String("error", err.Error()),
@@ -123,11 +118,11 @@ func (c *reportContext) report() {
err = iterator.Iterate(
func(t reputation.Trust) error {
// check if context is done
- if err := c.ctx.Err(); err != nil {
+ if err := ctx.Err(); err != nil {
return err
}
- return targetWriter.Write(t)
+ return targetWriter.Write(ctx, t)
},
)
if err != nil && !errors.Is(err, context.Canceled) {
@@ -139,7 +134,7 @@ func (c *reportContext) report() {
}
// finish writing
- err = targetWriter.Close()
+ err = targetWriter.Close(ctx)
if err != nil {
c.log.Debug("could not finish writing local trust values",
zap.String("error", err.Error()),
diff --git a/pkg/services/reputation/local/controller/controller.go b/pkg/services/reputation/local/controller/controller.go
index 7bf56be89..373df36db 100644
--- a/pkg/services/reputation/local/controller/controller.go
+++ b/pkg/services/reputation/local/controller/controller.go
@@ -5,7 +5,7 @@ import (
"fmt"
"sync"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
+ reputationrouter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common/router"
)
// Prm groups the required parameters of the Controller's constructor.
@@ -24,7 +24,7 @@ type Prm struct {
// trust to other nodes.
//
// Must not be nil.
- LocalTrustTarget reputationcommon.WriterProvider
+ LocalTrustTarget *reputationrouter.Router
}
// Controller represents main handler for starting
diff --git a/pkg/services/reputation/local/controller/deps.go b/pkg/services/reputation/local/controller/deps.go
index 3ab72eb5c..6f4a29c99 100644
--- a/pkg/services/reputation/local/controller/deps.go
+++ b/pkg/services/reputation/local/controller/deps.go
@@ -30,5 +30,5 @@ type IteratorProvider interface {
//
// Implementations can have different logic for different
// contexts, so specific ones may document their own behavior.
- InitIterator(common.Context) (Iterator, error)
+ InitIterator(common.EpochProvider) (Iterator, error)
}
diff --git a/pkg/services/reputation/local/controller/util.go b/pkg/services/reputation/local/controller/util.go
index 97b9e3a65..122550498 100644
--- a/pkg/services/reputation/local/controller/util.go
+++ b/pkg/services/reputation/local/controller/util.go
@@ -7,11 +7,11 @@ type storageWrapper struct {
i Iterator
}
-func (s storageWrapper) InitIterator(common.Context) (Iterator, error) {
+func (s storageWrapper) InitIterator(common.EpochProvider) (Iterator, error) {
return s.i, nil
}
-func (s storageWrapper) InitWriter(common.Context) (common.Writer, error) {
+func (s storageWrapper) InitWriter(common.EpochProvider) (common.Writer, error) {
return s.w, nil
}
From c236b54a65fb8080951e65dfbb3ade45c16278c9 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 11:46:56 +0300
Subject: [PATCH 0068/1943] [#212] reputationsvc: Resolve funlen linter
Resolve funlen linter for Calculator.iterateDaughter method.
Signed-off-by: Dmitrii Stepanov
---
.../reputation/eigentrust/calculator/calls.go | 113 ++++++++++--------
1 file changed, 60 insertions(+), 53 deletions(-)
diff --git a/pkg/services/reputation/eigentrust/calculator/calls.go b/pkg/services/reputation/eigentrust/calculator/calls.go
index 2cc789780..a8e5cf1da 100644
--- a/pkg/services/reputation/eigentrust/calculator/calls.go
+++ b/pkg/services/reputation/eigentrust/calculator/calls.go
@@ -101,7 +101,6 @@ type iterDaughterPrm struct {
consumersIter TrustIterator
}
-// nolint: funlen
func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
initTrust, err := c.prm.InitialTrustSource.InitialTrust(p.id)
if err != nil {
@@ -157,69 +156,77 @@ func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
intermediateTrust.SetI(p.ei.I())
if p.lastIter {
- finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ei)
- if err != nil {
- c.opts.log.Debug("init writer failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- intermediateTrust.SetValue(sum)
-
- err = finalWriter.WriteIntermediateTrust(intermediateTrust)
- if err != nil {
- c.opts.log.Debug("write final result failure",
- zap.String("error", err.Error()),
- )
-
- return
- }
+ c.processLastIteration(p, intermediateTrust, sum)
} else {
- intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ei)
- if err != nil {
- c.opts.log.Debug("init writer failure",
- zap.String("error", err.Error()),
- )
+ c.processIntermediateIteration(ctx, p, daughterIter, sum)
+ }
+}
- return
+func (c *Calculator) processLastIteration(p iterDaughterPrm, intermediateTrust eigentrust.IterationTrust, sum reputation.TrustValue) {
+ finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ei)
+ if err != nil {
+ c.opts.log.Debug("init writer failure",
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+
+ intermediateTrust.SetValue(sum)
+
+ err = finalWriter.WriteIntermediateTrust(intermediateTrust)
+ if err != nil {
+ c.opts.log.Debug("write final result failure",
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+}
+
+func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDaughterPrm, daughterIter TrustIterator, sum reputation.TrustValue) {
+ intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ei)
+ if err != nil {
+ c.opts.log.Debug("init writer failure",
+ zap.String("error", err.Error()),
+ )
+
+ return
+ }
+
+ err = daughterIter.Iterate(func(trust reputation.Trust) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
}
- err = daughterIter.Iterate(func(trust reputation.Trust) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
+ val := trust.Value()
+ val.Mul(sum)
- val := trust.Value()
- val.Mul(sum)
+ trust.SetValue(val)
- trust.SetValue(val)
-
- err := intermediateWriter.Write(ctx, trust)
- if err != nil {
- c.opts.log.Debug("write value failure",
- zap.String("error", err.Error()),
- )
- }
-
- return nil
- })
+ err := intermediateWriter.Write(ctx, trust)
if err != nil {
- c.opts.log.Debug("iterate daughter trusts failure",
+ c.opts.log.Debug("write value failure",
zap.String("error", err.Error()),
)
}
- err = intermediateWriter.Close(ctx)
- if err != nil {
- c.opts.log.Error(
- "could not close writer",
- zap.String("error", err.Error()),
- )
- }
+ return nil
+ })
+ if err != nil {
+ c.opts.log.Debug("iterate daughter trusts failure",
+ zap.String("error", err.Error()),
+ )
+ }
+
+ err = intermediateWriter.Close(ctx)
+ if err != nil {
+ c.opts.log.Error(
+ "could not close writer",
+ zap.String("error", err.Error()),
+ )
}
}
From cb172e73a690735d192cdff465e5728ff86232ce Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Fri, 7 Apr 2023 17:12:23 +0300
Subject: [PATCH 0069/1943] [#228] node: Use uber atomic package instead of
standard
Signed-off-by: Airat Arifullin a.arifullin@yadro.com
---
pkg/local_object_storage/shard/control_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index fec268350..df7e536cb 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -6,7 +6,6 @@ import (
"math"
"os"
"path/filepath"
- "sync/atomic"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -28,6 +27,7 @@ import (
objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
+ "go.uber.org/atomic"
"go.uber.org/zap/zaptest"
)
From dbc3811ff4653c9262b0b9c31b19ab4655eef046 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 30 Mar 2023 14:49:15 +0300
Subject: [PATCH 0070/1943] [#191] engine: Allow to remove redundant object
copies
RemoveDuplicates() removes all duplicate object copies stored on
multiple shards. All shards are processed and the command tries to leave
a copy on the best shard according to HRW.
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/engine/engine.go | 2 +
.../engine/remove_copies.go | 138 ++++++++++++
.../engine/remove_copies_test.go | 208 ++++++++++++++++++
pkg/local_object_storage/engine/shards.go | 15 +-
4 files changed, 358 insertions(+), 5 deletions(-)
create mode 100644 pkg/local_object_storage/engine/remove_copies.go
create mode 100644 pkg/local_object_storage/engine/remove_copies_test.go
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 4d154d289..e0161bfe3 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -17,6 +17,8 @@ import (
type StorageEngine struct {
*cfg
+ removeDuplicatesInProgress atomic.Bool
+
mtx *sync.RWMutex
shards map[string]hashedShard
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
new file mode 100644
index 000000000..d881a52d1
--- /dev/null
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -0,0 +1,138 @@
+package engine
+
+import (
+ "context"
+ "errors"
+
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/hrw"
+ "go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+// errRemoveDuplicatesInProgress is returned when another rebalancing is in progress.
+// We need it because `Rebalance` removes objects and executing it concurrently
+// on 2 shards can lead to data loss. In future this restriction could be relaxed.
+var errRemoveDuplicatesInProgress = errors.New("redundant copies removal is already in progress")
+
+const defaultRemoveDuplicatesConcurrency = 256
+
+type RemoveDuplicatesPrm struct {
+ Concurrency int
+}
+
+// RemoveDuplicates iterates over all objects and removes duplicate object copies
+// from shards which are worse as defined by HRW sort.
+// Safety:
+// 1. Concurrent execution is prohibited, thus 1 object copy should always be left.
+// 2. If we delete an object from another thread, this is not a problem. Currently,
+// we have 2 threads that can remove "valid" (non-expired and logically non-removed) objects:
+// policer and rebalance. For rebalance see (1).
+// If policer removes something, we do not care if both copies are removed or one of them is left,
+// as the remaining copy will be removed during the next policer iteration.
+func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicatesPrm) error {
+ if !e.removeDuplicatesInProgress.CompareAndSwap(false, true) {
+ return errRemoveDuplicatesInProgress
+ }
+ defer e.removeDuplicatesInProgress.Store(false)
+
+ if prm.Concurrency <= 0 {
+ prm.Concurrency = defaultRemoveDuplicatesConcurrency
+ }
+
+ e.log.Info("starting removal of locally-redundant copies",
+ zap.Int("concurrency", prm.Concurrency))
+
+	// The mutex must be taken for the whole duration to avoid target shard being removed
+ // concurrently: this can lead to data loss.
+ e.mtx.RLock()
+ defer e.mtx.RUnlock()
+
+ // Iterate by shards to be sure that no objects from 2 different shards are removed simultaneously.
+ // This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0.
+ // However we could change weights in future and easily forget this function.
+ for _, sh := range e.shards {
+ e.log.Debug("started duplicates removal routine", zap.String("shard_id", sh.ID().String()))
+ ch := make(chan oid.Address)
+
+ errG, ctx := errgroup.WithContext(ctx)
+ errG.SetLimit(prm.Concurrency + 1) // +1 for the listing thread
+
+ errG.Go(func() error {
+ defer close(ch)
+
+ var cursor *meta.Cursor
+ for {
+ var listPrm shard.ListWithCursorPrm
+ listPrm.WithCount(uint32(prm.Concurrency))
+ listPrm.WithCursor(cursor)
+ res, err := sh.ListWithCursor(listPrm)
+ if err != nil {
+ if errors.Is(err, meta.ErrEndOfListing) {
+ return nil
+ }
+ return err
+ }
+ for _, addr := range res.AddressList() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case ch <- addr.Address:
+ }
+ }
+ cursor = res.Cursor()
+ }
+ })
+
+ for i := 0; i < prm.Concurrency; i++ {
+ errG.Go(func() error {
+ return e.removeObjects(ctx, ch)
+ })
+ }
+ if err := errG.Wait(); err != nil {
+ e.log.Error("finished removal of locally-redundant copies", zap.Error(err))
+ return err
+ }
+ }
+
+ e.log.Info("finished removal of locally-redundant copies")
+ return nil
+}
+
+// removeObjects reads addresses from ch and removes all objects from other shards, excluding excludeID.
+func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address) error {
+ shards := make([]hashedShard, 0, len(e.shards))
+ for _, sh := range e.shards {
+ shards = append(shards, sh)
+ }
+
+ for addr := range ch {
+ h := hrw.Hash([]byte(addr.EncodeToString()))
+ shards := sortShardsByWeight(shards, h)
+ found := false
+ for i := range shards {
+ var existsPrm shard.ExistsPrm
+ existsPrm.SetAddress(addr)
+
+ res, err := shards[i].Exists(existsPrm)
+ if err != nil {
+ return err
+ } else if !res.Exists() {
+ continue
+ } else if !found {
+ found = true
+ continue
+ }
+
+ var deletePrm shard.DeletePrm
+ deletePrm.SetAddresses(addr)
+ _, err = shards[i].Delete(deletePrm)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go
new file mode 100644
index 000000000..4415d01c8
--- /dev/null
+++ b/pkg/local_object_storage/engine/remove_copies_test.go
@@ -0,0 +1,208 @@
+package engine
+
+import (
+ "context"
+ "sync"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRebalance(t *testing.T) {
+ te := newEngineWithErrorThreshold(t, "", 0)
+
+ const (
+ objCount = 20
+ copyCount = (objCount + 2) / 3
+ )
+
+ type objectWithShard struct {
+ bestShard shard.ID
+ worstShard shard.ID
+ object *objectSDK.Object
+ }
+
+ objects := make([]objectWithShard, objCount)
+ for i := range objects {
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ obj.SetPayload(make([]byte, errSmallSize))
+ objects[i].object = obj
+
+ shards := te.ng.sortShardsByWeight(object.AddressOf(obj))
+ objects[i].bestShard = *shards[0].Shard.ID()
+ objects[i].worstShard = *shards[1].Shard.ID()
+ }
+
+ for i := range objects {
+ var prm shard.PutPrm
+ prm.SetObject(objects[i].object)
+
+ var err1, err2 error
+ te.ng.mtx.RLock()
+ // Every 3rd object (i%3 == 0) is put to both shards, others are distributed.
+ if i%3 != 1 {
+ _, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
+ }
+ if i%3 != 2 {
+ _, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
+ }
+ te.ng.mtx.RUnlock()
+
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+ }
+
+ var removedMtx sync.Mutex
+ var removed []deleteEvent
+ for _, shard := range te.shards {
+ id := *shard.id
+ shard.largeFileStorage.SetOption(teststore.WithDelete(func(prm common.DeletePrm) (common.DeleteRes, error) {
+ removedMtx.Lock()
+ removed = append(removed, deleteEvent{shardID: id, addr: prm.Address})
+ removedMtx.Unlock()
+ return common.DeleteRes{}, nil
+ }))
+ }
+
+ err := te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{})
+ require.NoError(t, err)
+
+ require.Equal(t, copyCount, len(removed))
+
+ removedMask := make([]bool, len(objects))
+loop:
+ for i := range removed {
+ for j := range objects {
+ if removed[i].addr == object.AddressOf(objects[j].object) {
+ require.Equal(t, objects[j].worstShard, removed[i].shardID,
+ "object %d was expected to be removed from another shard", j)
+ removedMask[j] = true
+ continue loop
+ }
+ }
+ require.FailNow(t, "unexpected object was removed", removed[i].addr)
+ }
+
+ for i := 0; i < copyCount; i++ {
+ if i%3 == 0 {
+ require.True(t, removedMask[i], "object %d was expected to be removed", i)
+ } else {
+ require.False(t, removedMask[i], "object %d was not expected to be removed", i)
+ }
+ }
+}
+
+func TestRebalanceSingleThread(t *testing.T) {
+ te := newEngineWithErrorThreshold(t, "", 0)
+
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ obj.SetPayload(make([]byte, errSmallSize))
+
+ var prm shard.PutPrm
+ prm.SetObject(obj)
+ te.ng.mtx.RLock()
+ _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
+ _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
+ te.ng.mtx.RUnlock()
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+
+ signal := make(chan struct{}) // unblock rebalance
+ started := make(chan struct{}) // make sure rebalance is started
+ for _, shard := range te.shards {
+ shard.largeFileStorage.SetOption(teststore.WithDelete(func(common.DeletePrm) (common.DeleteRes, error) {
+ close(started)
+ <-signal
+ return common.DeleteRes{}, nil
+ }))
+ }
+
+ var firstErr error
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ firstErr = te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{})
+ }()
+
+ <-started
+ secondErr := te.ng.RemoveDuplicates(context.Background(), RemoveDuplicatesPrm{})
+ require.ErrorIs(t, secondErr, errRemoveDuplicatesInProgress)
+
+ close(signal)
+ wg.Wait()
+ require.NoError(t, firstErr)
+}
+
+type deleteEvent struct {
+ shardID shard.ID
+ addr oid.Address
+}
+
+func TestRebalanceExitByContext(t *testing.T) {
+ te := newEngineWithErrorThreshold(t, "", 0)
+
+ objects := make([]*objectSDK.Object, 4)
+ for i := range objects {
+ obj := testutil.GenerateObjectWithCID(cidtest.ID())
+ obj.SetPayload(make([]byte, errSmallSize))
+ objects[i] = obj
+ }
+
+ for i := range objects {
+ var prm shard.PutPrm
+ prm.SetObject(objects[i])
+
+ te.ng.mtx.RLock()
+ _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
+ _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
+ te.ng.mtx.RUnlock()
+
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+ }
+
+ var removed []deleteEvent
+ deleteCh := make(chan struct{})
+ signal := make(chan struct{})
+ for _, shard := range te.shards {
+ id := *shard.id
+ shard.largeFileStorage.SetOption(teststore.WithDelete(func(prm common.DeletePrm) (common.DeleteRes, error) {
+ deleteCh <- struct{}{}
+ <-signal
+ removed = append(removed, deleteEvent{shardID: id, addr: prm.Address})
+ return common.DeleteRes{}, nil
+ }))
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ var rebalanceErr error
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ rebalanceErr = te.ng.RemoveDuplicates(ctx, RemoveDuplicatesPrm{Concurrency: 1})
+ }()
+
+ const removeCount = 3
+ for i := 0; i < removeCount-1; i++ {
+ <-deleteCh
+ signal <- struct{}{}
+ }
+ <-deleteCh
+ cancel()
+ close(signal)
+
+ wg.Wait()
+ require.ErrorIs(t, rebalanceErr, context.Canceled)
+ require.Equal(t, removeCount, len(removed))
+}
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 34210d835..2b1146ff2 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -208,16 +208,23 @@ func (e *StorageEngine) sortShardsByWeight(objAddr interface{ EncodeToString() s
 e.mtx.RLock()
 defer e.mtx.RUnlock()
+ h := hrw.Hash([]byte(objAddr.EncodeToString()))
 shards := make([]hashedShard, 0, len(e.shards))
- weights := make([]float64, 0, len(e.shards))
-
 for _, sh := range e.shards {
 shards = append(shards, hashedShard(sh))
- weights = append(weights, e.shardWeight(sh.Shard))
+ }
+ return sortShardsByWeight(shards, h)
+}
+
+// sortShardsByWeight HRW-sorts shards for the placement hash h, weighting
+// each shard by its free space (WeightValues().FreeSpace).
+func sortShardsByWeight(shards []hashedShard, h uint64) []hashedShard {
+ weights := make([]float64, 0, len(shards))
+ for _, sh := range shards {
+ weights = append(weights, float64(sh.Shard.WeightValues().FreeSpace))
 }
- hrw.SortHasherSliceByWeightValue(shards, weights, hrw.Hash([]byte(objAddr.EncodeToString())))
-
+ hrw.SortHasherSliceByWeightValue(shards, weights, h)
 return shards
 }
From 0b9622c418d89d76e9b82ac1fa4969d2b2087a79 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 7 Apr 2023 14:21:05 +0300
Subject: [PATCH 0071/1943] [#191] control: Add Doctor RPC
Doctor RPC performs complex operations on the storage engine.
Currently only duplicate removal is supported.
Signed-off-by: Evgenii Stratonikov
---
pkg/services/control/convert.go | 18 +
pkg/services/control/rpc.go | 14 +
pkg/services/control/server/doctor.go | 37 ++
pkg/services/control/service.pb.go | 711 +++++++++++++++------
pkg/services/control/service.proto | 28 +
pkg/services/control/service_frostfs.pb.go | 153 +++++
pkg/services/control/service_grpc.pb.go | 94 ++-
7 files changed, 824 insertions(+), 231 deletions(-)
create mode 100644 pkg/services/control/server/doctor.go
diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go
index 833288bb7..f7582dd68 100644
--- a/pkg/services/control/convert.go
+++ b/pkg/services/control/convert.go
@@ -200,3 +200,23 @@ func (w *flushCacheResponseWrapper) FromGRPCMessage(m grpc.Message) error {
 w.FlushCacheResponse = r
 return nil
 }
+
+// doctorResponseWrapper adapts *DoctorResponse to the generic client message
+// interface, mirroring the other response wrappers in this file.
+type doctorResponseWrapper struct {
+ *DoctorResponse
+}
+
+func (w *doctorResponseWrapper) ToGRPCMessage() grpc.Message {
+ return w.DoctorResponse
+}
+
+func (w *doctorResponseWrapper) FromGRPCMessage(m grpc.Message) error {
+ r, ok := m.(*DoctorResponse)
+ if !ok {
+ return message.NewUnexpectedMessageType(m, (*DoctorResponse)(nil))
+ }
+
+ w.DoctorResponse = r
+ return nil
+}
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 0779e177b..2676ea7a5 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -18,6 +18,7 @@ const (
rpcSynchronizeTree = "SynchronizeTree"
rpcEvacuateShard = "EvacuateShard"
rpcFlushCache = "FlushCache"
+ rpcDoctor = "Doctor"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -191,3 +192,17 @@ func FlushCache(cli *client.Client, req *FlushCacheRequest, opts ...client.CallO
 return wResp.FlushCacheResponse, nil
 }
+
+// Doctor executes ControlService.Doctor RPC.
+func Doctor(cli *client.Client, req *DoctorRequest, opts ...client.CallOption) (*DoctorResponse, error) {
+ // Wrap the typed messages into the generic wrappers expected by SendUnary.
+ wResp := &doctorResponseWrapper{new(DoctorResponse)}
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDoctor), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.DoctorResponse, nil
+}
diff --git a/pkg/services/control/server/doctor.go b/pkg/services/control/server/doctor.go
new file mode 100644
index 000000000..2c91d4c2b
--- /dev/null
+++ b/pkg/services/control/server/doctor.go
@@ -0,0 +1,41 @@
+package control
+
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// Doctor performs complex operations on the local storage engine.
+// Currently the only supported operation is duplicate object removal.
+func (s *Server) Doctor(ctx context.Context, req *control.DoctorRequest) (*control.DoctorResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ // Use the nil-safe generated getters: a request with a missing body must
+ // be rejected as InvalidArgument, not crash the server with a nil panic.
+ if !req.GetBody().GetRemoveDuplicates() {
+ return nil, status.Error(codes.InvalidArgument, "operation not specified")
+ }
+
+ var prm engine.RemoveDuplicatesPrm
+ prm.Concurrency = int(req.GetBody().GetConcurrency())
+
+ err = s.s.RemoveDuplicates(ctx, prm)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.DoctorResponse{Body: &control.DoctorResponse_Body{}}
+
+ err = SignMessage(s.key, resp)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ return resp, nil
+}
diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go
index 78e7b0598..ca3e2770e 100644
--- a/pkg/services/control/service.pb.go
+++ b/pkg/services/control/service.pb.go
@@ -1172,6 +1172,118 @@ func (x *FlushCacheResponse) GetSignature() *Signature {
return nil
}
+// Doctor request.
+type DoctorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *DoctorRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *DoctorRequest) Reset() {
+ *x = DoctorRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoctorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoctorRequest) ProtoMessage() {}
+
+func (x *DoctorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoctorRequest.ProtoReflect.Descriptor instead.
+func (*DoctorRequest) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *DoctorRequest) GetBody() *DoctorRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *DoctorRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+// Doctor response.
+type DoctorResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Body *DoctorResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
+ Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
+}
+
+func (x *DoctorResponse) Reset() {
+ *x = DoctorResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoctorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoctorResponse) ProtoMessage() {}
+
+func (x *DoctorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoctorResponse.ProtoReflect.Descriptor instead.
+func (*DoctorResponse) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *DoctorResponse) GetBody() *DoctorResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *DoctorResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
// Health check request body.
type HealthCheckRequest_Body struct {
state protoimpl.MessageState
@@ -1182,7 +1294,7 @@ type HealthCheckRequest_Body struct {
func (x *HealthCheckRequest_Body) Reset() {
*x = HealthCheckRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[20]
+ mi := &file_pkg_services_control_service_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1195,7 +1307,7 @@ func (x *HealthCheckRequest_Body) String() string {
func (*HealthCheckRequest_Body) ProtoMessage() {}
func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[20]
+ mi := &file_pkg_services_control_service_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1226,7 +1338,7 @@ type HealthCheckResponse_Body struct {
func (x *HealthCheckResponse_Body) Reset() {
*x = HealthCheckResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[21]
+ mi := &file_pkg_services_control_service_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1239,7 +1351,7 @@ func (x *HealthCheckResponse_Body) String() string {
func (*HealthCheckResponse_Body) ProtoMessage() {}
func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[21]
+ mi := &file_pkg_services_control_service_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1290,7 +1402,7 @@ type SetNetmapStatusRequest_Body struct {
func (x *SetNetmapStatusRequest_Body) Reset() {
*x = SetNetmapStatusRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[22]
+ mi := &file_pkg_services_control_service_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1303,7 +1415,7 @@ func (x *SetNetmapStatusRequest_Body) String() string {
func (*SetNetmapStatusRequest_Body) ProtoMessage() {}
func (x *SetNetmapStatusRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[22]
+ mi := &file_pkg_services_control_service_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1343,7 +1455,7 @@ type SetNetmapStatusResponse_Body struct {
func (x *SetNetmapStatusResponse_Body) Reset() {
*x = SetNetmapStatusResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[23]
+ mi := &file_pkg_services_control_service_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1356,7 +1468,7 @@ func (x *SetNetmapStatusResponse_Body) String() string {
func (*SetNetmapStatusResponse_Body) ProtoMessage() {}
func (x *SetNetmapStatusResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[23]
+ mi := &file_pkg_services_control_service_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1386,7 +1498,7 @@ type DropObjectsRequest_Body struct {
func (x *DropObjectsRequest_Body) Reset() {
*x = DropObjectsRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[24]
+ mi := &file_pkg_services_control_service_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1399,7 +1511,7 @@ func (x *DropObjectsRequest_Body) String() string {
func (*DropObjectsRequest_Body) ProtoMessage() {}
func (x *DropObjectsRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[24]
+ mi := &file_pkg_services_control_service_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1432,7 +1544,7 @@ type DropObjectsResponse_Body struct {
func (x *DropObjectsResponse_Body) Reset() {
*x = DropObjectsResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[25]
+ mi := &file_pkg_services_control_service_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1445,7 +1557,7 @@ func (x *DropObjectsResponse_Body) String() string {
func (*DropObjectsResponse_Body) ProtoMessage() {}
func (x *DropObjectsResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[25]
+ mi := &file_pkg_services_control_service_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1471,7 +1583,7 @@ type ListShardsRequest_Body struct {
func (x *ListShardsRequest_Body) Reset() {
*x = ListShardsRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[26]
+ mi := &file_pkg_services_control_service_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1484,7 +1596,7 @@ func (x *ListShardsRequest_Body) String() string {
func (*ListShardsRequest_Body) ProtoMessage() {}
func (x *ListShardsRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[26]
+ mi := &file_pkg_services_control_service_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1513,7 +1625,7 @@ type ListShardsResponse_Body struct {
func (x *ListShardsResponse_Body) Reset() {
*x = ListShardsResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[27]
+ mi := &file_pkg_services_control_service_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1526,7 +1638,7 @@ func (x *ListShardsResponse_Body) String() string {
func (*ListShardsResponse_Body) ProtoMessage() {}
func (x *ListShardsResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[27]
+ mi := &file_pkg_services_control_service_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1566,7 +1678,7 @@ type SetShardModeRequest_Body struct {
func (x *SetShardModeRequest_Body) Reset() {
*x = SetShardModeRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[28]
+ mi := &file_pkg_services_control_service_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1579,7 +1691,7 @@ func (x *SetShardModeRequest_Body) String() string {
func (*SetShardModeRequest_Body) ProtoMessage() {}
func (x *SetShardModeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[28]
+ mi := &file_pkg_services_control_service_proto_msgTypes[30]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1626,7 +1738,7 @@ type SetShardModeResponse_Body struct {
func (x *SetShardModeResponse_Body) Reset() {
*x = SetShardModeResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[29]
+ mi := &file_pkg_services_control_service_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1639,7 +1751,7 @@ func (x *SetShardModeResponse_Body) String() string {
func (*SetShardModeResponse_Body) ProtoMessage() {}
func (x *SetShardModeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[29]
+ mi := &file_pkg_services_control_service_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1672,7 +1784,7 @@ type DumpShardRequest_Body struct {
func (x *DumpShardRequest_Body) Reset() {
*x = DumpShardRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[30]
+ mi := &file_pkg_services_control_service_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1685,7 +1797,7 @@ func (x *DumpShardRequest_Body) String() string {
func (*DumpShardRequest_Body) ProtoMessage() {}
func (x *DumpShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[30]
+ mi := &file_pkg_services_control_service_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1732,7 +1844,7 @@ type DumpShardResponse_Body struct {
func (x *DumpShardResponse_Body) Reset() {
*x = DumpShardResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[31]
+ mi := &file_pkg_services_control_service_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1745,7 +1857,7 @@ func (x *DumpShardResponse_Body) String() string {
func (*DumpShardResponse_Body) ProtoMessage() {}
func (x *DumpShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[31]
+ mi := &file_pkg_services_control_service_proto_msgTypes[33]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1778,7 +1890,7 @@ type RestoreShardRequest_Body struct {
func (x *RestoreShardRequest_Body) Reset() {
*x = RestoreShardRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[32]
+ mi := &file_pkg_services_control_service_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1791,7 +1903,7 @@ func (x *RestoreShardRequest_Body) String() string {
func (*RestoreShardRequest_Body) ProtoMessage() {}
func (x *RestoreShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[32]
+ mi := &file_pkg_services_control_service_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1838,7 +1950,7 @@ type RestoreShardResponse_Body struct {
func (x *RestoreShardResponse_Body) Reset() {
*x = RestoreShardResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[33]
+ mi := &file_pkg_services_control_service_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1851,7 +1963,7 @@ func (x *RestoreShardResponse_Body) String() string {
func (*RestoreShardResponse_Body) ProtoMessage() {}
func (x *RestoreShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[33]
+ mi := &file_pkg_services_control_service_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1882,7 +1994,7 @@ type SynchronizeTreeRequest_Body struct {
func (x *SynchronizeTreeRequest_Body) Reset() {
*x = SynchronizeTreeRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[34]
+ mi := &file_pkg_services_control_service_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1895,7 +2007,7 @@ func (x *SynchronizeTreeRequest_Body) String() string {
func (*SynchronizeTreeRequest_Body) ProtoMessage() {}
func (x *SynchronizeTreeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[34]
+ mi := &file_pkg_services_control_service_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1942,7 +2054,7 @@ type SynchronizeTreeResponse_Body struct {
func (x *SynchronizeTreeResponse_Body) Reset() {
*x = SynchronizeTreeResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[35]
+ mi := &file_pkg_services_control_service_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1955,7 +2067,7 @@ func (x *SynchronizeTreeResponse_Body) String() string {
func (*SynchronizeTreeResponse_Body) ProtoMessage() {}
func (x *SynchronizeTreeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[35]
+ mi := &file_pkg_services_control_service_proto_msgTypes[37]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1986,7 +2098,7 @@ type EvacuateShardRequest_Body struct {
func (x *EvacuateShardRequest_Body) Reset() {
*x = EvacuateShardRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[36]
+ mi := &file_pkg_services_control_service_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1999,7 +2111,7 @@ func (x *EvacuateShardRequest_Body) String() string {
func (*EvacuateShardRequest_Body) ProtoMessage() {}
func (x *EvacuateShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[36]
+ mi := &file_pkg_services_control_service_proto_msgTypes[38]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2041,7 +2153,7 @@ type EvacuateShardResponse_Body struct {
func (x *EvacuateShardResponse_Body) Reset() {
*x = EvacuateShardResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[37]
+ mi := &file_pkg_services_control_service_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2054,7 +2166,7 @@ func (x *EvacuateShardResponse_Body) String() string {
func (*EvacuateShardResponse_Body) ProtoMessage() {}
func (x *EvacuateShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[37]
+ mi := &file_pkg_services_control_service_proto_msgTypes[39]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2090,7 +2202,7 @@ type FlushCacheRequest_Body struct {
func (x *FlushCacheRequest_Body) Reset() {
*x = FlushCacheRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[38]
+ mi := &file_pkg_services_control_service_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2103,7 +2215,7 @@ func (x *FlushCacheRequest_Body) String() string {
func (*FlushCacheRequest_Body) ProtoMessage() {}
func (x *FlushCacheRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[38]
+ mi := &file_pkg_services_control_service_proto_msgTypes[40]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2136,7 +2248,7 @@ type FlushCacheResponse_Body struct {
func (x *FlushCacheResponse_Body) Reset() {
*x = FlushCacheResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[39]
+ mi := &file_pkg_services_control_service_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2149,7 +2261,7 @@ func (x *FlushCacheResponse_Body) String() string {
func (*FlushCacheResponse_Body) ProtoMessage() {}
func (x *FlushCacheResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[39]
+ mi := &file_pkg_services_control_service_proto_msgTypes[41]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2165,6 +2277,103 @@ func (*FlushCacheResponse_Body) Descriptor() ([]byte, []int) {
return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19, 0}
}
+// Request body structure.
+type DoctorRequest_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Number of threads to use for the operation.
+ Concurrency uint32 `protobuf:"varint,1,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
+ // Flag to search engine for duplicate objects and leave only one copy.
+ RemoveDuplicates bool `protobuf:"varint,2,opt,name=remove_duplicates,json=removeDuplicates,proto3" json:"remove_duplicates,omitempty"`
+}
+
+func (x *DoctorRequest_Body) Reset() {
+ *x = DoctorRequest_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoctorRequest_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoctorRequest_Body) ProtoMessage() {}
+
+func (x *DoctorRequest_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[42]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoctorRequest_Body.ProtoReflect.Descriptor instead.
+func (*DoctorRequest_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20, 0}
+}
+
+func (x *DoctorRequest_Body) GetConcurrency() uint32 {
+ if x != nil {
+ return x.Concurrency
+ }
+ return 0
+}
+
+func (x *DoctorRequest_Body) GetRemoveDuplicates() bool {
+ if x != nil {
+ return x.RemoveDuplicates
+ }
+ return false
+}
+
+// Response body structure.
+type DoctorResponse_Body struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DoctorResponse_Body) Reset() {
+ *x = DoctorResponse_Body{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_pkg_services_control_service_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoctorResponse_Body) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoctorResponse_Body) ProtoMessage() {}
+
+func (x *DoctorResponse_Body) ProtoReflect() protoreflect.Message {
+ mi := &file_pkg_services_control_service_proto_msgTypes[43]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoctorResponse_Body.ProtoReflect.Descriptor instead.
+func (*DoctorResponse_Body) Descriptor() ([]byte, []int) {
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21, 0}
+}
+
var File_pkg_services_control_service_proto protoreflect.FileDescriptor
var file_pkg_services_control_service_proto_rawDesc = []byte{
@@ -2394,60 +2603,84 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x32, 0x8c, 0x06, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a,
- 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65,
- 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e,
- 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f,
- 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a,
- 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x42, 0x0a, 0x09, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x19,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65,
- 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a,
- 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63,
- 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73,
- 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75,
- 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
- 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69,
- 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62,
- 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b,
- 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x22, 0xc9, 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f,
+ 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
+ 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x55, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x20, 0x0a,
+ 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12,
+ 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f,
+ 0x76, 0x65, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x7c, 0x0a, 0x0e,
+ 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0xc7, 0x06, 0x0a, 0x0e, 0x43,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a,
+ 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65,
+ 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a,
+ 0x0b, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b,
+ 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d,
+ 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x09, 0x44,
+ 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x75,
+ 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x4b, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12,
+ 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f,
+ 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12,
+ 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
+ 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
+ 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68,
+ 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76,
+ 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61,
+ 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65,
+ 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68,
+ 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63,
+ 0x74, 0x6f, 0x72, 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f,
+ 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73,
+ 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f,
+ 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -2462,7 +2695,7 @@ func file_pkg_services_control_service_proto_rawDescGZIP() []byte {
return file_pkg_services_control_service_proto_rawDescData
}
-var file_pkg_services_control_service_proto_msgTypes = make([]protoimpl.MessageInfo, 40)
+var file_pkg_services_control_service_proto_msgTypes = make([]protoimpl.MessageInfo, 44)
var file_pkg_services_control_service_proto_goTypes = []interface{}{
(*HealthCheckRequest)(nil), // 0: control.HealthCheckRequest
(*HealthCheckResponse)(nil), // 1: control.HealthCheckResponse
@@ -2484,103 +2717,113 @@ var file_pkg_services_control_service_proto_goTypes = []interface{}{
(*EvacuateShardResponse)(nil), // 17: control.EvacuateShardResponse
(*FlushCacheRequest)(nil), // 18: control.FlushCacheRequest
(*FlushCacheResponse)(nil), // 19: control.FlushCacheResponse
- (*HealthCheckRequest_Body)(nil), // 20: control.HealthCheckRequest.Body
- (*HealthCheckResponse_Body)(nil), // 21: control.HealthCheckResponse.Body
- (*SetNetmapStatusRequest_Body)(nil), // 22: control.SetNetmapStatusRequest.Body
- (*SetNetmapStatusResponse_Body)(nil), // 23: control.SetNetmapStatusResponse.Body
- (*DropObjectsRequest_Body)(nil), // 24: control.DropObjectsRequest.Body
- (*DropObjectsResponse_Body)(nil), // 25: control.DropObjectsResponse.Body
- (*ListShardsRequest_Body)(nil), // 26: control.ListShardsRequest.Body
- (*ListShardsResponse_Body)(nil), // 27: control.ListShardsResponse.Body
- (*SetShardModeRequest_Body)(nil), // 28: control.SetShardModeRequest.Body
- (*SetShardModeResponse_Body)(nil), // 29: control.SetShardModeResponse.Body
- (*DumpShardRequest_Body)(nil), // 30: control.DumpShardRequest.Body
- (*DumpShardResponse_Body)(nil), // 31: control.DumpShardResponse.Body
- (*RestoreShardRequest_Body)(nil), // 32: control.RestoreShardRequest.Body
- (*RestoreShardResponse_Body)(nil), // 33: control.RestoreShardResponse.Body
- (*SynchronizeTreeRequest_Body)(nil), // 34: control.SynchronizeTreeRequest.Body
- (*SynchronizeTreeResponse_Body)(nil), // 35: control.SynchronizeTreeResponse.Body
- (*EvacuateShardRequest_Body)(nil), // 36: control.EvacuateShardRequest.Body
- (*EvacuateShardResponse_Body)(nil), // 37: control.EvacuateShardResponse.Body
- (*FlushCacheRequest_Body)(nil), // 38: control.FlushCacheRequest.Body
- (*FlushCacheResponse_Body)(nil), // 39: control.FlushCacheResponse.Body
- (*Signature)(nil), // 40: control.Signature
- (NetmapStatus)(0), // 41: control.NetmapStatus
- (HealthStatus)(0), // 42: control.HealthStatus
- (*ShardInfo)(nil), // 43: control.ShardInfo
- (ShardMode)(0), // 44: control.ShardMode
+ (*DoctorRequest)(nil), // 20: control.DoctorRequest
+ (*DoctorResponse)(nil), // 21: control.DoctorResponse
+ (*HealthCheckRequest_Body)(nil), // 22: control.HealthCheckRequest.Body
+ (*HealthCheckResponse_Body)(nil), // 23: control.HealthCheckResponse.Body
+ (*SetNetmapStatusRequest_Body)(nil), // 24: control.SetNetmapStatusRequest.Body
+ (*SetNetmapStatusResponse_Body)(nil), // 25: control.SetNetmapStatusResponse.Body
+ (*DropObjectsRequest_Body)(nil), // 26: control.DropObjectsRequest.Body
+ (*DropObjectsResponse_Body)(nil), // 27: control.DropObjectsResponse.Body
+ (*ListShardsRequest_Body)(nil), // 28: control.ListShardsRequest.Body
+ (*ListShardsResponse_Body)(nil), // 29: control.ListShardsResponse.Body
+ (*SetShardModeRequest_Body)(nil), // 30: control.SetShardModeRequest.Body
+ (*SetShardModeResponse_Body)(nil), // 31: control.SetShardModeResponse.Body
+ (*DumpShardRequest_Body)(nil), // 32: control.DumpShardRequest.Body
+ (*DumpShardResponse_Body)(nil), // 33: control.DumpShardResponse.Body
+ (*RestoreShardRequest_Body)(nil), // 34: control.RestoreShardRequest.Body
+ (*RestoreShardResponse_Body)(nil), // 35: control.RestoreShardResponse.Body
+ (*SynchronizeTreeRequest_Body)(nil), // 36: control.SynchronizeTreeRequest.Body
+ (*SynchronizeTreeResponse_Body)(nil), // 37: control.SynchronizeTreeResponse.Body
+ (*EvacuateShardRequest_Body)(nil), // 38: control.EvacuateShardRequest.Body
+ (*EvacuateShardResponse_Body)(nil), // 39: control.EvacuateShardResponse.Body
+ (*FlushCacheRequest_Body)(nil), // 40: control.FlushCacheRequest.Body
+ (*FlushCacheResponse_Body)(nil), // 41: control.FlushCacheResponse.Body
+ (*DoctorRequest_Body)(nil), // 42: control.DoctorRequest.Body
+ (*DoctorResponse_Body)(nil), // 43: control.DoctorResponse.Body
+ (*Signature)(nil), // 44: control.Signature
+ (NetmapStatus)(0), // 45: control.NetmapStatus
+ (HealthStatus)(0), // 46: control.HealthStatus
+ (*ShardInfo)(nil), // 47: control.ShardInfo
+ (ShardMode)(0), // 48: control.ShardMode
}
var file_pkg_services_control_service_proto_depIdxs = []int32{
- 20, // 0: control.HealthCheckRequest.body:type_name -> control.HealthCheckRequest.Body
- 40, // 1: control.HealthCheckRequest.signature:type_name -> control.Signature
- 21, // 2: control.HealthCheckResponse.body:type_name -> control.HealthCheckResponse.Body
- 40, // 3: control.HealthCheckResponse.signature:type_name -> control.Signature
- 22, // 4: control.SetNetmapStatusRequest.body:type_name -> control.SetNetmapStatusRequest.Body
- 40, // 5: control.SetNetmapStatusRequest.signature:type_name -> control.Signature
- 23, // 6: control.SetNetmapStatusResponse.body:type_name -> control.SetNetmapStatusResponse.Body
- 40, // 7: control.SetNetmapStatusResponse.signature:type_name -> control.Signature
- 24, // 8: control.DropObjectsRequest.body:type_name -> control.DropObjectsRequest.Body
- 40, // 9: control.DropObjectsRequest.signature:type_name -> control.Signature
- 25, // 10: control.DropObjectsResponse.body:type_name -> control.DropObjectsResponse.Body
- 40, // 11: control.DropObjectsResponse.signature:type_name -> control.Signature
- 26, // 12: control.ListShardsRequest.body:type_name -> control.ListShardsRequest.Body
- 40, // 13: control.ListShardsRequest.signature:type_name -> control.Signature
- 27, // 14: control.ListShardsResponse.body:type_name -> control.ListShardsResponse.Body
- 40, // 15: control.ListShardsResponse.signature:type_name -> control.Signature
- 28, // 16: control.SetShardModeRequest.body:type_name -> control.SetShardModeRequest.Body
- 40, // 17: control.SetShardModeRequest.signature:type_name -> control.Signature
- 29, // 18: control.SetShardModeResponse.body:type_name -> control.SetShardModeResponse.Body
- 40, // 19: control.SetShardModeResponse.signature:type_name -> control.Signature
- 30, // 20: control.DumpShardRequest.body:type_name -> control.DumpShardRequest.Body
- 40, // 21: control.DumpShardRequest.signature:type_name -> control.Signature
- 31, // 22: control.DumpShardResponse.body:type_name -> control.DumpShardResponse.Body
- 40, // 23: control.DumpShardResponse.signature:type_name -> control.Signature
- 32, // 24: control.RestoreShardRequest.body:type_name -> control.RestoreShardRequest.Body
- 40, // 25: control.RestoreShardRequest.signature:type_name -> control.Signature
- 33, // 26: control.RestoreShardResponse.body:type_name -> control.RestoreShardResponse.Body
- 40, // 27: control.RestoreShardResponse.signature:type_name -> control.Signature
- 34, // 28: control.SynchronizeTreeRequest.body:type_name -> control.SynchronizeTreeRequest.Body
- 40, // 29: control.SynchronizeTreeRequest.signature:type_name -> control.Signature
- 35, // 30: control.SynchronizeTreeResponse.body:type_name -> control.SynchronizeTreeResponse.Body
- 40, // 31: control.SynchronizeTreeResponse.signature:type_name -> control.Signature
- 36, // 32: control.EvacuateShardRequest.body:type_name -> control.EvacuateShardRequest.Body
- 40, // 33: control.EvacuateShardRequest.signature:type_name -> control.Signature
- 37, // 34: control.EvacuateShardResponse.body:type_name -> control.EvacuateShardResponse.Body
- 40, // 35: control.EvacuateShardResponse.signature:type_name -> control.Signature
- 38, // 36: control.FlushCacheRequest.body:type_name -> control.FlushCacheRequest.Body
- 40, // 37: control.FlushCacheRequest.signature:type_name -> control.Signature
- 39, // 38: control.FlushCacheResponse.body:type_name -> control.FlushCacheResponse.Body
- 40, // 39: control.FlushCacheResponse.signature:type_name -> control.Signature
- 41, // 40: control.HealthCheckResponse.Body.netmap_status:type_name -> control.NetmapStatus
- 42, // 41: control.HealthCheckResponse.Body.health_status:type_name -> control.HealthStatus
- 41, // 42: control.SetNetmapStatusRequest.Body.status:type_name -> control.NetmapStatus
- 43, // 43: control.ListShardsResponse.Body.shards:type_name -> control.ShardInfo
- 44, // 44: control.SetShardModeRequest.Body.mode:type_name -> control.ShardMode
- 0, // 45: control.ControlService.HealthCheck:input_type -> control.HealthCheckRequest
- 2, // 46: control.ControlService.SetNetmapStatus:input_type -> control.SetNetmapStatusRequest
- 4, // 47: control.ControlService.DropObjects:input_type -> control.DropObjectsRequest
- 6, // 48: control.ControlService.ListShards:input_type -> control.ListShardsRequest
- 8, // 49: control.ControlService.SetShardMode:input_type -> control.SetShardModeRequest
- 10, // 50: control.ControlService.DumpShard:input_type -> control.DumpShardRequest
- 12, // 51: control.ControlService.RestoreShard:input_type -> control.RestoreShardRequest
- 14, // 52: control.ControlService.SynchronizeTree:input_type -> control.SynchronizeTreeRequest
- 16, // 53: control.ControlService.EvacuateShard:input_type -> control.EvacuateShardRequest
- 18, // 54: control.ControlService.FlushCache:input_type -> control.FlushCacheRequest
- 1, // 55: control.ControlService.HealthCheck:output_type -> control.HealthCheckResponse
- 3, // 56: control.ControlService.SetNetmapStatus:output_type -> control.SetNetmapStatusResponse
- 5, // 57: control.ControlService.DropObjects:output_type -> control.DropObjectsResponse
- 7, // 58: control.ControlService.ListShards:output_type -> control.ListShardsResponse
- 9, // 59: control.ControlService.SetShardMode:output_type -> control.SetShardModeResponse
- 11, // 60: control.ControlService.DumpShard:output_type -> control.DumpShardResponse
- 13, // 61: control.ControlService.RestoreShard:output_type -> control.RestoreShardResponse
- 15, // 62: control.ControlService.SynchronizeTree:output_type -> control.SynchronizeTreeResponse
- 17, // 63: control.ControlService.EvacuateShard:output_type -> control.EvacuateShardResponse
- 19, // 64: control.ControlService.FlushCache:output_type -> control.FlushCacheResponse
- 55, // [55:65] is the sub-list for method output_type
- 45, // [45:55] is the sub-list for method input_type
- 45, // [45:45] is the sub-list for extension type_name
- 45, // [45:45] is the sub-list for extension extendee
- 0, // [0:45] is the sub-list for field type_name
+ 22, // 0: control.HealthCheckRequest.body:type_name -> control.HealthCheckRequest.Body
+ 44, // 1: control.HealthCheckRequest.signature:type_name -> control.Signature
+ 23, // 2: control.HealthCheckResponse.body:type_name -> control.HealthCheckResponse.Body
+ 44, // 3: control.HealthCheckResponse.signature:type_name -> control.Signature
+ 24, // 4: control.SetNetmapStatusRequest.body:type_name -> control.SetNetmapStatusRequest.Body
+ 44, // 5: control.SetNetmapStatusRequest.signature:type_name -> control.Signature
+ 25, // 6: control.SetNetmapStatusResponse.body:type_name -> control.SetNetmapStatusResponse.Body
+ 44, // 7: control.SetNetmapStatusResponse.signature:type_name -> control.Signature
+ 26, // 8: control.DropObjectsRequest.body:type_name -> control.DropObjectsRequest.Body
+ 44, // 9: control.DropObjectsRequest.signature:type_name -> control.Signature
+ 27, // 10: control.DropObjectsResponse.body:type_name -> control.DropObjectsResponse.Body
+ 44, // 11: control.DropObjectsResponse.signature:type_name -> control.Signature
+ 28, // 12: control.ListShardsRequest.body:type_name -> control.ListShardsRequest.Body
+ 44, // 13: control.ListShardsRequest.signature:type_name -> control.Signature
+ 29, // 14: control.ListShardsResponse.body:type_name -> control.ListShardsResponse.Body
+ 44, // 15: control.ListShardsResponse.signature:type_name -> control.Signature
+ 30, // 16: control.SetShardModeRequest.body:type_name -> control.SetShardModeRequest.Body
+ 44, // 17: control.SetShardModeRequest.signature:type_name -> control.Signature
+ 31, // 18: control.SetShardModeResponse.body:type_name -> control.SetShardModeResponse.Body
+ 44, // 19: control.SetShardModeResponse.signature:type_name -> control.Signature
+ 32, // 20: control.DumpShardRequest.body:type_name -> control.DumpShardRequest.Body
+ 44, // 21: control.DumpShardRequest.signature:type_name -> control.Signature
+ 33, // 22: control.DumpShardResponse.body:type_name -> control.DumpShardResponse.Body
+ 44, // 23: control.DumpShardResponse.signature:type_name -> control.Signature
+ 34, // 24: control.RestoreShardRequest.body:type_name -> control.RestoreShardRequest.Body
+ 44, // 25: control.RestoreShardRequest.signature:type_name -> control.Signature
+ 35, // 26: control.RestoreShardResponse.body:type_name -> control.RestoreShardResponse.Body
+ 44, // 27: control.RestoreShardResponse.signature:type_name -> control.Signature
+ 36, // 28: control.SynchronizeTreeRequest.body:type_name -> control.SynchronizeTreeRequest.Body
+ 44, // 29: control.SynchronizeTreeRequest.signature:type_name -> control.Signature
+ 37, // 30: control.SynchronizeTreeResponse.body:type_name -> control.SynchronizeTreeResponse.Body
+ 44, // 31: control.SynchronizeTreeResponse.signature:type_name -> control.Signature
+ 38, // 32: control.EvacuateShardRequest.body:type_name -> control.EvacuateShardRequest.Body
+ 44, // 33: control.EvacuateShardRequest.signature:type_name -> control.Signature
+ 39, // 34: control.EvacuateShardResponse.body:type_name -> control.EvacuateShardResponse.Body
+ 44, // 35: control.EvacuateShardResponse.signature:type_name -> control.Signature
+ 40, // 36: control.FlushCacheRequest.body:type_name -> control.FlushCacheRequest.Body
+ 44, // 37: control.FlushCacheRequest.signature:type_name -> control.Signature
+ 41, // 38: control.FlushCacheResponse.body:type_name -> control.FlushCacheResponse.Body
+ 44, // 39: control.FlushCacheResponse.signature:type_name -> control.Signature
+ 42, // 40: control.DoctorRequest.body:type_name -> control.DoctorRequest.Body
+ 44, // 41: control.DoctorRequest.signature:type_name -> control.Signature
+ 43, // 42: control.DoctorResponse.body:type_name -> control.DoctorResponse.Body
+ 44, // 43: control.DoctorResponse.signature:type_name -> control.Signature
+ 45, // 44: control.HealthCheckResponse.Body.netmap_status:type_name -> control.NetmapStatus
+ 46, // 45: control.HealthCheckResponse.Body.health_status:type_name -> control.HealthStatus
+ 45, // 46: control.SetNetmapStatusRequest.Body.status:type_name -> control.NetmapStatus
+ 47, // 47: control.ListShardsResponse.Body.shards:type_name -> control.ShardInfo
+ 48, // 48: control.SetShardModeRequest.Body.mode:type_name -> control.ShardMode
+ 0, // 49: control.ControlService.HealthCheck:input_type -> control.HealthCheckRequest
+ 2, // 50: control.ControlService.SetNetmapStatus:input_type -> control.SetNetmapStatusRequest
+ 4, // 51: control.ControlService.DropObjects:input_type -> control.DropObjectsRequest
+ 6, // 52: control.ControlService.ListShards:input_type -> control.ListShardsRequest
+ 8, // 53: control.ControlService.SetShardMode:input_type -> control.SetShardModeRequest
+ 10, // 54: control.ControlService.DumpShard:input_type -> control.DumpShardRequest
+ 12, // 55: control.ControlService.RestoreShard:input_type -> control.RestoreShardRequest
+ 14, // 56: control.ControlService.SynchronizeTree:input_type -> control.SynchronizeTreeRequest
+ 16, // 57: control.ControlService.EvacuateShard:input_type -> control.EvacuateShardRequest
+ 18, // 58: control.ControlService.FlushCache:input_type -> control.FlushCacheRequest
+ 20, // 59: control.ControlService.Doctor:input_type -> control.DoctorRequest
+ 1, // 60: control.ControlService.HealthCheck:output_type -> control.HealthCheckResponse
+ 3, // 61: control.ControlService.SetNetmapStatus:output_type -> control.SetNetmapStatusResponse
+ 5, // 62: control.ControlService.DropObjects:output_type -> control.DropObjectsResponse
+ 7, // 63: control.ControlService.ListShards:output_type -> control.ListShardsResponse
+ 9, // 64: control.ControlService.SetShardMode:output_type -> control.SetShardModeResponse
+ 11, // 65: control.ControlService.DumpShard:output_type -> control.DumpShardResponse
+ 13, // 66: control.ControlService.RestoreShard:output_type -> control.RestoreShardResponse
+ 15, // 67: control.ControlService.SynchronizeTree:output_type -> control.SynchronizeTreeResponse
+ 17, // 68: control.ControlService.EvacuateShard:output_type -> control.EvacuateShardResponse
+ 19, // 69: control.ControlService.FlushCache:output_type -> control.FlushCacheResponse
+ 21, // 70: control.ControlService.Doctor:output_type -> control.DoctorResponse
+ 60, // [60:71] is the sub-list for method output_type
+ 49, // [49:60] is the sub-list for method input_type
+ 49, // [49:49] is the sub-list for extension type_name
+ 49, // [49:49] is the sub-list for extension extendee
+ 0, // [0:49] is the sub-list for field type_name
}
func init() { file_pkg_services_control_service_proto_init() }
@@ -2831,7 +3074,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckRequest_Body); i {
+ switch v := v.(*DoctorRequest); i {
case 0:
return &v.state
case 1:
@@ -2843,7 +3086,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HealthCheckResponse_Body); i {
+ switch v := v.(*DoctorResponse); i {
case 0:
return &v.state
case 1:
@@ -2855,7 +3098,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusRequest_Body); i {
+ switch v := v.(*HealthCheckRequest_Body); i {
case 0:
return &v.state
case 1:
@@ -2867,7 +3110,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetNetmapStatusResponse_Body); i {
+ switch v := v.(*HealthCheckResponse_Body); i {
case 0:
return &v.state
case 1:
@@ -2879,7 +3122,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsRequest_Body); i {
+ switch v := v.(*SetNetmapStatusRequest_Body); i {
case 0:
return &v.state
case 1:
@@ -2891,7 +3134,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DropObjectsResponse_Body); i {
+ switch v := v.(*SetNetmapStatusResponse_Body); i {
case 0:
return &v.state
case 1:
@@ -2903,7 +3146,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsRequest_Body); i {
+ switch v := v.(*DropObjectsRequest_Body); i {
case 0:
return &v.state
case 1:
@@ -2915,7 +3158,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListShardsResponse_Body); i {
+ switch v := v.(*DropObjectsResponse_Body); i {
case 0:
return &v.state
case 1:
@@ -2927,7 +3170,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeRequest_Body); i {
+ switch v := v.(*ListShardsRequest_Body); i {
case 0:
return &v.state
case 1:
@@ -2939,7 +3182,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SetShardModeResponse_Body); i {
+ switch v := v.(*ListShardsResponse_Body); i {
case 0:
return &v.state
case 1:
@@ -2951,7 +3194,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardRequest_Body); i {
+ switch v := v.(*SetShardModeRequest_Body); i {
case 0:
return &v.state
case 1:
@@ -2963,7 +3206,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardResponse_Body); i {
+ switch v := v.(*SetShardModeResponse_Body); i {
case 0:
return &v.state
case 1:
@@ -2975,7 +3218,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardRequest_Body); i {
+ switch v := v.(*DumpShardRequest_Body); i {
case 0:
return &v.state
case 1:
@@ -2987,7 +3230,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardResponse_Body); i {
+ switch v := v.(*DumpShardResponse_Body); i {
case 0:
return &v.state
case 1:
@@ -2999,7 +3242,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeRequest_Body); i {
+ switch v := v.(*RestoreShardRequest_Body); i {
case 0:
return &v.state
case 1:
@@ -3011,7 +3254,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SynchronizeTreeResponse_Body); i {
+ switch v := v.(*RestoreShardResponse_Body); i {
case 0:
return &v.state
case 1:
@@ -3023,7 +3266,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardRequest_Body); i {
+ switch v := v.(*SynchronizeTreeRequest_Body); i {
case 0:
return &v.state
case 1:
@@ -3035,7 +3278,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EvacuateShardResponse_Body); i {
+ switch v := v.(*SynchronizeTreeResponse_Body); i {
case 0:
return &v.state
case 1:
@@ -3047,7 +3290,7 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlushCacheRequest_Body); i {
+ switch v := v.(*EvacuateShardRequest_Body); i {
case 0:
return &v.state
case 1:
@@ -3059,6 +3302,30 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvacuateShardResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FlushCacheRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FlushCacheResponse_Body); i {
case 0:
return &v.state
@@ -3070,6 +3337,30 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
+ file_pkg_services_control_service_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoctorRequest_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_pkg_services_control_service_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoctorResponse_Body); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -3077,7 +3368,7 @@ func file_pkg_services_control_service_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkg_services_control_service_proto_rawDesc,
NumEnums: 0,
- NumMessages: 40,
+ NumMessages: 44,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 5a09a74a5..7c661e661 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -37,6 +37,9 @@ service ControlService {
// FlushCache moves all data from one shard to the others.
rpc FlushCache (FlushCacheRequest) returns (FlushCacheResponse);
+
+ // Doctor performs storage restructuring operations on engine.
+ rpc Doctor (DoctorRequest) returns (DoctorResponse);
}
// Health check request.
@@ -345,3 +348,28 @@ message FlushCacheResponse {
Body body = 1;
Signature signature = 2;
}
+
+
+// Doctor request.
+message DoctorRequest {
+ // Request body structure.
+ message Body {
+ // Number of threads to use for the operation.
+ uint32 concurrency = 1;
+ // Flag to search engine for duplicate objects and leave only one copy.
+ bool remove_duplicates = 2;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+// Doctor response.
+message DoctorResponse {
+ // Response body structure.
+ message Body {
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 5bb119090..0f50d5893 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -1548,3 +1548,156 @@ func (x *FlushCacheResponse) ReadSignedData(buf []byte) ([]byte, error) {
func (x *FlushCacheResponse) SetSignature(sig *Signature) {
x.Signature = sig
}
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DoctorRequest_Body) StableSize() (size int) {
+ size += proto.UInt32Size(1, x.Concurrency)
+ size += proto.BoolSize(2, x.RemoveDuplicates)
+ return size
+}
+
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DoctorRequest_Body) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.UInt32Marshal(1, buf[offset:], x.Concurrency)
+ offset += proto.BoolMarshal(2, buf[offset:], x.RemoveDuplicates)
+ return buf
+}
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DoctorRequest) StableSize() (size int) {
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DoctorRequest) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *DoctorRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *DoctorRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().StableMarshal(buf), nil
+}
+
+func (x *DoctorRequest) SetSignature(sig *Signature) {
+ x.Signature = sig
+}
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DoctorResponse_Body) StableSize() (size int) {
+ return size
+}
+
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DoctorResponse_Body) StableMarshal(buf []byte) []byte {
+ return buf
+}
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *DoctorResponse) StableSize() (size int) {
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// StableMarshal marshals x in protobuf binary format with stable field order.
+//
+// If buffer length is less than x.StableSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same binary format.
+func (x *DoctorResponse) StableMarshal(buf []byte) []byte {
+ if x == nil {
+ return []byte{}
+ }
+ if buf == nil {
+ buf = make([]byte, x.StableSize())
+ }
+ var offset int
+ offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
+ offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
+ return buf
+}
+
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *DoctorResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *DoctorResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().StableMarshal(buf), nil
+}
+
+func (x *DoctorResponse) SetSignature(sig *Signature) {
+ x.Signature = sig
+}
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index 2cfddd7f5..4a4fbeac1 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
+// - protoc-gen-go-grpc v1.3.0
// - protoc v3.21.12
// source: pkg/services/control/service.proto
@@ -18,6 +18,20 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
+const (
+ ControlService_HealthCheck_FullMethodName = "/control.ControlService/HealthCheck"
+ ControlService_SetNetmapStatus_FullMethodName = "/control.ControlService/SetNetmapStatus"
+ ControlService_DropObjects_FullMethodName = "/control.ControlService/DropObjects"
+ ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
+ ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
+ ControlService_DumpShard_FullMethodName = "/control.ControlService/DumpShard"
+ ControlService_RestoreShard_FullMethodName = "/control.ControlService/RestoreShard"
+ ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
+ ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard"
+ ControlService_FlushCache_FullMethodName = "/control.ControlService/FlushCache"
+ ControlService_Doctor_FullMethodName = "/control.ControlService/Doctor"
+)
+
// ControlServiceClient is the client API for ControlService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
@@ -42,6 +56,8 @@ type ControlServiceClient interface {
EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error)
// FlushCache moves all data from one shard to the others.
FlushCache(ctx context.Context, in *FlushCacheRequest, opts ...grpc.CallOption) (*FlushCacheResponse, error)
+ // Doctor performs storage restructuring operations on engine.
+ Doctor(ctx context.Context, in *DoctorRequest, opts ...grpc.CallOption) (*DoctorResponse, error)
}
type controlServiceClient struct {
@@ -54,7 +70,7 @@ func NewControlServiceClient(cc grpc.ClientConnInterface) ControlServiceClient {
func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/HealthCheck", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_HealthCheck_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -63,7 +79,7 @@ func (c *controlServiceClient) HealthCheck(ctx context.Context, in *HealthCheckR
func (c *controlServiceClient) SetNetmapStatus(ctx context.Context, in *SetNetmapStatusRequest, opts ...grpc.CallOption) (*SetNetmapStatusResponse, error) {
out := new(SetNetmapStatusResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/SetNetmapStatus", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_SetNetmapStatus_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -72,7 +88,7 @@ func (c *controlServiceClient) SetNetmapStatus(ctx context.Context, in *SetNetma
func (c *controlServiceClient) DropObjects(ctx context.Context, in *DropObjectsRequest, opts ...grpc.CallOption) (*DropObjectsResponse, error) {
out := new(DropObjectsResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/DropObjects", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_DropObjects_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -81,7 +97,7 @@ func (c *controlServiceClient) DropObjects(ctx context.Context, in *DropObjectsR
func (c *controlServiceClient) ListShards(ctx context.Context, in *ListShardsRequest, opts ...grpc.CallOption) (*ListShardsResponse, error) {
out := new(ListShardsResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/ListShards", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_ListShards_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -90,7 +106,7 @@ func (c *controlServiceClient) ListShards(ctx context.Context, in *ListShardsReq
func (c *controlServiceClient) SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error) {
out := new(SetShardModeResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/SetShardMode", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_SetShardMode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -99,7 +115,7 @@ func (c *controlServiceClient) SetShardMode(ctx context.Context, in *SetShardMod
func (c *controlServiceClient) DumpShard(ctx context.Context, in *DumpShardRequest, opts ...grpc.CallOption) (*DumpShardResponse, error) {
out := new(DumpShardResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/DumpShard", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_DumpShard_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -108,7 +124,7 @@ func (c *controlServiceClient) DumpShard(ctx context.Context, in *DumpShardReque
func (c *controlServiceClient) RestoreShard(ctx context.Context, in *RestoreShardRequest, opts ...grpc.CallOption) (*RestoreShardResponse, error) {
out := new(RestoreShardResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/RestoreShard", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_RestoreShard_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -117,7 +133,7 @@ func (c *controlServiceClient) RestoreShard(ctx context.Context, in *RestoreShar
func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error) {
out := new(SynchronizeTreeResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/SynchronizeTree", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_SynchronizeTree_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -126,7 +142,7 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron
func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) {
out := new(EvacuateShardResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/EvacuateShard", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -135,7 +151,16 @@ func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateSh
func (c *controlServiceClient) FlushCache(ctx context.Context, in *FlushCacheRequest, opts ...grpc.CallOption) (*FlushCacheResponse, error) {
out := new(FlushCacheResponse)
- err := c.cc.Invoke(ctx, "/control.ControlService/FlushCache", in, out, opts...)
+ err := c.cc.Invoke(ctx, ControlService_FlushCache_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *controlServiceClient) Doctor(ctx context.Context, in *DoctorRequest, opts ...grpc.CallOption) (*DoctorResponse, error) {
+ out := new(DoctorResponse)
+ err := c.cc.Invoke(ctx, ControlService_Doctor_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -166,6 +191,8 @@ type ControlServiceServer interface {
EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error)
// FlushCache moves all data from one shard to the others.
FlushCache(context.Context, *FlushCacheRequest) (*FlushCacheResponse, error)
+ // Doctor performs storage restructuring operations on engine.
+ Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -202,6 +229,9 @@ func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *Evacuat
func (UnimplementedControlServiceServer) FlushCache(context.Context, *FlushCacheRequest) (*FlushCacheResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method FlushCache not implemented")
}
+func (UnimplementedControlServiceServer) Doctor(context.Context, *DoctorRequest) (*DoctorResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Doctor not implemented")
+}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -224,7 +254,7 @@ func _ControlService_HealthCheck_Handler(srv interface{}, ctx context.Context, d
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/HealthCheck",
+ FullMethod: ControlService_HealthCheck_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).HealthCheck(ctx, req.(*HealthCheckRequest))
@@ -242,7 +272,7 @@ func _ControlService_SetNetmapStatus_Handler(srv interface{}, ctx context.Contex
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/SetNetmapStatus",
+ FullMethod: ControlService_SetNetmapStatus_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).SetNetmapStatus(ctx, req.(*SetNetmapStatusRequest))
@@ -260,7 +290,7 @@ func _ControlService_DropObjects_Handler(srv interface{}, ctx context.Context, d
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/DropObjects",
+ FullMethod: ControlService_DropObjects_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).DropObjects(ctx, req.(*DropObjectsRequest))
@@ -278,7 +308,7 @@ func _ControlService_ListShards_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/ListShards",
+ FullMethod: ControlService_ListShards_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).ListShards(ctx, req.(*ListShardsRequest))
@@ -296,7 +326,7 @@ func _ControlService_SetShardMode_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/SetShardMode",
+ FullMethod: ControlService_SetShardMode_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).SetShardMode(ctx, req.(*SetShardModeRequest))
@@ -314,7 +344,7 @@ func _ControlService_DumpShard_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/DumpShard",
+ FullMethod: ControlService_DumpShard_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).DumpShard(ctx, req.(*DumpShardRequest))
@@ -332,7 +362,7 @@ func _ControlService_RestoreShard_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/RestoreShard",
+ FullMethod: ControlService_RestoreShard_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).RestoreShard(ctx, req.(*RestoreShardRequest))
@@ -350,7 +380,7 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/SynchronizeTree",
+ FullMethod: ControlService_SynchronizeTree_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).SynchronizeTree(ctx, req.(*SynchronizeTreeRequest))
@@ -368,7 +398,7 @@ func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/EvacuateShard",
+ FullMethod: ControlService_EvacuateShard_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest))
@@ -386,7 +416,7 @@ func _ControlService_FlushCache_Handler(srv interface{}, ctx context.Context, de
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/control.ControlService/FlushCache",
+ FullMethod: ControlService_FlushCache_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ControlServiceServer).FlushCache(ctx, req.(*FlushCacheRequest))
@@ -394,6 +424,24 @@ func _ControlService_FlushCache_Handler(srv interface{}, ctx context.Context, de
return interceptor(ctx, in, info, handler)
}
+func _ControlService_Doctor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DoctorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).Doctor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_Doctor_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).Doctor(ctx, req.(*DoctorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -441,6 +489,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "FlushCache",
Handler: _ControlService_FlushCache_Handler,
},
+ {
+ MethodName: "Doctor",
+ Handler: _ControlService_Doctor_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
From b689027d573c7ea973786d320853b51173c96522 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 7 Apr 2023 14:36:00 +0300
Subject: [PATCH 0072/1943] [#191] cli: Add `control shards doctor` command
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-cli/modules/control/doctor.go | 53 +++++++++++++++++++++++
cmd/frostfs-cli/modules/control/shards.go | 2 +
2 files changed, 55 insertions(+)
create mode 100644 cmd/frostfs-cli/modules/control/doctor.go
diff --git a/cmd/frostfs-cli/modules/control/doctor.go b/cmd/frostfs-cli/modules/control/doctor.go
new file mode 100644
index 000000000..13bb81a0a
--- /dev/null
+++ b/cmd/frostfs-cli/modules/control/doctor.go
@@ -0,0 +1,53 @@
+package control
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "github.com/spf13/cobra"
+)
+
+const (
+ concurrencyFlag = "concurrency"
+ removeDuplicatesFlag = "remove-duplicates"
+)
+
+var doctorCmd = &cobra.Command{
+ Use: "doctor",
+ Short: "Restructure node's storage",
+ Long: "Restructure node's storage",
+ Run: doctor,
+}
+
+func doctor(cmd *cobra.Command, _ []string) {
+ pk := key.Get(cmd)
+
+ req := &control.DoctorRequest{Body: new(control.DoctorRequest_Body)}
+ req.Body.Concurrency, _ = cmd.Flags().GetUint32(concurrencyFlag)
+ req.Body.RemoveDuplicates, _ = cmd.Flags().GetBool(removeDuplicatesFlag)
+
+ signRequest(cmd, pk, req)
+
+ cli := getClient(cmd, pk)
+
+ var resp *control.DoctorResponse
+ var err error
+ err = cli.ExecRaw(func(client *client.Client) error {
+ resp, err = control.Doctor(client, req)
+ return err
+ })
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+ cmd.Println("Operation has finished.")
+}
+
+func initControlDoctorCmd() {
+ initControlFlags(doctorCmd)
+
+ ff := doctorCmd.Flags()
+ ff.Uint32(concurrencyFlag, 0, "Number of parallel threads to use")
+ ff.Bool(removeDuplicatesFlag, false, "Remove duplicate objects")
+}
diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go
index 6719a4acf..9d3eb5c01 100644
--- a/cmd/frostfs-cli/modules/control/shards.go
+++ b/cmd/frostfs-cli/modules/control/shards.go
@@ -17,6 +17,7 @@ func initControlShardsCmd() {
shardsCmd.AddCommand(restoreShardCmd)
shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
+ shardsCmd.AddCommand(doctorCmd)
initControlShardsListCmd()
initControlSetShardModeCmd()
@@ -24,4 +25,5 @@ func initControlShardsCmd() {
initControlRestoreShardCmd()
initControlEvacuateShardCmd()
initControlFlushCacheCmd()
+ initControlDoctorCmd()
}
From 38ae71cc7d4f2221a7771c98c836598c4f79d265 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 12:28:41 +0300
Subject: [PATCH 0073/1943] [#213] metrics: Refactor engine metrics
Resolve funlen linter for newEngineMetrics function.
Signed-off-by: Dmitrii Stepanov
---
pkg/metrics/engine.go | 147 ++++++++++++------------------------------
1 file changed, 40 insertions(+), 107 deletions(-)
diff --git a/pkg/metrics/engine.go b/pkg/metrics/engine.go
index fbc184832..4c51a55aa 100644
--- a/pkg/metrics/engine.go
+++ b/pkg/metrics/engine.go
@@ -1,6 +1,8 @@
package metrics
import (
+ "fmt"
+ "strings"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -26,118 +28,49 @@ type (
const engineSubsystem = "engine"
-// nolint: funlen
func newEngineMetrics() engineMetrics {
- var (
- listContainersDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "list_containers_duration",
- Help: "Accumulated duration of engine list containers operations",
- })
-
- estimateContainerSizeDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "estimate_container_size_duration",
- Help: "Accumulated duration of engine container size estimate operations",
- })
-
- deleteDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "delete_duration",
- Help: "Accumulated duration of engine delete operations",
- })
-
- existsDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "exists_duration",
- Help: "Accumulated duration of engine exists operations",
- })
-
- getDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "get_duration",
- Help: "Accumulated duration of engine get operations",
- })
-
- headDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "head_duration",
- Help: "Accumulated duration of engine head operations",
- })
-
- inhumeDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "inhume_duration",
- Help: "Accumulated duration of engine inhume operations",
- })
-
- putDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "put_duration",
- Help: "Accumulated duration of engine put operations",
- })
-
- rangeDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "range_duration",
- Help: "Accumulated duration of engine range operations",
- })
-
- searchDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "search_duration",
- Help: "Accumulated duration of engine search operations",
- })
-
- listObjectsDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "list_objects_duration",
- Help: "Accumulated duration of engine list objects operations",
- })
-
- containerSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "container_size",
- Help: "Accumulated size of all objects in a container",
- }, []string{containerIDLabelKey})
-
- payloadSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: engineSubsystem,
- Name: "payload_size",
- Help: "Accumulated size of all objects in a shard",
- }, []string{shardIDLabelKey})
- )
-
return engineMetrics{
- listContainersDuration: listContainersDuration,
- estimateContainerSizeDuration: estimateContainerSizeDuration,
- deleteDuration: deleteDuration,
- existsDuration: existsDuration,
- getDuration: getDuration,
- headDuration: headDuration,
- inhumeDuration: inhumeDuration,
- putDuration: putDuration,
- rangeDuration: rangeDuration,
- searchDuration: searchDuration,
- listObjectsDuration: listObjectsDuration,
- containerSize: *containerSize,
- payloadSize: *payloadSize,
+ listContainersDuration: newEngineMethodDurationCounter("list_containers"),
+ estimateContainerSizeDuration: newEngineCounter("estimate_container_size_duration", "Accumulated duration of engine container size estimate operations"),
+ deleteDuration: newEngineMethodDurationCounter("delete"),
+ existsDuration: newEngineMethodDurationCounter("exists"),
+ getDuration: newEngineMethodDurationCounter("get"),
+ headDuration: newEngineMethodDurationCounter("head"),
+ inhumeDuration: newEngineMethodDurationCounter("inhume"),
+ putDuration: newEngineMethodDurationCounter("put"),
+ rangeDuration: newEngineMethodDurationCounter("range"),
+ searchDuration: newEngineMethodDurationCounter("search"),
+ listObjectsDuration: newEngineMethodDurationCounter("list_objects"),
+ containerSize: *newEngineGaugeVector("container_size", "Accumulated size of all objects in a container", []string{containerIDLabelKey}),
+ payloadSize: *newEngineGaugeVector("payload_size", "Accumulated size of all objects in a shard", []string{shardIDLabelKey}),
}
}
+func newEngineCounter(name, help string) prometheus.Counter {
+ return prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: engineSubsystem,
+ Name: name,
+ Help: help,
+ })
+}
+
+func newEngineMethodDurationCounter(method string) prometheus.Counter {
+ return newEngineCounter(
+ fmt.Sprintf("%s_duration", method),
+ fmt.Sprintf("Accumulated duration of engine %s operations", strings.ReplaceAll(method, "_", " ")),
+ )
+}
+
+func newEngineGaugeVector(name, help string, labels []string) *prometheus.GaugeVec {
+ return prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: engineSubsystem,
+ Name: name,
+ Help: help,
+ }, labels)
+}
+
func (m engineMetrics) register() {
prometheus.MustRegister(m.listContainersDuration)
prometheus.MustRegister(m.estimateContainerSizeDuration)
From 02831d427b01ae0152e4238721c003c987e979e5 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 5 Apr 2023 12:43:32 +0300
Subject: [PATCH 0074/1943] [#213] metrics: Refactor object metrics
Resolve funlen linter for newObjectServiceMetrics function.
Signed-off-by: Dmitrii Stepanov
---
pkg/metrics/object.go | 162 ++++++++++++------------------------------
1 file changed, 47 insertions(+), 115 deletions(-)
diff --git a/pkg/metrics/object.go b/pkg/metrics/object.go
index ffa5d481d..fae86cb4a 100644
--- a/pkg/metrics/object.go
+++ b/pkg/metrics/object.go
@@ -2,6 +2,7 @@ package metrics
import (
"fmt"
+ "strings"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -46,7 +47,7 @@ const (
containerIDLabelKey = "cid"
)
-func newMethodCallCounter(name string) methodCount {
+func newObjectMethodCallCounter(name string) methodCount {
return methodCount{
success: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
@@ -75,125 +76,56 @@ func (m methodCount) Inc(success bool) {
}
}
-// nolint: funlen
func newObjectServiceMetrics() objectServiceMetrics {
- var ( // Request counter metrics.
- getCounter = newMethodCallCounter("get")
- putCounter = newMethodCallCounter("put")
- headCounter = newMethodCallCounter("head")
- searchCounter = newMethodCallCounter("search")
- deleteCounter = newMethodCallCounter("delete")
- rangeCounter = newMethodCallCounter("range")
- rangeHashCounter = newMethodCallCounter("range_hash")
- )
-
- var ( // Request duration metrics.
- getDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "get_req_duration",
- Help: "Accumulated get request process duration",
- })
-
- putDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "put_req_duration",
- Help: "Accumulated put request process duration",
- })
-
- headDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "head_req_duration",
- Help: "Accumulated head request process duration",
- })
-
- searchDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "search_req_duration",
- Help: "Accumulated search request process duration",
- })
-
- deleteDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "delete_req_duration",
- Help: "Accumulated delete request process duration",
- })
-
- rangeDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "range_req_duration",
- Help: "Accumulated range request process duration",
- })
-
- rangeHashDuration = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "range_hash_req_duration",
- Help: "Accumulated range hash request process duration",
- })
- )
-
- var ( // Object payload metrics.
- putPayload = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "put_payload",
- Help: "Accumulated payload size at object put method",
- })
-
- getPayload = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "get_payload",
- Help: "Accumulated payload size at object get method",
- })
-
- shardsMetrics = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "counter",
- Help: "Objects counters per shards",
- },
- []string{shardIDLabelKey, counterTypeLabelKey},
- )
-
- shardsReadonly = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: namespace,
- Subsystem: objectSubsystem,
- Name: "readonly",
- Help: "Shard state",
- },
- []string{shardIDLabelKey},
- )
- )
-
return objectServiceMetrics{
- getCounter: getCounter,
- putCounter: putCounter,
- headCounter: headCounter,
- searchCounter: searchCounter,
- deleteCounter: deleteCounter,
- rangeCounter: rangeCounter,
- rangeHashCounter: rangeHashCounter,
- getDuration: getDuration,
- putDuration: putDuration,
- headDuration: headDuration,
- searchDuration: searchDuration,
- deleteDuration: deleteDuration,
- rangeDuration: rangeDuration,
- rangeHashDuration: rangeHashDuration,
- putPayload: putPayload,
- getPayload: getPayload,
- shardMetrics: shardsMetrics,
- shardsReadonly: shardsReadonly,
+ getCounter: newObjectMethodCallCounter("get"),
+ putCounter: newObjectMethodCallCounter("put"),
+ headCounter: newObjectMethodCallCounter("head"),
+ searchCounter: newObjectMethodCallCounter("search"),
+ deleteCounter: newObjectMethodCallCounter("delete"),
+ rangeCounter: newObjectMethodCallCounter("range"),
+ rangeHashCounter: newObjectMethodCallCounter("range_hash"),
+ getDuration: newObjectMethodDurationCounter("get"),
+ putDuration: newObjectMethodDurationCounter("put"),
+ headDuration: newObjectMethodDurationCounter("head"),
+ searchDuration: newObjectMethodDurationCounter("search"),
+ deleteDuration: newObjectMethodDurationCounter("delete"),
+ rangeDuration: newObjectMethodDurationCounter("range"),
+ rangeHashDuration: newObjectMethodDurationCounter("range_hash"),
+ putPayload: newObjectMethodPayloadCounter("put"),
+ getPayload: newObjectMethodPayloadCounter("get"),
+ shardMetrics: newObjectGaugeVector("counter", "Objects counters per shards", []string{shardIDLabelKey, counterTypeLabelKey}),
+ shardsReadonly: newObjectGaugeVector("readonly", "Shard state", []string{shardIDLabelKey}),
}
}
+func newObjectMethodPayloadCounter(method string) prometheus.Counter {
+ return prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: objectSubsystem,
+ Name: fmt.Sprintf("%s_payload", method),
+ Help: fmt.Sprintf("Accumulated payload size at object %s method", strings.ReplaceAll(method, "_", " ")),
+ })
+}
+
+func newObjectMethodDurationCounter(method string) prometheus.Counter {
+ return prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: namespace,
+ Subsystem: objectSubsystem,
+ Name: fmt.Sprintf("%s_req_duration", method),
+ Help: fmt.Sprintf("Accumulated %s request process duration", strings.ReplaceAll(method, "_", " ")),
+ })
+}
+
+func newObjectGaugeVector(name, help string, labels []string) *prometheus.GaugeVec {
+ return prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: namespace,
+ Subsystem: objectSubsystem,
+ Name: name,
+ Help: help,
+ }, labels)
+}
+
func (m objectServiceMetrics) register() {
m.getCounter.mustRegister()
m.putCounter.mustRegister()
From 6016d78a45a0779463347af63dbefa7eb93aa889 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 6 Apr 2023 13:02:37 +0300
Subject: [PATCH 0075/1943] [#223] core: Refactor object format validator
Resolve funlen linter for FormatValidator.ValidateContent method.
Signed-off-by: Dmitrii Stepanov
---
pkg/core/object/fmt.go | 196 ++++++++++++++++++++++-------------------
1 file changed, 103 insertions(+), 93 deletions(-)
diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go
index 053306356..33373b7cc 100644
--- a/pkg/core/object/fmt.go
+++ b/pkg/core/object/fmt.go
@@ -202,112 +202,24 @@ func (i ContentMeta) Objects() []oid.ID {
}
// ValidateContent validates payload content according to the object type.
-//
-// nolint: funlen
func (v *FormatValidator) ValidateContent(o *object.Object) (ContentMeta, error) {
meta := ContentMeta{
typ: o.Type(),
}
switch o.Type() {
- case object.TypeRegular:
- // ignore regular objects, they do not need payload formatting
case object.TypeTombstone:
- if len(o.Payload()) == 0 {
- return ContentMeta{}, fmt.Errorf("(%T) empty payload in tombstone", v)
- }
-
- tombstone := object.NewTombstone()
-
- if err := tombstone.Unmarshal(o.Payload()); err != nil {
- return ContentMeta{}, fmt.Errorf("(%T) could not unmarshal tombstone content: %w", v, err)
- }
-
- // check if the tombstone has the same expiration in the body and the header
- exp, err := expirationEpochAttribute(o)
- if err != nil {
+ if err := v.fillAndValidateTombstoneMeta(o, &meta); err != nil {
return ContentMeta{}, err
}
-
- if exp != tombstone.ExpirationEpoch() {
- return ContentMeta{}, errTombstoneExpiration
- }
-
- // mark all objects from the tombstone body as removed in the storage engine
- _, ok := o.ContainerID()
- if !ok {
- return ContentMeta{}, errors.New("missing container ID")
- }
-
- idList := tombstone.Members()
- meta.objs = idList
case object.TypeStorageGroup:
- if len(o.Payload()) == 0 {
- return ContentMeta{}, fmt.Errorf("(%T) empty payload in SG", v)
- }
-
- var sg storagegroup.StorageGroup
-
- if err := sg.Unmarshal(o.Payload()); err != nil {
- return ContentMeta{}, fmt.Errorf("(%T) could not unmarshal SG content: %w", v, err)
- }
-
- mm := sg.Members()
- meta.objs = mm
-
- lenMM := len(mm)
- if lenMM == 0 {
- return ContentMeta{}, errEmptySGMembers
- }
-
- uniqueFilter := make(map[oid.ID]struct{}, lenMM)
-
- for i := 0; i < lenMM; i++ {
- if _, alreadySeen := uniqueFilter[mm[i]]; alreadySeen {
- return ContentMeta{}, fmt.Errorf("storage group contains non-unique member: %s", mm[i])
- }
-
- uniqueFilter[mm[i]] = struct{}{}
+ if err := v.fillAndValidateStorageGroupMeta(o, &meta); err != nil {
+ return ContentMeta{}, err
}
case object.TypeLock:
- if len(o.Payload()) == 0 {
- return ContentMeta{}, errors.New("empty payload in lock")
+ if err := v.fillAndValidateLockMeta(o, &meta); err != nil {
+ return ContentMeta{}, err
}
-
- _, ok := o.ContainerID()
- if !ok {
- return ContentMeta{}, errors.New("missing container")
- }
-
- _, ok = o.ID()
- if !ok {
- return ContentMeta{}, errors.New("missing ID")
- }
-
- // check that LOCK object has correct expiration epoch
- lockExp, err := expirationEpochAttribute(o)
- if err != nil {
- return ContentMeta{}, fmt.Errorf("lock object expiration epoch: %w", err)
- }
-
- if currEpoch := v.netState.CurrentEpoch(); lockExp < currEpoch {
- return ContentMeta{}, fmt.Errorf("lock object expiration: %d; current: %d", lockExp, currEpoch)
- }
-
- var lock object.Lock
-
- err = lock.Unmarshal(o.Payload())
- if err != nil {
- return ContentMeta{}, fmt.Errorf("decode lock payload: %w", err)
- }
-
- num := lock.NumberOfMembers()
- if num == 0 {
- return ContentMeta{}, errors.New("missing locked members")
- }
-
- meta.objs = make([]oid.ID, num)
- lock.ReadMembers(meta.objs)
default:
// ignore all other object types, they do not need payload formatting
}
@@ -315,6 +227,104 @@ func (v *FormatValidator) ValidateContent(o *object.Object) (ContentMeta, error)
return meta, nil
}
+func (v *FormatValidator) fillAndValidateLockMeta(o *object.Object, meta *ContentMeta) error {
+ if len(o.Payload()) == 0 {
+ return errors.New("empty payload in lock")
+ }
+
+ if _, ok := o.ContainerID(); !ok {
+ return errors.New("missing container")
+ }
+
+ if _, ok := o.ID(); !ok {
+ return errors.New("missing ID")
+ }
+ // check that LOCK object has correct expiration epoch
+ lockExp, err := expirationEpochAttribute(o)
+ if err != nil {
+ return fmt.Errorf("lock object expiration epoch: %w", err)
+ }
+
+ if currEpoch := v.netState.CurrentEpoch(); lockExp < currEpoch {
+ return fmt.Errorf("lock object expiration: %d; current: %d", lockExp, currEpoch)
+ }
+
+ var lock object.Lock
+
+ if err = lock.Unmarshal(o.Payload()); err != nil {
+ return fmt.Errorf("decode lock payload: %w", err)
+ }
+
+ num := lock.NumberOfMembers()
+ if num == 0 {
+ return errors.New("missing locked members")
+ }
+
+ meta.objs = make([]oid.ID, num)
+ lock.ReadMembers(meta.objs)
+ return nil
+}
+
+func (v *FormatValidator) fillAndValidateStorageGroupMeta(o *object.Object, meta *ContentMeta) error {
+ if len(o.Payload()) == 0 {
+ return fmt.Errorf("(%T) empty payload in storage group", v)
+ }
+
+ var sg storagegroup.StorageGroup
+
+ if err := sg.Unmarshal(o.Payload()); err != nil {
+ return fmt.Errorf("(%T) could not unmarshal storage group content: %w", v, err)
+ }
+
+ mm := sg.Members()
+ meta.objs = mm
+
+ lenMM := len(mm)
+ if lenMM == 0 {
+ return errEmptySGMembers
+ }
+
+ uniqueFilter := make(map[oid.ID]struct{}, lenMM)
+
+ for i := 0; i < lenMM; i++ {
+ if _, alreadySeen := uniqueFilter[mm[i]]; alreadySeen {
+ return fmt.Errorf("storage group contains non-unique member: %s", mm[i])
+ }
+
+ uniqueFilter[mm[i]] = struct{}{}
+ }
+ return nil
+}
+
+func (v *FormatValidator) fillAndValidateTombstoneMeta(o *object.Object, meta *ContentMeta) error {
+ if len(o.Payload()) == 0 {
+ return fmt.Errorf("(%T) empty payload in tombstone", v)
+ }
+
+ tombstone := object.NewTombstone()
+
+ if err := tombstone.Unmarshal(o.Payload()); err != nil {
+ return fmt.Errorf("(%T) could not unmarshal tombstone content: %w", v, err)
+ }
+ // check if the tombstone has the same expiration in the body and the header
+ exp, err := expirationEpochAttribute(o)
+ if err != nil {
+ return err
+ }
+
+ if exp != tombstone.ExpirationEpoch() {
+ return errTombstoneExpiration
+ }
+
+ // mark all objects from the tombstone body as removed in the storage engine
+ if _, ok := o.ContainerID(); !ok {
+ return errors.New("missing container ID")
+ }
+
+ meta.objs = tombstone.Members()
+ return nil
+}
+
var errExpired = errors.New("object has expired")
func (v *FormatValidator) checkExpiration(obj *object.Object) error {
From ae86cda58c35d01092c895b9d053877ac0c02938 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 6 Apr 2023 13:41:45 +0300
Subject: [PATCH 0076/1943] [#223] sg: Refactor storage group parameters
Resolve containedctx linter for SearchSGPrm and GetSGPrm structs.
Signed-off-by: Dmitrii Stepanov
---
pkg/core/storagegroup/storagegroup.go | 10 ++--------
pkg/innerring/processors/audit/process.go | 6 ++----
pkg/innerring/rpc.go | 8 ++++----
3 files changed, 8 insertions(+), 16 deletions(-)
diff --git a/pkg/core/storagegroup/storagegroup.go b/pkg/core/storagegroup/storagegroup.go
index f4e584134..b16e5c61a 100644
--- a/pkg/core/storagegroup/storagegroup.go
+++ b/pkg/core/storagegroup/storagegroup.go
@@ -11,10 +11,7 @@ import (
)
// SearchSGPrm groups the parameters which are formed by Processor to search the storage group objects.
-// nolint: containedctx
type SearchSGPrm struct {
- Context context.Context
-
Container cid.ID
NodeInfo client.NodeInfo
@@ -26,10 +23,7 @@ type SearchSGDst struct {
}
// GetSGPrm groups parameter of GetSG operation.
-// nolint: containedctx
type GetSGPrm struct {
- Context context.Context
-
OID oid.ID
CID cid.ID
@@ -42,11 +36,11 @@ type SGSource interface {
// ListSG must list storage group objects in the container. Formed list must be written to destination.
//
// Must return any error encountered which did not allow to form the list.
- ListSG(*SearchSGDst, SearchSGPrm) error
+ ListSG(context.Context, *SearchSGDst, SearchSGPrm) error
// GetSG must return storage group object for the provided CID, OID,
// container and netmap state.
- GetSG(GetSGPrm) (*storagegroup.StorageGroup, error)
+ GetSG(context.Context, GetSGPrm) (*storagegroup.StorageGroup, error)
}
// StorageGroup combines storage group object ID and its structure.
diff --git a/pkg/innerring/processors/audit/process.go b/pkg/innerring/processors/audit/process.go
index ecfc407be..656927816 100644
--- a/pkg/innerring/processors/audit/process.go
+++ b/pkg/innerring/processors/audit/process.go
@@ -153,12 +153,11 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []
ctx, cancel := context.WithTimeout(context.Background(), ap.searchTimeout)
- prm.Context = ctx
prm.NodeInfo = info
var dst storagegroup.SearchSGDst
- err = ap.sgSrc.ListSG(&dst, prm)
+ err = ap.sgSrc.ListSG(ctx, &dst, prm)
cancel()
@@ -189,9 +188,8 @@ func (ap *Processor) filterExpiredSG(cid cid.ID, sgIDs []oid.ID,
ctx, cancel := context.WithTimeout(context.Background(), ap.searchTimeout)
getSGPrm.OID = sgID
- getSGPrm.Context = ctx
- sg, err := ap.sgSrc.GetSG(getSGPrm)
+ sg, err := ap.sgSrc.GetSG(ctx, getSGPrm)
cancel()
diff --git a/pkg/innerring/rpc.go b/pkg/innerring/rpc.go
index c58e0ddc4..8e96deb7b 100644
--- a/pkg/innerring/rpc.go
+++ b/pkg/innerring/rpc.go
@@ -65,12 +65,12 @@ func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.Client, error) {
// Returns storage groups structure from received object.
//
// Returns an error of type apistatus.ObjectNotFound if storage group is missing.
-func (c *ClientCache) GetSG(prm storagegroup2.GetSGPrm) (*storagegroup.StorageGroup, error) {
+func (c *ClientCache) GetSG(ctx context.Context, prm storagegroup2.GetSGPrm) (*storagegroup.StorageGroup, error) {
var sgAddress oid.Address
sgAddress.SetContainer(prm.CID)
sgAddress.SetObject(prm.OID)
- return c.getSG(prm.Context, sgAddress, &prm.NetMap, prm.Container)
+ return c.getSG(ctx, sgAddress, &prm.NetMap, prm.Container)
}
func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.NetMap, cn [][]netmap.NodeInfo) (*storagegroup.StorageGroup, error) {
@@ -214,7 +214,7 @@ func (c *ClientCache) getWrappedClient(info clientcore.NodeInfo) (frostfsapiclie
return cInternal, nil
}
-func (c ClientCache) ListSG(dst *storagegroup2.SearchSGDst, prm storagegroup2.SearchSGPrm) error {
+func (c ClientCache) ListSG(ctx context.Context, dst *storagegroup2.SearchSGDst, prm storagegroup2.SearchSGPrm) error {
cli, err := c.getWrappedClient(prm.NodeInfo)
if err != nil {
return fmt.Errorf("could not get API client from cache")
@@ -224,7 +224,7 @@ func (c ClientCache) ListSG(dst *storagegroup2.SearchSGDst, prm storagegroup2.Se
cliPrm.SetContainerID(prm.Container)
- res, err := cli.SearchSG(prm.Context, cliPrm)
+ res, err := cli.SearchSG(ctx, cliPrm)
if err != nil {
return err
}
From ccf8463e69e9abc308d3feecc24be1b9449d12e3 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 6 Apr 2023 15:18:56 +0300
Subject: [PATCH 0077/1943] [#223] controlsvc: Drop unnecessary nolint
Signed-off-by: Dmitrii Stepanov
---
pkg/services/control/server/gc.go | 1 -
1 file changed, 1 deletion(-)
diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go
index 7912d4e3e..d382dd7e5 100644
--- a/pkg/services/control/server/gc.go
+++ b/pkg/services/control/server/gc.go
@@ -41,7 +41,6 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques
prm.WithForceRemoval()
prm.WithAddress(addrList[i])
- //nolint: contextcheck
_, err := s.s.Delete(ctx, prm)
if err != nil && firstErr == nil {
firstErr = err
From 2ed9fd3f9445ee651fe434680af7c567328ae2c1 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 6 Apr 2023 15:36:37 +0300
Subject: [PATCH 0078/1943] [#223] objectsvc: Refactor request parameters
Resolve containedctx linter for commonPrm.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/util.go | 9 ++---
pkg/services/object/head/remote.go | 3 +-
pkg/services/object/internal/client/client.go | 36 +++++++------------
pkg/services/object/put/remote.go | 3 +-
pkg/services/object/search/util.go | 3 +-
5 files changed, 19 insertions(+), 35 deletions(-)
diff --git a/pkg/services/object/get/util.go b/pkg/services/object/get/util.go
index 3bb68862b..09d8c67af 100644
--- a/pkg/services/object/get/util.go
+++ b/pkg/services/object/get/util.go
@@ -113,7 +113,6 @@ func (c *clientWrapper) getObject(ctx context.Context, exec *execCtx, info corec
func (c *clientWrapper) getRange(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey, rng *object.Range) (*object.Object, error) {
var prm internalclient.PayloadRangePrm
- prm.SetContext(ctx)
prm.SetClient(c.client)
prm.SetTTL(exec.prm.common.TTL())
prm.SetNetmapEpoch(exec.curProcEpoch)
@@ -128,7 +127,7 @@ func (c *clientWrapper) getRange(ctx context.Context, exec *execCtx, key *ecdsa.
prm.SetRawFlag()
}
- res, err := internalclient.PayloadRange(prm)
+ res, err := internalclient.PayloadRange(ctx, prm)
if err != nil {
var errAccessDenied *apistatus.ObjectAccessDenied
if errors.As(err, &errAccessDenied) {
@@ -156,7 +155,6 @@ func (c *clientWrapper) getRange(ctx context.Context, exec *execCtx, key *ecdsa.
func (c *clientWrapper) getHeadOnly(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey) (*object.Object, error) {
var prm internalclient.HeadObjectPrm
- prm.SetContext(ctx)
prm.SetClient(c.client)
prm.SetTTL(exec.prm.common.TTL())
prm.SetNetmapEpoch(exec.curProcEpoch)
@@ -170,7 +168,7 @@ func (c *clientWrapper) getHeadOnly(ctx context.Context, exec *execCtx, key *ecd
prm.SetRawFlag()
}
- res, err := internalclient.HeadObject(prm)
+ res, err := internalclient.HeadObject(ctx, prm)
if err != nil {
return nil, err
}
@@ -181,7 +179,6 @@ func (c *clientWrapper) getHeadOnly(ctx context.Context, exec *execCtx, key *ecd
func (c *clientWrapper) get(ctx context.Context, exec *execCtx, key *ecdsa.PrivateKey) (*object.Object, error) {
var prm internalclient.GetObjectPrm
- prm.SetContext(ctx)
prm.SetClient(c.client)
prm.SetTTL(exec.prm.common.TTL())
prm.SetNetmapEpoch(exec.curProcEpoch)
@@ -195,7 +192,7 @@ func (c *clientWrapper) get(ctx context.Context, exec *execCtx, key *ecdsa.Priva
prm.SetRawFlag()
}
- res, err := internalclient.GetObject(prm)
+ res, err := internalclient.GetObject(ctx, prm)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/head/remote.go b/pkg/services/object/head/remote.go
index f50c3422a..85f076a76 100644
--- a/pkg/services/object/head/remote.go
+++ b/pkg/services/object/head/remote.go
@@ -84,13 +84,12 @@ func (h *RemoteHeader) Head(ctx context.Context, prm *RemoteHeadPrm) (*object.Ob
var headPrm internalclient.HeadObjectPrm
- headPrm.SetContext(ctx)
headPrm.SetClient(c)
headPrm.SetPrivateKey(key)
headPrm.SetAddress(prm.commonHeadPrm.addr)
headPrm.SetTTL(remoteOpTTL)
- res, err := internalclient.HeadObject(headPrm)
+ res, err := internalclient.HeadObject(ctx, headPrm)
if err != nil {
return nil, fmt.Errorf("(%T) could not head object in %s: %w", h, info.AddressGroup(), err)
}
diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go
index ee6777a5d..10a6af271 100644
--- a/pkg/services/object/internal/client/client.go
+++ b/pkg/services/object/internal/client/client.go
@@ -18,12 +18,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)
-// nolint: containedctx
type commonPrm struct {
cli coreclient.Client
- ctx context.Context
-
key *ecdsa.PrivateKey
tokenSession *session.Object
@@ -42,13 +39,6 @@ func (x *commonPrm) SetClient(cli coreclient.Client) {
x.cli = cli
}
-// SetContext sets context.Context for network communication.
-//
-// Required parameter.
-func (x *commonPrm) SetContext(ctx context.Context) {
- x.ctx = ctx
-}
-
// SetPrivateKey sets private key to sign the request(s).
//
// Required parameter.
@@ -138,7 +128,7 @@ func (x GetObjectRes) Object() *object.Object {
// - error of type *apistatus.ObjectAlreadyRemoved if the requested object is marked to be removed.
//
// GetObject ignores the provided session if it is not related to the requested object.
-func GetObject(prm GetObjectPrm) (*GetObjectRes, error) {
+func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) {
// here we ignore session if it is opened for other object since such
// request will almost definitely fail. The case can occur, for example,
// when session is bound to the parent object and child object is requested.
@@ -159,7 +149,7 @@ func GetObject(prm GetObjectPrm) (*GetObjectRes, error) {
prm.cliPrm.UseKey(*prm.key)
}
- rdr, err := prm.cli.ObjectGetInit(prm.ctx, prm.cliPrm)
+ rdr, err := prm.cli.ObjectGetInit(ctx, prm.cliPrm)
if err != nil {
return nil, fmt.Errorf("init object reading: %w", err)
}
@@ -229,7 +219,7 @@ func (x HeadObjectRes) Header() *object.Object {
// HeadObject reads object header by address.
//
-// Client, context and key must be set.
+// Client and key must be set.
//
// Returns any error which prevented the operation from completing correctly in error return.
// Returns:
@@ -238,7 +228,7 @@ func (x HeadObjectRes) Header() *object.Object {
// error of type *apistatus.ObjectAlreadyRemoved if the requested object is marked to be removed.
//
// HeadObject ignores the provided session if it is not related to the requested object.
-func HeadObject(prm HeadObjectPrm) (*HeadObjectRes, error) {
+func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) {
if prm.local {
prm.cliPrm.MarkLocal()
}
@@ -254,7 +244,7 @@ func HeadObject(prm HeadObjectPrm) (*HeadObjectRes, error) {
prm.cliPrm.WithXHeaders(prm.xHeaders...)
- cliRes, err := prm.cli.ObjectHead(prm.ctx, prm.cliPrm)
+ cliRes, err := prm.cli.ObjectHead(ctx, prm.cliPrm)
if err == nil {
// pull out an error from status
err = apistatus.ErrFromStatus(cliRes.Status())
@@ -327,7 +317,7 @@ const maxInitialBufferSize = 1024 * 1024 // 1 MiB
// PayloadRange reads object payload range by address.
//
-// Client, context and key must be set.
+// Client and key must be set.
//
// Returns any error which prevented the operation from completing correctly in error return.
// Returns:
@@ -337,7 +327,7 @@ const maxInitialBufferSize = 1024 * 1024 // 1 MiB
// error of type *apistatus.ObjectOutOfRange if the requested range is too big.
//
// PayloadRange ignores the provided session if it is not related to the requested object.
-func PayloadRange(prm PayloadRangePrm) (*PayloadRangeRes, error) {
+func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, error) {
if prm.local {
prm.cliPrm.MarkLocal()
}
@@ -354,7 +344,7 @@ func PayloadRange(prm PayloadRangePrm) (*PayloadRangeRes, error) {
prm.cliPrm.SetLength(prm.ln)
prm.cliPrm.WithXHeaders(prm.xHeaders...)
- rdr, err := prm.cli.ObjectRangeInit(prm.ctx, prm.cliPrm)
+ rdr, err := prm.cli.ObjectRangeInit(ctx, prm.cliPrm)
if err != nil {
return nil, fmt.Errorf("init payload reading: %w", err)
}
@@ -408,10 +398,10 @@ func (x PutObjectRes) ID() oid.ID {
// PutObject saves the object in local storage of the remote node.
//
-// Client, context and key must be set.
+// Client and key must be set.
//
// Returns any error which prevented the operation from completing correctly in error return.
-func PutObject(prm PutObjectPrm) (*PutObjectRes, error) {
+func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
var prmCli client.PrmObjectPutInit
prmCli.MarkLocal()
@@ -430,7 +420,7 @@ func PutObject(prm PutObjectPrm) (*PutObjectRes, error) {
prmCli.WithXHeaders(prm.xHeaders...)
- w, err := prm.cli.ObjectPutInit(prm.ctx, prmCli)
+ w, err := prm.cli.ObjectPutInit(ctx, prmCli)
if err != nil {
return nil, fmt.Errorf("init object writing on client: %w", err)
}
@@ -487,7 +477,7 @@ func (x SearchObjectsRes) IDList() []oid.ID {
// SearchObjects selects objects from container which match the filters.
//
// Returns any error which prevented the operation from completing correctly in error return.
-func SearchObjects(prm SearchObjectsPrm) (*SearchObjectsRes, error) {
+func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes, error) {
if prm.local {
prm.cliPrm.MarkLocal()
}
@@ -506,7 +496,7 @@ func SearchObjects(prm SearchObjectsPrm) (*SearchObjectsRes, error) {
prm.cliPrm.UseKey(*prm.key)
}
- rdr, err := prm.cli.ObjectSearchInit(prm.ctx, prm.cliPrm)
+ rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm)
if err != nil {
return nil, fmt.Errorf("init object searching in client: %w", err)
}
diff --git a/pkg/services/object/put/remote.go b/pkg/services/object/put/remote.go
index 6933abca6..e7fa124fa 100644
--- a/pkg/services/object/put/remote.go
+++ b/pkg/services/object/put/remote.go
@@ -56,7 +56,6 @@ func (t *remoteTarget) Close(ctx context.Context) (*transformer.AccessIdentifier
var prm internalclient.PutObjectPrm
- prm.SetContext(ctx)
prm.SetClient(c)
prm.SetPrivateKey(t.privateKey)
prm.SetSessionToken(t.commonPrm.SessionToken())
@@ -64,7 +63,7 @@ func (t *remoteTarget) Close(ctx context.Context) (*transformer.AccessIdentifier
prm.SetXHeaders(t.commonPrm.XHeaders())
prm.SetObject(t.obj)
- res, err := internalclient.PutObject(prm)
+ res, err := internalclient.PutObject(ctx, prm)
if err != nil {
return nil, fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
}
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index c12ed2c9f..49f3e5efd 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -99,7 +99,6 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
var prm internalclient.SearchObjectsPrm
- prm.SetContext(ctx)
prm.SetClient(c.client)
prm.SetPrivateKey(key)
prm.SetSessionToken(exec.prm.common.SessionToken())
@@ -110,7 +109,7 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
prm.SetContainerID(exec.containerID())
prm.SetFilters(exec.searchFilters())
- res, err := internalclient.SearchObjects(prm)
+ res, err := internalclient.SearchObjects(ctx, prm)
if err != nil {
return nil, err
}
From 93eba19a8ee619bf9716cbd9fd26ef6adc87e693 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 6 Apr 2023 16:03:00 +0300
Subject: [PATCH 0079/1943] [#223] objectsvc: Refactor split-tree traverse
Resolve funlen & gocognit linters for traverseSplitChain method.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/util/chain.go | 141 ++++++++++++++++--------------
1 file changed, 75 insertions(+), 66 deletions(-)
diff --git a/pkg/services/object/util/chain.go b/pkg/services/object/util/chain.go
index e6ec93630..96dafd10e 100644
--- a/pkg/services/object/util/chain.go
+++ b/pkg/services/object/util/chain.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -72,7 +73,6 @@ func TraverseSplitChain(r HeadReceiver, addr oid.Address, h SplitMemberHandler)
return err
}
-// nolint: funlen, gocognit
func traverseSplitChain(r HeadReceiver, addr oid.Address, h SplitMemberHandler) (bool, error) {
v, err := r.Head(addr)
if err != nil {
@@ -94,80 +94,89 @@ func traverseSplitChain(r HeadReceiver, addr oid.Address, h SplitMemberHandler)
default:
return false, errors.New("lack of split information")
case withLink:
- var addr oid.Address
- addr.SetContainer(cnr)
- addr.SetObject(link)
-
- chain := make([]oid.ID, 0)
-
- if _, err := traverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
- children := member.Children()
-
- if reverseDirection {
- chain = append(children, chain...)
- } else {
- chain = append(chain, children...)
- }
-
- return false
- }); err != nil {
- return false, err
- }
-
- var reverseChain []*object.Object
-
- for i := range chain {
- addr.SetObject(chain[i])
-
- if stop, err := traverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
- if !reverseDirection {
- return h(member, false)
- }
-
- reverseChain = append(reverseChain, member)
- return false
- }); err != nil || stop {
- return stop, err
- }
- }
-
- for i := len(reverseChain) - 1; i >= 0; i-- {
- if h(reverseChain[i], false) {
- return true, nil
- }
- }
+ return traverseByLink(cnr, link, r, h)
case withLast:
- var addr oid.Address
- addr.SetContainer(cnr)
+ return traverseByLast(cnr, last, withLast, res, r, h)
+ }
+ }
+}
- for last, withLast = res.LastPart(); withLast; {
- addr.SetObject(last)
+func traverseByLink(cnr cid.ID, link oid.ID, r HeadReceiver, h SplitMemberHandler) (bool, error) {
+ var addr oid.Address
+ addr.SetContainer(cnr)
+ addr.SetObject(link)
- var directChain []*object.Object
+ chain := make([]oid.ID, 0)
- if _, err := traverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
- if reverseDirection {
- last, withLast = member.PreviousID()
- return h(member, true)
- }
+ if _, err := traverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
+ children := member.Children()
- directChain = append(directChain, member)
+ if reverseDirection {
+ chain = append(children, chain...)
+ } else {
+ chain = append(chain, children...)
+ }
- return false
- }); err != nil {
- return false, err
- }
+ return false
+ }); err != nil {
+ return false, err
+ }
- for i := len(directChain) - 1; i >= 0; i-- {
- if h(directChain[i], true) {
- return true, nil
- }
- }
+ var reverseChain []*object.Object
- if len(directChain) > 0 {
- last, withLast = directChain[len(directChain)-1].PreviousID()
- }
+ for i := range chain {
+ addr.SetObject(chain[i])
+
+ if stop, err := traverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
+ if !reverseDirection {
+ return h(member, false)
}
+
+ reverseChain = append(reverseChain, member)
+ return false
+ }); err != nil || stop {
+ return stop, err
+ }
+ }
+
+ for i := len(reverseChain) - 1; i >= 0; i-- {
+ if h(reverseChain[i], false) {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func traverseByLast(cnr cid.ID, last oid.ID, withLast bool, res *object.SplitInfo, r HeadReceiver, h SplitMemberHandler) (bool, error) {
+ var addr oid.Address
+ addr.SetContainer(cnr)
+
+ for last, withLast = res.LastPart(); withLast; {
+ addr.SetObject(last)
+
+ var directChain []*object.Object
+
+ if _, err := traverseSplitChain(r, addr, func(member *object.Object, reverseDirection bool) (stop bool) {
+ if reverseDirection {
+ last, withLast = member.PreviousID()
+ return h(member, true)
+ }
+
+ directChain = append(directChain, member)
+
+ return false
+ }); err != nil {
+ return false, err
+ }
+
+ for i := len(directChain) - 1; i >= 0; i-- {
+ if h(directChain[i], true) {
+ return true, nil
+ }
+ }
+
+ if len(directChain) > 0 {
+ last, withLast = directChain[len(directChain)-1].PreviousID()
}
}
From 2c07f831c7b58246b5182f7aafb9813187005a9e Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 6 Apr 2023 16:24:43 +0300
Subject: [PATCH 0080/1943] [#223] node: Refactor cache usage
Drop excess type args.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/cache.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go
index eda691496..3d4fc7375 100644
--- a/cmd/frostfs-node/cache.go
+++ b/cmd/frostfs-node/cache.go
@@ -130,7 +130,7 @@ type ttlContainerStorage struct {
func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
const containerCacheSize = 100
- lruCnrCache := newNetworkTTLCache[cid.ID, *container.Container](containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
+ lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(id)
})
@@ -340,7 +340,7 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached
irFetcherCacheTTL = 30 * time.Second
)
- irFetcherCache := newNetworkTTLCache[struct{}, [][]byte](irFetcherCacheSize, irFetcherCacheTTL,
+ irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
func(_ struct{}) ([][]byte, error) {
return f.InnerRingKeys()
},
From 6bf11f7cca5b2845b239b926b4c81fe56815c2fa Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 10 Apr 2023 10:44:37 +0300
Subject: [PATCH 0081/1943] [#230] CHANGELOG.md: Remove older entries
Signed-off-by: Evgenii Stratonikov
---
CHANGELOG.md | 1565 +-------------------------------------------------
1 file changed, 5 insertions(+), 1560 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6793ed340..db846936f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,7 +67,7 @@ Changelog for FrostFS Node
- `github.com/TrueCloudLab/hrw` to `v.1.1.1`
- Minimum go version to v1.18
-### Updating from v0.35.0
+### Updating from v0.35.0 (old NeoFS)
You need to change configuration environment variables to `FROSTFS_*` if you use any.
@@ -77,1564 +77,9 @@ more appropriate for a specific deployment.
Use `__SYSTEM__` prefix for system attributes instead of `__NEOFS__`
(existed objects with old attributes will be treated as before, but for new objects new attributes will be used).
-## [0.35.0] - 2022-12-28 - Sindo (신도, 信島)
+## Older versions
-### Added
-- `morph list-containers` in `neofs-adm` (#1689)
-- `--binary` flag in `neofs-cli object put/get/delete` commands (#1338)
-- `session` flag support to `neofs-cli object hash` (#2029)
-- Shard can now change mode when encountering background disk errors (#2035)
-- Background workers and object service now use separate client caches (#2048)
-- `replicator.pool_size` config field to tune replicator pool size (#2049)
-- Fix NNS hash parsing in morph client (#2063)
-- `neofs-cli neofs-cli acl basic/extended print` commands (#2012)
-- `neofs_node_object_*_req_count_success` prometheus metrics for tracking successfully executed requests (#1984)
-- Metric 'readonly' to get shards mode (#2022)
-- Tree service replication timeout (#2159)
-- `apiclient.reconnect_timeout` setting allowing to ignore failed clients for some time (#2164)
+This project is a fork of [NeoFS](https://github.com/nspcc-dev/neofs-node) from version v0.35.0.
+To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-node/blob/master/CHANGELOG.md.
-### Changed
-- `object lock` command reads CID and OID the same way other commands do (#1971)
-- `LOCK` object are stored on every container node (#1502)
-- `neofs-cli container get-eacl` print ACL table in json format only with arg `--json' (#2012)
-- Side chain notary deposits use max uint32 as till parameter (#1486)
-- Allow object removal without linking object (#2100)
-- `neofs-cli container delete` command pre-checks container ownership (#2106)
-- Policer cache size is now 1024 (#2158)
-- Tree service now synchronizes with container nodes in a random order (#2127)
-- Pilorama no longer tries to apply already applied operations (#2161)
-- Use `sync.Pool` in Object.PUT service (#2139)
-- Shard uses metabase for `HEAD` requests by default, not write-cache (#2167)
-- Clarify help for `--expire-at` parameter for commands `object lock/put` and `bearer create` (#2097)
-- Node spawns `GETRANGE` requests signed with the node's key if session key was not found for `RANGEHASH` (#2144)
-- Full list of container is no longer cached (#2176)
-
-### Fixed
-- Open FSTree in sync mode by default (#1992)
-- `neofs-cli container nodes`'s output (#1991)
-- Increase error counter for write-cache flush errors (#1818)
-- Correctly select the shard for applying tree service operations (#1996)
-- Do not panic and return correct errors for bad inputs in `GET_RANGE` (#2007, #2024)
-- Physical child object removal by GC (#1699)
-- Broadcasting helper objects (#1972)
-- `neofs-cli lock object`'s `lifetime` flag handling (#1972)
-- Do not move write-cache in read-only mode for flushing (#1906)
-- Child object collection on CLI side with a bearer token (#2000)
-- Fix concurrent map writes in `Object.Put` service (#2037)
-- Malformed request errors' reasons in the responses (#2028)
-- Session token's IAT and NBF checks in ACL service (#2028)
-- Losing meta information on request forwarding (#2040)
-- Assembly process triggered by a request with a bearer token (#2040)
-- Losing locking context after metabase resync (#1502)
-- Removing all trees by container ID if tree ID is empty in `pilorama.Forest.TreeDrop` (#1940)
-- Concurrent mode changes in the metabase and blobstor (#2057)
-- Panic in IR when performing HEAD requests (#2069)
-- Write-cache flush duplication (#2074)
-- Ignore error if a transaction already exists in a morph client (#2075)
-- ObjectID signature output in the CLI (#2104)
-- Pack arguments of `setPrice` invocation during contract update (#2078)
-- `neofs-cli object hash` panic (#2079)
-- Closing `neo-go` WS clients on shutdown and switch processes (#2080)
-- Making notary deposits with a zero GAS balance (#2080)
-- Notary requests on shutdown (#2075)
-- `neofs-cli container create ` check the sufficiency of the number of nodes in the selector for replicas (#2038)
-- Data duplication during request forwarding (#2047)
-- Tree service panic on `TreeMove` operation (#2140)
-- Panic in `GETRANGE` with zero length (#2095)
-- Spawning useless `GETRANGE` with zero length for a big object (#2101)
-- Incomplete object put errors do contain the deepest error's message (#2092)
-- Prioritize internal addresses for clients (#2156)
-- Force object removal via control service (#2145)
-- Synchronizing a tree now longer reports an error for a single-node container (#2154)
-- Prevent leaking goroutines in the tree service (#2162)
-- Do not search for LOCK objects when delete container when session provided (#2152)
-- Race conditions on shard's mode switch (#1956)
-- Returning expired/removed objects from write-cache (#2016)
-
-### Removed
-- `-g` option from `neofs-cli control ...` and `neofs-cli container create` commands (#2089)
-- `--header` from `neofs-cli object get` (#2090)
-
-### Updated
-- `neo-go` to `v0.100.0`
-- `spf13/cobra` to `v1.6.1`
-- `spf13/viper` to `v1.8.0`
-- `google.golang.org/grpc` to `v1.50.1`
-
-### Updating from v0.34.0
-Pass CID and OID parameters via the `--cid` and `--oid` flags, not as the command arguments.
-
-Replicator pool size can now be fine-tuned with `replicator.pool_size` config field.
-The default value is taken from `object.put.pool_size_remote` as in earlier versions.
-
-Added `neofs_node_object_*_req_count_success` metrics for tracking successfully executed requests.
-
-`neofs-cli container delete` command now requires given account or session issuer
-to match the container owner. Use `--force` (`-f`) flag to bypass this requirement.
-
-Tree service network replication can now be fine-tuned with `tree.replication_timeout` config field.
-
-## [0.34.0] - 2022-10-31 - Marado (마라도, 馬羅島)
-
-### Added
-- `--timeout` flag in `neofs-cli control` commands (#1917)
-- Document shard modes of operation (#1909)
-- `tree list` CLI command (#1332)
-- `TreeService.GetTrees` RPC (#1902)
-- All trees synchronization on bootstrap (#1902)
-- `--force` flag to `neofs-cli control set-status` command (#1916)
-- Logging `SessionService.Create` RPC on the server for debug (#1930)
-- Debian packages can now be built with `make debpackage` (#409)
-
-### Changed
-- Path to a metabase can now be reloaded with a SIGHUP (#1869)
-
-### Fixed
-- `writecache.max_object_size` is now correctly handled (#1925)
-- Correctly handle setting ONLINE netmap status after maintenance (#1922)
-- Correctly reset shard errors in `ControlService.SetShardMode` RPC (#1931)
-- Setting node's network state to `MAINTENANCE` while network settings forbid it (#1916)
-- Do not panic during API client creation (#1936)
-- Correctly sign new epoch transaction in neofs-adm for a committee of more than 4 nodes (#1949)
-- Inability to provide session to NeoFS CLI in a NeoFS-binary format (#1933)
-- `neofs-adm` now works correctly with a committee of more than 4 nodes (#1949, #1959)
-- Closing a shard now waits until GC background workers stop (#1964)
-- Make it possible to use `shard.ContainerSize` in read-only mode (#1975)
-- Storage node now starts if at least one gRPC endpoint is available (#1893)
-- Panic in API multy client (#1961)
-- Blobstor object removal log messages (#1953)
-- Missing object relatives in object removal session opened by NeoFS CLI (#1978)
-- Bringing a node back online during maintenance (#1900)
-
-### Updated
-- `neo-go` to `v0.99.4`
-- `protoc` to `v3.21.7`
-- `neofs-sdk` to `v1.0.0-rc.7`
-
-### Updating from v0.33.0
-Now storage node serves Control API `SetNemapStatus` request with `MAINTENANCE`
-status only if the mode is allowed in the network settings. To force starting the local
-maintenance on the node, provide `--force` flag to the `neofs-cli control set-status`
-command.
-
-## [0.33.0] - 2022-10-17 - Anmado (안마도, 鞍馬島)
-
-### Added
-- Serving `NetmapService.NetmapSnapshot` RPC (#1793)
-- `netmap snapshot` command of NeoFS CLI (#1793)
-- `apiclient.allow_external` config flag to fallback to node external addresses (#1817)
-- Support `MAINTENANCE` state of the storage nodes (#1680, #1681)
-- Changelog updates CI step (#1808)
-- Validate storage node configuration before node startup (#1805)
-- `neofs-node -check` command to check the configuration file (#1805)
-- `flush-cache` control service command to flush write-cache (#1806)
-- `wallet-address` flag in `neofs-adm morph refill-gas` command (#1820)
-- Validate policy before container creation (#1704)
-- `--timeout` flag in `neofs-cli` subcommands (#1837)
-- `container nodes` command to output list of nodes for container, grouped by replica (#1704)
-- Configuration flag to ignore shard in `neofs-node` (#1840)
-- Add new RPC `TreeService.Healthcheck`
-- Fallback to `GET` if `GET_RANGE` from one storage nodes to another is denied by basic ACL (#1884)
-- List of shards and logger level runtime reconfiguration (#1770)
-- `neofs-adm morph set-config` now supports well-known `MaintenanceModeAllowed` key (#1892)
-- `add`, `get-by-path` and `add-by-path` tree service CLI commands (#1332)
-- Tree synchronisation on startup (#1329)
-- Morph client returns to the highest priority endpoint after the switch (#1615)
-
-### Changed
-- Allow to evacuate shard data with `EvacuateShard` control RPC (#1800)
-- Flush write-cache when moving shard to DEGRADED mode (#1825)
-- Make `morph.cache_ttl` default value equal to morph block time (#1846)
-- Policer marks nodes under maintenance as OK without requests (#1680)
-- Unify help messages in CLI (#1854)
-- `evacuate`, `set-mode` and `flush-cache` control subcommands now accept a list of shard ids (#1867)
-- Reading `object` commands of NeoFS CLI don't open remote sessions (#1865)
-- Use hex format to print storage node ID (#1765)
-
-### Fixed
-- Description of command `netmap nodeinfo` (#1821)
-- Proper status for object.Delete if session token is missing (#1697)
-- Fail startup if metabase has an old version (#1809)
-- Storage nodes could enter the network with any state (#1796)
-- Missing check of new state value in `ControlService.SetNetmapStatus` (#1797)
-- Correlation of object session to request (#1420)
-- Do not increase error counter in `engine.Inhume` if shard is read-only (#1839)
-- `control drop-objects` can remove split objects (#1830)
-- Node's status in `neofs-cli netmap nodeinfo` command (#1833)
-- Child check in object assembly process of `ObjectService.Get` handler (#1878)
-- Shard ID in the object counter metrics (#1863)
-- Metabase migration from the first version (#1860)
-
-### Removed
-- Remove WIF and NEP2 support in `neofs-cli`'s --wallet flag (#1128)
-- Remove --generate-key option in `neofs-cli container delete` (#1692)
-- Serving `ControlService.NetmapSnapshot` RPC (#1793)
-- `control netmap-snapshot` command of NeoFS CLI (#1793)
-
-### Updated
-
-- `neofs-contract` to `v0.16.0`
-- `neofs-api-go` to `v2.14.0`
-
-### Updating from v0.32.0
-Replace using the `control netmap-snapshot` command with `netmap snapshot` one in NeoFS CLI.
-Node can now specify additional addresses in `ExternalAddr` attribute. To allow a node to dial
-other nodes external address, use `apiclient.allow_external` config setting.
-Add `--force` option to skip placement validity check for container creation.
-
-Pass `maintenance` state to `neofs-cli control set-status` to enter maintenance mode.
-If network allows maintenance state (*), it will be reflected in the network map.
-Storage nodes under maintenance are not excluded from the network map, but don't
-serve object operations. (*) can be fetched from network configuration via
-`neofs-cli netmap netinfo` command.
-
-To allow maintenance mode during neofs-adm deployments, set
-`network.maintenance_mode_allowed` parameter in config.
-
-When issuing an object session token for root (virtual, "big") objects,
-additionally include all members of the split-chain. If session context
-includes root object only, it is not spread to physical ("small") objects.
-
-`neofs-node` configuration now supports `mode: disabled` flag for a shard.
-This can be used to temporarily ignore shards without completely removing them
-from the config file.
-
-## [0.32.0] - 2022-09-14 - Pungdo (풍도, 楓島)
-
-### Added
-
-- Objects counter metric (#1712)
-- `meta` subcommand to `neofs-lens` (#1714)
-- Storage node metrics with global and per-shard object counters (#1658)
-- Removal of trees on container removal (#1630)
-- Logging new epoch events on storage node (#1763)
-- Timeout for streaming RPC (#1746)
-- `neofs-adm` is now able to dump hashes from a custom zone (#1748)
-- Empty filename support in the Tree Service (#1698)
-- Flag to `neofs-cli container list-objects` command for attribute printing (#1649)
-
-### Changed
-
-- `neofs-cli object put`'s object ID output has changed from "ID" to "OID" (#1296)
-- `neofs-cli container set-eacl` command now pre-checks container ACL's extensibility (#1652)
-- Access control in Tree service (#1628)
-- Tree service doesn't restrict depth in `rpc GetSubTree` (#1753)
-- `neofs-adm` registers contract hashes in both hex and string address formats (#1749)
-- Container list cache synchronization with the Sidechain (#1632)
-- Blobstor components are unified (#1584, #1686, #1523)
-
-### Fixed
-
-- Panic on write-cache's `Delete` operation (#1664)
-- Payload duplication in `neofs-cli storagegroup put` (#1706)
-- Contract calls in notary disabled environments (#1743)
-- `Blobovnicza.Get` op now iterates over all size buckets (#1707)
-- Object expiration time (#1670)
-- Parser of the placement policy (#1775)
-- Tree service timeout logs (#1759)
-- Object flushing on writecache side (#1745)
-- Active blobovniczas caching (#1691)
-- `neofs-adm` TX waiting (#1738)
-- `neofs-adm` registers contracts with a minimal GAS payment (#1683)
-- Permissions of the file created by `neofs-cli` (#1719)
-- `neofs-adm` creates TX with a high priority attribute (#1702)
-- Storage node's restart after a hard reboot (#1647)
-
-### Updated
-
-- `neo-go` to `v0.99.2`
-- `nspcc-dev/neofs-contract` to `v0.15.5`
-- `prometheus/client_golang` to `v1.13.0`
-- `google.golang.org/protobuf` to `v1.28.1`
-
-### Updating from v0.31.0
-
-Storage Node now collects object count prometheus metrics: `neofs_node_object_counter`.
-
-Provide `--no-precheck` flag to `neofs-cli container set-eacl` for unconditional sending of a request
-(previous default behavior).
-
-## [0.31.0] - 2022-08-04 - Baengnyeongdo (백령도, 白翎島)
-
-### Added
-
-- `neofs-adm` allows deploying arbitrary contracts (#1629)
-
-### Changed
-
-- Priority order in the Morph client (#1648)
-
-### Fixed
-
-- Losing request context in eACL response checks (#1595)
-- Do not return expired objects that have not been handled by the GC yet (#1634)
-- Setting CID field in `neofs-cli acl extended create` (#1650)
-- `neofs-ir` no longer hangs if it cannot bind to the control endpoint (#1643)
-- Do not require `lifetime` flag in `session create` CLI command (#1655)
-- Using deprecated gRPC options (#1644)
-- Increasing metabase error counter on disabled pilorama (#1642)
-- Deadlock in the morph client related to synchronous notification handling (#1653)
-- Slow metabase `COMMON_PREFIX` search for empty prefix (#1656)
-
-### Removed
-
-- Deprecated `profiler` and `metrics` configuration sections (#1654)
-
-### Updated
-
-- `chzyer/realine` to `v1.5.1`
-- `google/uuid` to `v1.3.0`
-- `nats-io/nats.go` to `v1.16.0`
-- `prometheus/client_golang` to `v1.12.2`
-- `spf13/cast` to `v1.5.0`
-- `spf13/viper` to `v1.12.0`
-- `go.uber.org/zap` to `v1.21.0`
-- `google.golang.org/grpc` to `v1.48.0`
-
-### Updating from v0.30.0
-1. Change `morph.endpoint.client` priority values using the following rule:
-the higher the priority the lower the value (non-specified or `0` values are
-interpreted as the highest priority -- `1`).
-2. Deprecated `profiler` and `metrics` configuration sections are dropped,
-use `pprof` and `prometheus` instead.
-
-## [0.30.2] - 2022-08-01
-
-### Added
-- `EACL_NOT_FOUND` status code support (#1645).
-
-## [0.30.1] - 2022-07-29
-
-### Fixed
-
-- `GetRange` operation now works correctly with objects stored in write-cache (#1638)
-- Losing request context in eACL response checks (#1595)
-- Wrong balance contract in innerring processor (#1636)
-- `neofs-adm` now sets groups in manifest for all contracts properly (#1631)
-
-### Updated
-
-- `neo-go` to `v0.99.1`
-- `neofs-sdk-go` to `v1.0.0-rc.6`
-
-## [0.30.0] - 2022-07-22 - Saengildo (생일도, 生日島)
-
-### Added
-
-- Profiler and metrics services now should be enabled with a separate flag
-- Experimental support for the tree-service, disabled by default (#1607)
-- Homomorphic hashes calculation can be disabled across the whole network (#1365)
-- Improve `neofs-adm` auto-completion (#1594)
-
-### Changed
-
-- Require SG members to be unique (#1490)
-- `neofs-cli` now doesn't remove container with LOCK objects without `--force` flag (#1500)
-- LOCK objects are now required to have an expiration epoch (#1461)
-- `morph` sections in IR and storage node configuration now accept an address and a priority of an endpoint (#1609)
-- Morph client now retries connecting to the failed endpoint too (#1609)
-- Redirecting `GET` and `GETRANGE` requests now does not store full object copy in memory (#1605)
-- `neofs-adm` now registers candidates during initialization in a single transaction (#1608)
-
-### Fixed
-- Invalid smart contract address in balance contract listener (#1636)
-
-- Shard now can start in degraded mode if the metabase is unavailable (#1559)
-- Shard can now be disabled completely on init failure (#1559)
-- Storage group members are now required to be unique (#1490)
-- Print shard ID in component logs (#1611)
-
-### Updated
-- `neofs-contract` to `v0.15.3`
-- `neo-go` to the pre-release version
-- `github.com/spf13/cobra` to v1.5.0
-
-### Updating from v0.29.0
-1. Change morph endpoints from simple string to a pair of `address` and `priority`. The second can be omitted.
-For inner ring node this resides in `morph.endpoint.client` section,
-for storage node -- in `morph.rpc_endpoint` section. See `config/example` for an example.
-
-2. Move `storage.default` section to `storage.shard.default`.
-3. Rename `metrics` and `profiler` sections to `prometheus` and `pprof` respectively, though old versions are supported.
-In addition, these sections must now be explicitly enabled with `enabled: true` flag.
-
-## [0.29.0] - 2022-07-07 - Yeonpyeongdo (연평도, 延坪島)
-
-Support WalletConnect signature scheme.
-
-### Added
-- Retrieve passwords for storage wallets from the configuration in neofs-adm (#1539)
-- Metabase format versioning (#1483)
-- `neofs-adm` generates wallets in a pretty JSON format
-- `Makefile` supports building from sources without a git repo
-
-### Fixed
-- Do not replicate object twice to the same node (#1410)
-- Concurrent object handling by the Policer (#1411)
-- Attaching API version to the forwarded requests (#1581)
-- Node OOM panics on `GetRange` request with extremely huge range length (#1590)
-
-### Updated
-- `neofs-sdk-go` to latest pre-release version
-- `tzhash` to `v1.6.1`
-
-## [0.28.3] - 2022-06-08
-
-### Updated
-- Neo-go 0.98.3 => 0.99.0 (#1480)
-
-### Changed
-- Replace pointers with raw structures in results for local storage (#1460)
-- Move common CLI's functions in a separate package (#1452)
-
-### Fixed
-- Confirmation of eACL tables by alphabet nodes when ACL extensibility is disabled (#1485)
-- Do not use WS neo-go client in `neofs-adm` (#1378)
-- Log more detailed network errors by the Alphabet (#1487)
-- Fix container verification by the Alphabet (#1464)
-- Include alphabet contracts to the base group in `neofs-adm` (#1489)
-
-## [0.28.2] - 2022-06-03
-
-### Updated
-- Neo-go 0.98.2 => 0.98.3 (#1430)
-- NeoFS SDK v1.0.0-rc.3 => v1.0.0-rc.4
-- NeoFS API v2.12.1 => v2.12.2
-- NeoFS Contracts v0.14.2 => v0.15.1
-
-### Added
-- Config examples for Inner ring application (#1358)
-- Command for documentation generation for `neofs-cli`, `neofs-adm` and `neofs-lens` (#1396)
-
-### Fixed
-- Do not ask for contract wallet password twice (#1346)
-- Do not update NNS group if the key is the same (#1375)
-- Make LOCODE messages more descriptive (#1394)
-- Basic income transfer's incorrect log message (#1374)
-- Listen to subnet removal events in notary-enabled env (#1224)
-- Update/remove nodes whose subnet has been removed (#1162)
-- Potential removal of local object when policy isn't complied (#1335)
-- Metabase `Select` is now slightly faster (#1433)
-- Fix a number of bugs in writecache (#1462)
-- Refactor eACL processing and fix bugs (#1471)
-- Do not validate subnet removal by IR (#1441)
-- Replace pointers with raw structures in parameters for local storage (#1418)
-
-#### Removed
-- Remove `num` and `shard_num` parameters from the configuration (#1474)
-
-## [0.28.1] - 2022-05-05
-
-### Fixed
-- Loss of the connection scheme during address parsing in NeoFS CLI (#1351)
-
-## [0.28.0] - 2022-04-29 - Heuksando (흑산도, 黑山島)
-
-### Added
-
-- `morph dump-balances` command to NeoFS Adm (#1308)
-- Ability to provide session token from file in NeoFS CLI (#1216)
-
-### Fixed
-
-- Panic in `netmap netinfo` command of NeoFS CLI (#1312)
-- Container cache invalidation on DELETE op (#1313)
-- Subscription to side-chain events in shards (#1321)
-- Trusted object creation without session token (#1283)
-- Storing invalid objects during trusted PUT op (#1286)
-- RAM overhead when writing objects to local storage (#1343)
-
-### Changed
-
-- NeoFS Adm output from stderr to stdout (#1311)
-- Node's object GC mechanism (#1318)
-
-### Updating from v0.28.0-rc.3
-Clean up all metabases and re-sync them using `resync_metabase` config flag.
-
-## [0.28.0-rc.3] - 2022-04-08
-
-### Fixed
-- Check expiration epoch of provided session token (#1168)
-- Prevent corruption in `writecache.Head` (#1149)
-- Use separate caches in N3 RPC multi client (#1213)
-- `neofs-adm` fixes (#1288, #1294, #1295)
-- Don't stop notification listener twice (#1291)
-- Metabase panic (#1293)
-- Disallow to tick block timer twice on the same height (#1208)
-
-### Added
-- Persistent storage for session tokens (#1189)
-- Cache for Inner Ring list fetcher (#1278)
-- Degraded mode of storage engine (#1143)
-- Command to change native Policy contract in `neofs-adm` (#1289)
-- Single websocket endpoint pool for RPC and notifications (#1053)
-
-### Changed
-- Cache NeoFS clients based only on public key (#1157)
-- Make `blobovnicza.Put` idempotent (#1262)
-- Optimize metabase list operations (#1262)
-- PDP check ranges are now asked in random order (#1163)
-- Update go version up to v1.17 (#1250)
-
-### Removed
-- Reduced amount of slices with pointers (#1239)
-
-### Updating from v0.28.0-rc.2
-Remove `NEOFS_IR_MAINNET_ENDPOINT_NOTIFICATION`,
-`NEOFS_IR_MORPH_ENDPOINT_NOTIFICATION`, and `NEOFS_MORPH_NOTIFICATION_ENDPOINT`
-from Inner Ring and Storage configurations.
-
-Specify _WebSocket_ endpoints in `NEOFS_IR_MAINNET_ENDPOINT_CLIENT`,
-`NEOFS_IR_MORPH_ENDPOINT_CLIENT`, and `NEOFS_MORPH_RPC_ENDPOINT` at Inner Ring
-and Storage configurations.
-
-Specify path to persistent session token db in Storage configuration with
-`NEOFS_NODE_PERSISTENT_SESSIONS_PATH`.
-
-## [0.28.0-rc.2] - 2022-03-24
-
-### Fixed
-- Respect format flags for `SplitInfo` output (#1233)
-- Output errors in neofs-cli to stderr where possible (#1259)
-
-### Added
-- Print details for AccessDenied errors in neofs-cli (#1252)
-- Split client creation into 2 stages (#1244)
-- Update morph client to work with v0.15.0 (#1253)
-
-## [0.28.0-rc.1] - 2022-03-18
-
-Native RFC-6979 signatures of messages and tokens, LOCK object types,
-experimental notifications over NATS with NeoFS API v2.12 support
-
-### Fixed
-- Remove session tokens from local storage of storage node after expiration (#1133)
-- Readme typos (#1167)
-- LOCODE attribute and announced address are not mandatory for relay node config (#1114)
-- Check session token verb (#1191)
-- Fix data race leading to reputation data loss (#1210)
-
-### Added
-- Look for `CustomGroup` scope in NNS contract before contract invocation (#749)
-- Cache of notary transaction heights (#1151)
-- NATS notifications (#1183)
-- LOCK object type (#1175, #1176, #1181)
-- Progress bar for object upload/download in neofs-cli (#1185)
-- Support of new status codes (#1247)
-
-### Changed
-- Update neofs-api-go and neofs-sdk-go (#1101, #1131, #1195, #1209, #1231)
-- Use `path/filepath` package for OS path management (#1132)
-- Shard sets mode to `read-only` if it hits threshold limit (#1118)
-- Use request timeout in chain client of neofs-adm (#1115)
-- Generate wallets with 0644 permissions in neofs-adm (#1115)
-- Use cache of parsed addresses in GC (#1115)
-- Determine config type based on file extension in neofs-ir (#1115)
-- Reuse some error defined in contracts (#1115)
-- Improved neofs-cli usability (#1103)
-- Refactor v2 / SDK packages in eACL (#596)
-
-### Removed
-- Remove some wrappers from `morph` package (#625)
-- `GetRange` method in blobovnicza (#1115)
-- Deprecated structures from SDK v1.0.0 rc (#1181)
-
-### Updating from neofs-node v0.27.5
-Set shard error threshold for read-only mode switch with
-`NEOFS_STORAGE_SHARD_RO_ERROR_THRESHOLD` (default: 0, deactivated).
-
-Set NATS configuration for notifications in `NEOFS_NODE_NOTIFICATION` section.
-See example config for more details.
-
-## [0.27.7] - 2022-03-30
-
-### Fixed
-- Shard ID is now consistent between restarts (#1204)
-
-### Added
-- More N3 RPC caches in object service (#1278)
-
-## [0.27.6] - 2022-03-28
-
-### Fixed
-- Allow empty passwords in neofs-cli config (#1136)
-- Set correct audit range hash type in neofs-ir (#1180)
-- Read objects directly from blobstor in case of shard inconsistency (#1186)
-- Fix `-w` flag in subnet commands of neofs-adm (#1223)
-- Do not use explicit mutex lock in chain caches (#1236)
-- Force gRPC server stop if it can't shut down gracefully in storage node (#1270)
-- Return non-zero exit code in `acl extended create` command failures and fix
- help message (#1259)
-
-### Added
-- Interactive storage node configurator in neofs-adm (#1090)
-- Logs in metabase operations (#1188)
-
-## [0.27.5] - 2022-01-31
-
-### Fixed
-- Flush small objects when persist write cache (#1088)
-- Empty response body in object.Search request (#1098)
-- Inner ring correctly checks session token in container.SetEACL request (#1110)
-- Printing in verbose mode in CLI (#1120)
-- Subnet removal event processing (#1123)
-
-### Added
-- Password support in CLI config (#1103)
-- Shard dump restore commands in CLI (#1085, #1086)
-- `acl extended create` command in CLI (#1092)
-
-### Changed
-- Adopt new `owner.ID` API from SDK (#1100)
-- Use `go install` instead of `go get` in Makefile (#1102)
-- Storage node returns Fixed12 decimal on accounting.Balance request. CLI
- prints Fixed8 rounded value by default. (#1084)
-- Support new update interface for NNS contract in NeoFS Adm (#1091)
-- Rename `use_write_cache` to `writecache.enabled` in storage config (#1117)
-- Preallocate slice in `headersFromObject` (#1115)
-- Unify collection of expired objects (#1115)
-- Calculate blobovnicza size at initialization properly (#1115)
-- Process fast search filters outside bbolt transaction (#1115)
-- Update TZHash library to v1.5.1
-
-### Removed
-- `--wif` and `--binary-key` keys from CLI (#1083)
-- Extended ACL validator moved to SDK library (#1096)
-- `--generate-key` flag in CLI control commands (#1103)
-- Various unused code (#1123)
-
-### Upgrading from v0.27.4
-Use `--wallet` key in CLI to provide WIF or binary key file instead of `--wif`
-and `--binary-key`.
-
-Replace `NEOFS_STORAGE_SHARD_N_USE_WRITE_CACHE` with
-`NEOFS_STORAGE_SHARD_N_WRITECACHE_ENABLED` in Storage node config.
-
-Specify `password: xxx` in config file for NeoFS CLI to avoid password input.
-
-## [0.27.4] - 2022-01-13
-
-### Fixed
-- ACL check did not produce status code (#1062)
-- Asset transfer wrapper used incorrect receiver (#1069)
-- Empty search response missed meta header and body (#1063)
-- IR node in single chain environment used incorrect source of IR list (#1025)
-- Incorrect message sequence in object.Range request (#1077)
-
-### Added
-- Option to disable compression of object based on their content-type attribute
- (#1060)
-
-### Changed
-- Factor out autocomplete command in CLI and Adm (#1041)
-- Single crypto rand source (#851)
-
-### Upgrading from v0.27.3
-To disable compression for object with specific content-types, specify them
-as a string array in blobstor section:
-`NEOFS_STORAGE_SHARD_N_BLOBSTOR_COMPRESSION_EXCLUDE_CONTENT_TYPES`. Use
-asterisk as wildcard, e.g. `video/*`.
-
-## [0.27.3] - 2021-12-30
-
-### Added
-- `SetShardMode` RPC in control API, available in CLI (#1044)
-- Support of basic ACL constants without final flag in CLI (#1066)
-
-### Changed
-- `neofs-adm` updates contracts in single tx (#1035)
-- Proxy contract arguments for deployment in `neofs-adm` (#1056)
-
-## [0.27.2] - 2021-12-28
-
-### Fixed
-- Goroutine leak due to infinite response message await ([neofs-api-go#366](https://github.com/nspcc-dev/neofs-api-go/pull/366))
-- Inconsistency in placement function ([neofs-sdk-go#108](https://github.com/nspcc-dev/neofs-sdk-go/pull/108))
-
-### Added
-- `ListShards` RPC in control API, available in CLI (#1043)
-- Epoch metric in Storage and Inner Ring applications (#1054)
-
-### Changed
-- Some object replication related logs were moved to DEBUG level (#1052)
-
-## [0.27.1] - 2021-12-20
-
-### Fixed
-- Big objects now flushed from WriteCache after write (#1028)
-- WriteCache big object counter (#1022)
-- Panic in the container estimation routing (#1016)
-- Shutdown freeze in policer component (#1047)
-
-### Added
-- Shorthand `-g` for `--generate-key` in NeoFS CLI (#1034)
-- Autocomplete generator command for neofs-adm (#1013)
-- Max connection per host config value for neo-go client (#780)
-- Sanity check of session token context in container service (#1045)
-
-### Changed
-- CLI now checks NeoFS status code for exit code (#1039)
-- New `Update` method signature for NNS contract in neofs-adm (#1038)
-
-## [0.27.0] - 2021-12-09 - Sinjido (신지도, 薪智島)
-
-NeoFS API v2.11.0 support with response status codes and storage subnetworks.
-
-### Fixed
-- CLI now opens LOCODE database in read-only mode for listing command (#958)
-- Tombstone owner now is always set (#842)
-- Node in relay mode does not require shard config anymore (#969)
-- Alphabet nodes now ignore notary notifications with non-HALT main tx (#976)
-- neofs-adm now prints version of NNS contract (#1014)
-- Possible NPE in blobovnicza (#1007)
-- More precise calculation of blobovnicza size (#915)
-
-### Added
-- Maintenance mode for Storage node (#922)
-- Float values in Storage node config (#903)
-- Status codes for NeoFS API Response messages (#961)
-- Subnetwork support (#977, #973, #983, #974, #982, #979, #998, #995, #1001, #1004)
-- Customized fee for named container registration (#1008)
-
-### Changed
-- Alphabet contract number is not mandatory (#880)
-- Alphabet nodes resign `AddPeer` request if it updates Storage node info (#938)
-- All applications now use client from neofs-sdk-go library (#966)
-- Some shard configuration records were renamed, see upgrading section (#859)
-- `Nonce` and `VUB` values of notary transactions generated from notification
- hash (#844)
-- Non alphabet notary invocations now have 4 witnesses (#975)
-- Object replication is now async and continuous (#965)
-- NeoFS ADM updated for the neofs-contract v0.13.0 deploy (#984)
-- Minimal TLS version is set to v1.2 (#878)
-- Alphabet nodes now invoke `netmap.Register` to add node to the network map
- candidates in notary enabled environment (#1008)
-
-### Upgrading from v0.26.1
-`NEOFS_IR_CONTRACTS_ALPHABET_AMOUNT` is not mandatory env anymore. If it
-is not set, Inner Ring would try to read maximum from config and NNS contract.
-However, that parameter still can be set in order to require the exact number
-of contracts.
-
-Shard configuration records were renamed:
-- `refill_metabase` -> `resync_metabase`
-- `writecache.max_size` -> `writecache.max_object_size`
-- `writecache.mem_size` -> `writecache.memcache_capacity`
-- `writecache.size_limit` -> `writecache.capacity`
-- `blobstor.blobovnicza.opened_cache_size` -> `blobstor.blobovnicza.opened_cache_capacity`
-- `*.shallow_depth` -> `*.depth`
-- `*.shallow_width` -> `*.width`
-- `*.small_size_limit` -> `*.small_object_size`
-
-Specify storage subnetworks in `NEOFS_NODE_SUBNET_ENTRIES` as the list of
-integer numbers. To exit default subnet, use `NEOFS_NODE_SUBNET_EXIT_ZERO=true`
-
-Specify fee for named container registration in notary disabled environment
-with `NEOFS_IR_FEE_NAMED_CONTAINER_REGISTER`.
-
-## [0.26.1] - 2021-11-02
-
-### Fixed
-- Storage Node handles requests before its initialization is finished (#934)
-- Release worker pools gracefully (#901)
-- Metabase ignored containers of storage group and tombstone objects
- in listing (#945)
-- CLI missed endpoint flag in `control netmap-snapshot` command (#942)
-- Write cache object persisting (#866)
-
-### Added
-- Quote symbol support in `.env` example tests (#935)
-- FSTree object counter (#821)
-- neofs-adm prints contract version in `dump-hashes` command (#940)
-- Default values section in shard configuration (#877)
-- neofs-adm downloads contracts directly from GitHub (#733)
-
-### Changed
-- Use FSTree counter in write cache (#821)
-- Calculate notary deposit `till` parameter depending on available
- deposit (#910)
-- Storage node returns session token error if attached token's private key
- is not available (#943)
-- Refactor of NeoFS API client in inner ring (#946)
-- LOCODE generator tries to find the closest continent if there are
- no exact match (#955)
-
-### Upgrading from v0.26.0
-You can specify default section in storage engine configuration.
-See [example](./config/example/node.yaml) for more details.
-
-## [0.26.0] - 2021-10-19 - Udo (우도, 牛島)
-
-NeoFS API v2.10 support
-
-### Fixed
-- Check remote node public key in every response message (#645)
-- Do not lose local container size estimations (#872)
-- Compressed and uncompressed objects are always available for reading
- regardless of compression configuration (#868)
-- Use request session token in ACL check of object.Put (#881)
-- Parse URI in neofs-cli properly (#883)
-- Parse minutes in LOCODE DB properly (#902)
-- Remove expired tombstones (#884)
-- Close all opened blobovniczas properly (#896)
-- Do not accept objects with empty OwnerID field (#841)
-
-### Added
-- More logs in governance and policer components (#867, #882)
-- Contract address getter in static blockchain clients (#627)
-- Alphabet configuration option to disable governance sync (#869)
-- neofs-lens app implementation (#791)
-- Detailed comments in neofs-node config example (#858)
-- Size suffixes support in neofs-node config (#857)
-- Docs for neofs-adm (#906)
-- Side chain block size duration and global NeoFS configuration in
- NetworkConfig response (#833)
-- Support native container names (#889)
-
-### Changed
-- Updated grpc to v1.41.0 (#860)
-- Updated neo-go to v0.97.3 (#833)
-- Updated neofs-api-go to v1.30.0
-- Adopt neofs-adm for new contracts release (#835, #888)
-- Adopt neofs-node for new contracts release (#905)
-- SN and IR notary deposits are made dynamically depending on the Notary and
- GAS balances (#771)
-- VMagent port in testnet config is now 443 (#908)
-- Use per-shard worker pools for object.Put operations (#674)
-- Renamed `--rpc-endpoint` CLI flag for `control command` to `--endpoint` (#879)
-
-### Removed
-- Global flags in CLI. Deleted useless flags from `accounting balance`
- command (#810).
-- Interactive mode in docker run command (#916)
-
-### Upgrading from v0.25.1
-Deleted `NEOFS_IR_NOTARY_SIDE_DEPOSIT_AMOUNT`, `NEOFS_IR_NOTARY_MAIN_DEPOSIT_AMOUNT`
-and `NEOFS_IR_TIMERS_SIDE_NOTARY`, `NEOFS_IR_TIMERS_MAIN_NOTARY` Inner Ring envs.
-Deleted `NEOFS_MORPH_NOTARY_DEPOSIT_AMOUNT` and `NEOFS_MORPH_NOTARY_DEPOSIT_DURATION`
-Storage Node envs.
-`control` CLI command does not have the `--rpc-endpoint`/`-r` flag anymore; use
-`--endpoint` instead.
-
-## [0.25.1] - 2021-09-29
-
-### Fixed
-- Panic caused by missing Neo RPC endpoints in storage node's config (#863)
-
-### Added
-- Support of multiple Neo RPC endpoints in Inner Ring node (#792)
-
-`mainchain` section of storage node config is left unused by the application.
-
-## [0.25.0] - 2021-09-27 - Mungapdo (문갑도, 文甲島)
-
-### Fixed
-- Work of a storage node with one Neo RPC endpoint instead of a list (#746)
-- Lack of support for HEAD operation on the object write cache (#762)
-- Storage node attribute parsing is stable now (#787)
-- Inner Ring node now logs transaction hashes of Deposit and Withdraw events
- in LittleEndian encoding (#794)
-- Storage node uses public keys of the remote nodes in placement traverser
- checks (#645)
-- Extended ACL `Target` check of role and public keys is mutual exclusive now
- (#816)
-- neofs-adm supports update and deploy of neofs-contract v0.11.0 (#834, #836)
-- Possible NPE in public key conversion (#848)
-- Object assembly routine do not forward existing request instead of creating
- new one (#839)
-- Shard now returns only physical stored objects for replication (#840)
-
-### Added
-- Support events from P2P notary pool
-- Smart contract address auto negotiation with NNS contract (#736)
-- Detailed logs for all data writing operations in storage engine (#790)
-- Docker build and release targets in Makefile (#785)
-- Metabase restore option in the shard config (#789)
-- Write cache used size limit in bytes (#776)
-
-### Changed
-- Reduce container creation delay via listening P2P notary pool (#519)
-- Extended ACL table is not limited to 1KiB (#731)
-- Netmap side chain client wrapper now has `TryNotary` option (#793)
-- Sticky bit is ignored in requests with `SYSTEM` role (#818)
-- Incomplete object put error now contains last RPC error (#778)
-- Container service invalidates container cache on writing operations (#803)
-- Improved write cache size counters (#776)
-- Metabase returns `NotFound` error instead of `AlreadyRemoved` on GCMarked
- objects (#840)
-- Object service uses two routine pools for remote and local GET requests (#845)
-
-### Removed
-- Dockerfile for AllInOne image moved to a separate repository (#796)
-
-### Upgrading from v0.24.1
-Added `NEOFS_CONTRACTS_PROXY` env for Storage Node; mandatory in
-notary enabled environments only. It should contain proxy contract's
-scripthash in side chain.
-
-Added `NEOFS_MORPH_NOTARY_DEPOSIT_AMOUNT` and
-`NEOFS_MORPH_NOTARY_DEPOSIT_DURATION` envs for Storage Node, that
-have default values, not required. They should contain notary deposit
-amount and frequency(in blocks) respectively.
-
-All side chain contract address config values are optional. If side chain
-contract address is not specified, then value gathered from NNS contract.
-
-Added `NEOFS_STORAGE_SHARD_<ID>_WRITECACHE_SIZE_LIMIT` where `<ID>` is the shard ID.
-This is the size limit for all the write cache storages combined in bytes. Default
-size limit is 1 GiB.
-
-Added `NEOFS_STORAGE_SHARD_<ID>_REFILL_METABASE` bool flag where `<ID>` is the shard
-ID. This flag purges the metabase instance at application start and reinitializes
-it with the objects available in the blobstor.
-
-Object service pool size now split into `NEOFS_OBJECT_PUT_POOL_SIZE_REMOTE` and
-`NEOFS_OBJECT_PUT_POOL_SIZE_LOCAL` configuration records.
-
-## [0.24.1] - 2021-09-07
-
-### Fixed
-- Storage and Inner Ring nodes will not start until the Neo RPC node reaches the
-height of the latest block processed by the nodes (#795)
-
-### Upgrading from v0.24.0
-Specify path to the local state DB in Inner Ring node config with
-`NEOFS_IR_NODE_PERSISTENT_STATE_PATH`. Specify path to the local state DB in
-Storage node config with `NEOFS_NODE_PERSISTENT_STATE_PATH`.
-
-## [0.24.0] - 2021-08-30 Anmyeondo (안면도, 安眠島)
-
-### Fixed
-- Linter warning messages (#766)
-- Storage Node does not register itself in network in relay mode now (#761)
-
-### Changed
-- `neofs-adm` fails when is called in a notary-disabled environment (#757)
-- `neofs-adm` uses `neo-go` client's native NNS resolving method instead of the custom one (#756)
-- Node selects pseudo-random list of objects from metabase for replication (#715)
-
-### Added
-- Contract update support in `neofs-adm` utility (#748)
-- Container transferring support in `neofs-adm` utility (#755)
-- Storage Node's balance refilling support in `neofs-adm` utility (#758)
-- Support `COMMON_PREFIX` filter for object attributes in storage engine and `neofs-cli` (#760)
-- Node's and IR's notary status debug message on startup (#758)
-- Go `1.17` unit tests in CI (#766)
-- Supporting all eACL filter fields from the specification (#768)
-- Cache for Container service's read operations (#676)
-
-### Updated
-- `neofs-api-go` library to `v1.29.0`
-
-### Removed
-- Unused `DB_SIZE` parameter of writecache (#773)
-
-### Upgrading from v0.23.1
-Storage Node does not read unused `NEOFS_STORAGE_SHARD_XXX_WRITECACHE_DB_SIZE`
-config parameter anymore.
-
-## [0.23.1] - 2021-08-06
-
-N3 Mainnet launch release with minor fixes.
-
-### Added
-- Initial version of `neofs-adm` tool for fast side chain deployment and
- management in private installations
-- Notary support auto negotiation (#709)
-- Option to disable side chain cache in Storage node (#704)
-- Escape symbols in Storage node attributes (#700)
-
-### Changed
-- Updated neo-go to v0.97.1
-- Updated multiaddr lib to v0.4.0 with native TLS protocol support (#661)
-- Default file permission in storage engine is 660 (#646)
-
-### Fixed
-- Container size estimation routine now aggregates values by cid-epoch tuple
- (#723)
-- Storage engine always creates executable dirs (#646)
-- GC routines in storage engine shards shutdown gracefully (#745)
-- Handle context shutdown at NeoFS multi client group address switching (#737)
-- Scope for main chain invocations from Inner Ring nodes (#751)
-
-### Upgrading from v0.23.0
-Added `NEOFS_MORPH_DISABLE_CACHE` env. If `true`, none of
-the `eACL`/`netmap`/`container` RPC responses cached.
-
-Remove `WITHOUT_NOTARY` and `WITHOUT_MAIN_NOTARY` records from Inner Ring node
-config. Notary support is now auto negotiated.
-
-## [0.23.0] - 2021-07-23 - Wando (완도, 莞島)
-
-Improved stability for notary disabled environment.
-
-### Added
-- Alphabet wallets generation command in neofs-adm (#684)
-- Initial epoch timer tick synchronization at Inner Ring node startup (#679)
-
-### Changed
-- `--address` flag is optional in NeoFS CLI (#656)
-- Notary subsystem now logs `ValidUntilBlock` (#677)
-- Updated neo-go to v0.96.1
-- Storage Node configuration example contains usable parameters (#699)
-
-### Fixed
-- Do not use side chain RoleManagement contract as source of Inner Ring list
- when notary disabled in side chain (#672)
-- Alphabet list transition is even more effective (#697)
-- Inner Ring node does not require proxy and processing contracts if notary
- disabled (#701, #714)
-
-### Upgrading from v0.22.3
-To upgrade Storage node or Inner Ring node from v0.22.3, you don't need to
-change configuration files. Make sure, that NEO RPC nodes, specified in config,
-are connected to N3 RC4 (Testnet) network.
-
-## [0.22.3] - 2021-07-13
-
-### Added
-- Support binary eACL format in container CLI command ([#650](https://github.com/nspcc-dev/neofs-node/issues/650)).
-- Dockerfile for neofs-adm utility ([#680](https://github.com/nspcc-dev/neofs-node/pull/680)).
-
-### Changed
-- All docker files moved to `.docker` dir ([#682](https://github.com/nspcc-dev/neofs-node/pull/682)).
-
-### Fixed
-- Do not require MainNet attributes in "Without MainNet" mode ([#663](https://github.com/nspcc-dev/neofs-node/issues/663)).
-- Stable alphabet list merge in Inner Ring governance ([#670](https://github.com/nspcc-dev/neofs-node/issues/670)).
-- User can specify only wallet section without node key ([#690](https://github.com/nspcc-dev/neofs-node/pull/690)).
-- Log keys in hex format in reputation errors ([#693](https://github.com/nspcc-dev/neofs-node/pull/693)).
-- Connections leak and reduced amount of connection overall ([#692](https://github.com/nspcc-dev/neofs-node/issues/692)).
-
-### Removed
-- Debug output of public key in Inner Ring log ([#689](https://github.com/nspcc-dev/neofs-node/pull/689)).
-
-## [0.22.2] - 2021-07-07
-
-Updated broken version of NeoFS API Go.
-
-### Updated
-- NeoFS API Go: [v1.28.3](https://github.com/nspcc-dev/neofs-api-go/releases/tag/v1.28.3).
-
-## [0.22.1] - 2021-07-07
-
-### Added
-- `GetCandidates` method to morph client wrapper ([#647](https://github.com/nspcc-dev/neofs-node/pull/647)).
-- All-in-One Docker image that contains all NeoFS related binaries ([#662](https://github.com/nspcc-dev/neofs-node/pull/662)).
-- `--version` flag to Storage Node binary ([#664](https://github.com/nspcc-dev/neofs-node/issues/664)).
-
-### Changed
-- Do not check NeoFS version in `LocalNodeInfo` requests and `Put` container operations; `v2.7.0` is genesis version of NeoFS ([#660](https://github.com/nspcc-dev/neofs-node/pull/660)).
-- All error calls of CLI return `1` exit code ([#657](https://github.com/nspcc-dev/neofs-node/issues/657)).
-
-### Fixed
-- Do not use multisignature for audit operations ([#658](https://github.com/nspcc-dev/neofs-node/pull/658)).
-- Skip audit for containers without Storage Groups ([#659](https://github.com/nspcc-dev/neofs-node/issues/659)).
-
-### Updated
-- NeoFS API Go: [v1.28.2](https://github.com/nspcc-dev/neofs-api-go/releases/tag/v1.28.2).
-
-## [0.22.0] - 2021-06-29 - Muuido (무의도, 舞衣島)
-
-Storage nodes with a group of network endpoints.
-
-### Added
-- Support of Neo wallet credentials in CLI ([#610](https://github.com/nspcc-dev/neofs-node/issues/610)).
-- More reliable approval of trust value by IR ([#500](https://github.com/nspcc-dev/neofs-node/issues/500)).
-- Storage node's ability to announce and serve on multiple network addresses ([#607](https://github.com/nspcc-dev/neofs-node/issues/607)).
-- Validation of network addresses of netmap candidates in IR ([#557](https://github.com/nspcc-dev/neofs-node/issues/557)).
-- Control service with healthcheck RPC in IR and CLI support ([#414](https://github.com/nspcc-dev/neofs-node/issues/414)).
-
-### Fixed
-- Approval of objects with duplicate attribute keys or empty values ([#633](https://github.com/nspcc-dev/neofs-node/issues/633)).
-- Approval of containers with duplicate attribute keys or empty values ([#634](https://github.com/nspcc-dev/neofs-node/issues/634)).
-- Default path for CLI config ([#626](https://github.com/nspcc-dev/neofs-node/issues/626)).
-
-### Changed
-- `version` command replaced with `--version` flag in CLI ([#571](https://github.com/nspcc-dev/neofs-node/issues/571)).
-- Command usage text is not printed on errors in CLI ([#623](https://github.com/nspcc-dev/neofs-node/issues/623)).
-- `netmap snapshot` command replaced with `control netmap-snapshot` one in CLI ([#651](https://github.com/nspcc-dev/neofs-node/issues/651)).
-- IR does not include nodes with LOCODE derived attributes to the network map ([#412](https://github.com/nspcc-dev/neofs-node/issues/412)).
-- IR uses morph/client packages for contract invocations ([#496](https://github.com/nspcc-dev/neofs-node/issues/496)).
-- Writecache decreases local size when objects are flushed ([#568](https://github.com/nspcc-dev/neofs-node/issues/568)).
-- IR can override global configuration values only in debug build ([#363](https://github.com/nspcc-dev/neofs-node/issues/363)).
-
-### Updated
-- Neo Go: [v0.95.3](https://github.com/nspcc-dev/neo-go/releases/tag/v0.95.3).
-- NeoFS API Go: [v1.28.0](https://github.com/nspcc-dev/neofs-api-go/releases/tag/v1.28.0).
-- protobuf: [v1.26.0](https://github.com/protocolbuffers/protobuf-go/releases/tag/v1.26.0).
-- uuid: [v1.2.0](https://github.com/google/uuid/releases/tag/v1.2.0).
-- compress: [v1.13.1](https://github.com/klauspost/compress/releases/tag/v1.13.1).
-- base58: [v1.2.0](https://github.com/mr-tron/base58/releases/tag/v1.2.0).
-- multiaddr: [v0.3.2](https://github.com/multiformats/go-multiaddr/releases/tag/v0.3.2).
-- ants: [v2.4.0](https://github.com/panjf2000/ants/releases/tag/v2.4.0).
-- orb: [v0.2.2](https://github.com/paulmach/orb/releases/tag/v0.2.2).
-- prometheus: [v1.11.0](https://github.com/prometheus/client_golang/releases/tag/v1.11.0).
-- testify: [v1.7.0](https://github.com/stretchr/testify/releases/tag/v1.7.0).
-- atomic: [v1.8.0](https://github.com/uber-go/atomic/releases/tag/v1.8.0).
-- zap: [v1.17.0](https://github.com/uber-go/zap/releases/tag/v1.17.0).
-- grpc: [v1.38.0](https://github.com/grpc/grpc-go/releases/tag/v1.38.0).
-- cast: [v1.3.1](https://github.com/spf13/cast/releases/tag/v1.3.1).
-- cobra: [1.1.3](https://github.com/spf13/cobra/releases/tag/v1.1.3).
-- viper: [v1.8.1](https://github.com/spf13/viper/releases/tag/v1.8.1).
-
-## [0.21.1] - 2021-06-10
-
-### Fixed
-- Session token lifetime check (#589).
-- Payload size check on the relayed objects (#580).
-
-### Added
-- VMagent to collect metrics from testnet storage image
-
-### Changed
-- Updated neofs-api-go to v1.27.1 release.
-
-## [0.21.0] - 2021-06-03 - Seongmodo (석모도, 席毛島)
-
-Session token support in container service, refactored config in storage node,
-TLS support on gRPC servers.
-
-### Fixed
-- ACL service traverses over all RequestMetaHeader chain to find
- bearer and session tokens (#548).
-- Object service correctly resends complete objects without attached
- session token (#501).
-- Inner ring processes `neofs.Bind` and `neofs.Unbind` notifications (#556).
-- Client cache now gracefully closes all available connections (#567).
-
-### Added
-- Session token support in container service for `container.Put`,
- `container.Delete` and `container.SetEACL` operations.
-- Session token support in container and sign command of NeoFS CLI.
-- TLS encryption support of gRPC service in storage node.
-
-### Changed
-- Inner ring listens RoleManagement contract notifications to start governance
- update earlier.
-- Inner ring processes extended ACL changes.
-- Inner ring makes signature checks of containers and extended ACLs.
-- Refactored config of storage node.
-- Static clients from `morph/client` do not process notary invocations
- explicitly anymore. Now notary support specified at static client creation.
-- Updated neo-go to v0.95.1 release.
-- Updated neofs-api-go to v1.27.0 release.
-
-### Removed
-- Container policy parser moved to neofs-sdk-go repository.
-- Invoke package from inner ring.
-
-## [0.20.0] - 2021-05-21 - Dolsando (돌산도, 突山島)
-
-NeoFS is N3 RC2 compatible.
-
-### Fixed
-- Calculations in EigenTrust algorithm (#527).
-- NPE at object service request forwarding (#532, #543, #544).
-- FSTree iterations in blobstor (#541).
-- Inhume operation in storage engine (#546).
-
-### Added
-- Optional endpoint to main chain in storage app.
-- Client for NeoFSID contract.
-
-### Changed
-- Reorganized and removed plenty of application configuration records
- (#510, #511, #512, #514).
-- Nodes do not resolve remote addresses manually.
-- Presets for basic ACL in CLI are `private` ,`public-read` and
- `public-read-write` now.
-- Updated neo-go to v0.95.0 release.
-- Updated neofs-api-go to v1.26.1 release.
-- Updated go-multiaddr to v0.3.1 release.
-
-### Removed
-- Unused external GC workers (GC is part of the shard in storage engine now).
-- Unused worker pools for object service in storage app.
-- `pkg/errors` dependency (stdlib errors used instead).
-
-## [0.19.0] - 2021-05-07 - Daecheongdo (대청도, 大靑島)
-
-Storage nodes exchange, calculate, aggregate and store reputation information
-in reputation contract. Inner ring nodes support workflows with and without
-notary subsystem in chains.
-
-### Fixed
-- Build with go1.16.
-- Notary deposits last more blocks.
-- TX hashes now prints in little endian in logs.
-- Metabase deletes graves regardless of the presence of objects.
-- SplitInfo error created from all shards instead of first matched shard.
-- Possible deadlock at cache eviction in blobovnicza.
-- Storage node does not send rebootstrap messages after it went offline.
-
-### Added
-- Reputation subsystem that includes reputation collection, exchange,
-calculation and storage components.
-- Notary and non notary workflows in inner ring.
-- Audit fee transfer for inner ring nodes that performed audit.
-- Unified encoding for all side chain payment details.
-- New write cache implementation for storage engine.
-- NEP-2 and NEP-6 key formats in CLI.
-
-### Changed
-- Metabase puts data in batches.
-- Network related new epoch handlers in storage node executed asynchronously.
-- Storage node gets epoch duration from global config.
-- Storage node resign and resend Search, Range, Head, Get requests of object
-service without modification.
-- Inner ring does not sync side chain validators in single chain deployment.
-- neo-go updated to v0.94.1
-- neofs-api-go updated to v1.26.0
-
-## [0.18.0] - 2021-03-26 - Yeongheungdo (영흥도, 靈興島)
-
-NeoFS operates with updated governance model. Alphabet keys and inner ring keys
-are accessed from side chain committee and `RoleManagement` contract. Each epoch
-alphabet keys are synchronized with main chain.
-
-### Fixed
-- Metabase does not store object payloads anymore.
-- TTLNetCache now always evict data after a timeout.
-- NeoFS CLI keyer could misinterpret hex value as base58.
-
-### Added
-- Local trust controller in storage node.
-- Governance processor in inner ring that synchronizes list of alphabet keys.
-
-### Changed
-- Inner ring keys and alphabet keys are managed separately by inner ring and
- gathered from committee and `RoleManagement` contract.
-
-## [0.17.0] - 2021-03-22 - Jebudo (제부도, 濟扶島)
-
-Notary contract support, updated neofs-api-go with raw client, some performance
-tweaks with extra caches and enhanced metrics.
-
-### Added
-- Notary contract support.
-- Cache for morph client.
-- Metrics for object service and storage engine.
-- Makefile target for fast and dirty docker images.
-- GAS threshold value in inner ring GAS transfers.
-
-### Changed
-- RPC client cache now re-used per address instead of (address+key) tuple.
-- Updated neofs-api-go version to v1.25.0 with raw client support.
-- Updated neo-go to testnet compatible v0.94.0 version.
-
-## [0.16.0] - 2021-02-26 - Ganghwado (강화도, 江華島)
-
-Garbage collector is now running inside storage engine. It is accessed
-via Control API, from `policer` component and through object expiration
-scrubbers.
-
-Inner ring configuration now supports single chain mode with any number of
-alphabet contracts.
-
-Storage node now supports NetworkInfo method in netmap service.
-
-### Fixed
-- Storage engine now inhumes object only in single shard.
-- Metabase correctly removes parent data at batched children delete.
-- Metabase does not accept tombstone on tombstone records in graveyard anymore.
-- Object service now rejects expired objects.
-- CLI now correctly attaches bearer token in storage group operations.
-- Container policy parser now works with strings in filter key.
-- Policer component now removes redundant objects locally.
-
-### Added
-- GC job that monitors expired objects.
-- GC job that removes marked objects from physical storage.
-- Batch inhume operations in metabase, shard and engine.
-- `control.DropObjects` RPC method.
-- Support of `netmap.NetworkInfo` RPC method.
-- Single chain inner ring configuration.
-
-### Changed
-- `UN-LOCODE` node attribute now optional.
-- `engine.Delete` method now marks object to be removed by GC.
-- Inner ring node supports any number of alphabet contracts from 1 up to 40.
-
-## [0.15.0] - 2021-02-12 - Seonyudo (선유도, 仙遊島)
-
-NeoFS nodes are now preview5-compatible.
-
-IR nodes are now engaged in the distribution of funds to the storage nodes:
-for the passed audit and for the amount of stored information. All timers
-of the IR nodes related to the generation and processing of global system
-events are decoupled from astronomical time, and are measured in the number
-of blockchain blocks.
-
-For the geographic positioning of storage nodes, a global NeoFS location
-database is now used, the key in which is a UN/LOCODE, and the base itself
-is generated on the basis of the UN/LOCODE and OpenFlights databases.
-
-### Added
-- Timers with time in blocks of the chain.
-- Subscriptions to new blocks in blockchain event `Listener`.
-- Tracking the volume of stored information by containers in the
- storage engine and an external interface for obtaining this data.
-- `TransferX` operation in sidechain client.
-- Calculators of audit and basic settlements.
-- Distribution of funds to storage nodes for audit and for the amount
- of stored information (settlement processors of IR).
-- NeoFS API `Container.AnnounceUsedSpace` RPC service.
-- Exchange of information about container volumes between storage nodes
- controlled by IR through sidechain notifications.
-- Support of new search matchers (`STRING_NOT_EQUAL`, `NOT_PRESENT`).
-- Functional for the formation of NeoFS location database.
-- CLI commands for generating and reading the location database.
-- Checking the locode attribute and generating geographic attributes
- for candidates for a network map on IR side.
-- Verification of the eACL signature when checking Object ACL rules.
-
-### Fixed
-- Overwriting the local configuration of node attributes when updating
- the network map.
-- Ignoring the X-headers CLI `storagegroup` commands.
-- Inability to attach bearer token in CLI `storagegroup` commands.
-
-### Changed
-- Units of epoch and emit IR intervals.
-- Query language in CLI `object search` command.
-
-### Updated
-- neo-go v0.93.0.
-- neofs-api-go v1.23.0.
-
-## [0.14.3] - 2021-01-27
-
-### Fixed
-- Upload of objects bigger than single gRPC message.
-- Inconsistent placement issues (#347, #349).
-- Bug when ACL request classifier failed to classify `RoleOthers` in
- first epoch.
-
-### Added
-- Debug section in readme file for testnet configuration.
-
-### Changed
-- Docker images now based on alpine and contain shell.
-- Node bootstraps with active state in node info structure.
-
-## [0.14.2] - 2021-01-20
-
-Testnet4 related bugfixes.
-
-### Fixed
-- Default values for blobovnicza object size limit and blobstor small object
- size are not zero.
-- Various storage engine log messages.
-- Bug when inner ring node ignored bootstrap messages from restarted storage
- nodes.
-
-### Added
-- Timeout for reading boltDB files at storage node initialization.
-
-### Changed
-- Increased default extra GAS fee for contract invocations at inner ring.
-
-## [0.14.1] - 2021-01-15
-
-### Fixed
-
-- Inner ring node could not confirm `netmap.updateState` notification.
-- `object.RangeHash` method ignored salt values.
-
-### Added
-
-- Control API service for storage node with health check, netmap and node state
- relate methods.
-- Object service now looks to previous epoch containers.
-- Possibility to configure up multiple NEO RPC endpoints in storage node.
-
-### Changed
-
-- Storage node shuts down if event producer RPC node is down.
-
-## [0.14.0] - 2020-12-30 - Yeouido (여의도, 汝矣島)
-
-Preview4 compatible NeoFS nodes with data audit.
-
-### Added
-- Data audit routines in inner ring nodes.
-- Storage group operations in CLI (`neofs-cli storagegroup --help`).
-
-### Fixed
-- Loss of request X-headers during the forwarding in Object service.
-
-### Changed
-- Updated neo-go version for preview4 compatibility.
-
-### Updated
-- neo-go v0.92.0.
-- neofs-api-go v1.22.0.
-
-## [0.13.2] - 2020-12-24
-
-Support changes from neofs-api-go v1.21.2 release.
-
-### Added
-
-- Support of request X-Headers in CLI commands.
-
-### Changed
-
-- Use updated API of container library.
-
-## [0.13.1] - 2020-12-18
-
-Fixes based on Modo release testing results.
-
-### Added
-
-- Verification of chain element addresses during object assembling.
-
-### Changed
-
-- Processing of filters by non-address fields in Object Range/RangeHash/Delete.
-
-### Fixed
-
-- `Graveyard` and `ToMoveIt` bucket names in metabase.
-- Double formation of the parent title when transforming an object.
-- Loss of session token during Object Put.
-- Potential generating Range requests inside Get request execution context.
-
-## [0.13.0] - 2020-12-15 - Modo (모도, 茅島)
-
-Implementation of a local object storage engine.
-Adaptation of the object service work scheme for the engine.
-
-### Changed
-
-- Object format after transformations.
-- Handling of object operations.
-
-### Added
-
-- Local storage components: `Engine`, `Shard`, `BlobStor`,
- `Metabase`, `Blobovnicza`.
-- Support of voting for sidechain governance in IR node.
-- `Raw` flag support in Object Get/Head/GetRange CLI commands.
-
-### Fixed
-
-- Ignoring object address from session token in eACL validation.
-
-## [0.12.1] - 2020-11-25
-
-Bugfixes and small performance improvements.
-
-### Fixed
-
-- Routine leak by adding SDK client cache. (#184)
-- Variety of ACL bugs. (#180, #190, #209)
-- Policer tried to replicate virtual objects. (#182)
-- Search queries with object ID field. (#177)
-- Bug with extended ACL signature check in neofs-cli (#206)
-
-### Added
-
-- More debug logs in object service.
-- Dial timeouts in object service config (`NEOFS_OBJECT_PUT_DIAL_TIMEOUT=5s`)
-
-### Changed
-
-- Routine pools in object service are non-blocking now.
-- Container service now returns error if extended ACL is not set.
-
-## [0.12.0] - 2020-11-17
-
-NeoFS-API v2.0 support and updated brand-new storage node application.
-
-### Fixed
-
-- SetConfig method invocation of netmap contract. (#147)
-- Balance response overflow. (#122)
-
-### Added
-
-- Gas emission routine in inner ring nodes.
-- GRPC reflection service. (`NEOFS_GRPC_ENABLE_REFLECT_SERVICE=true`)
-- New netmap query language parser.
-
-### Changed
-
-- Storage node application rebuilt from scratch.
-- CLI supports accounting, object and container related operations.
-- Inner ring node shutdowns on neo RPC node connection drop.
-- Updated to preview4 compatible neo-go version.
-
-## [0.11.0] - 2020-07-23
-
-### Added
-
-- Inner ring application to repository.
-- Inner ring epoch processor.
-- Inner ring asset processor for GAS deposit and withdraw.
-
-### Changed
-
-- The structure of source code tree.
-
-## [0.10.0] - 2020-07-10
-
-First public review release.
-[Unreleased]: https://github.com/nspcc-dev/neofs-node/compare/v0.35.0...master
-[0.35.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.34.0...v0.35.0
-[0.34.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.33.0...v0.34.0
-[0.33.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.32.0...v0.33.0
-[0.32.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.31.0...v0.32.0
-[0.31.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.30.2...v0.31.0
-[0.30.2]: https://github.com/nspcc-dev/neofs-node/compare/v0.30.1...v0.30.2
-[0.30.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.30.0...v0.30.1
-[0.30.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.29.0...v0.30.0
-[0.29.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.28.3...v0.29.0
-[0.28.3]: https://github.com/nspcc-dev/neofs-node/compare/v0.28.2...v0.28.3
-[0.28.2]: https://github.com/nspcc-dev/neofs-node/compare/v0.28.1...v0.28.2
-[0.28.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.28.0...v0.28.1
-[0.28.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.28.0-rc.3...v0.28.0
-[0.28.0-rc.3]: https://github.com/nspcc-dev/neofs-node/compare/v0.28.0-rc.2...v0.28.0-rc.3
-[0.28.0-rc.2]: https://github.com/nspcc-dev/neofs-node/compare/v0.28.0-rc.1...v0.28.0-rc.2
-[0.28.0-rc.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.27.7...v0.28.0-rc.1
-[0.27.7]: https://github.com/nspcc-dev/neofs-node/compare/v0.27.6...v0.27.7
-[0.27.6]: https://github.com/nspcc-dev/neofs-node/compare/v0.27.5...v0.27.6
-[0.27.5]: https://github.com/nspcc-dev/neofs-node/compare/v0.27.4...v0.27.5
-[0.27.4]: https://github.com/nspcc-dev/neofs-node/compare/v0.27.3...v0.27.4
-[0.27.3]: https://github.com/nspcc-dev/neofs-node/compare/v0.27.2...v0.27.3
-[0.27.2]: https://github.com/nspcc-dev/neofs-node/compare/v0.27.1...v0.27.2
-[0.27.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.27.0...v0.27.1
-[0.27.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.26.1...v0.27.0
-[0.26.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.26.0...v0.26.1
-[0.26.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.25.1...v0.26.0
-[0.25.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.25.0...v0.25.1
-[0.25.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.24.1...v0.25.0
-[0.24.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.24.0...v0.24.1
-[0.24.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.23.1...v0.24.0
-[0.23.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.23.0...v0.23.1
-[0.23.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.22.3...v0.23.0
-[0.22.3]: https://github.com/nspcc-dev/neofs-node/compare/v0.22.2...v0.22.3
-[0.22.2]: https://github.com/nspcc-dev/neofs-node/compare/v0.22.1...v0.22.2
-[0.22.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.22.0...v0.22.1
-[0.22.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.21.1...v0.22.0
-[0.21.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.21.0...v0.21.1
-[0.21.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.20.0...v0.21.0
-[0.20.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.19.0...v0.20.0
-[0.19.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.18.0...v0.19.0
-[0.18.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.17.0...v0.18.0
-[0.17.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.16.0...v0.17.0
-[0.16.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.15.0...v0.16.0
-[0.15.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.14.3...v0.15.0
-[0.14.3]: https://github.com/nspcc-dev/neofs-node/compare/v0.14.2...v0.14.3
-[0.14.2]: https://github.com/nspcc-dev/neofs-node/compare/v0.14.1...v0.14.2
-[0.14.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.14.0...v0.14.1
-[0.14.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.13.2...v0.14.0
-[0.13.2]: https://github.com/nspcc-dev/neofs-node/compare/v0.13.1...v0.13.2
-[0.13.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.13.0...v0.13.1
-[0.13.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.12.1...v0.13.0
-[0.12.1]: https://github.com/nspcc-dev/neofs-node/compare/v0.12.0...v0.12.1
-[0.12.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.11.0...v0.12.0
-[0.11.0]: https://github.com/nspcc-dev/neofs-node/compare/v0.10.0...v0.11.0
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-node/compare/98e48b68514127afc291b8a8ff6b12838ed1cb5c...master
From c85a0bc86601d4820bab4357f1396f7c066f26de Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 11 Apr 2023 20:20:32 +0300
Subject: [PATCH 0082/1943] [#236] blobstor/test: Reduce test descriptions
I tried to add 4 more tests and suddenly, it became harder to navigate in
code. Move directory creation into a common function.
Signed-off-by: Evgenii Stratonikov
---
.../blobstor/perf_test.go | 84 ++++++++-----------
1 file changed, 35 insertions(+), 49 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index 0fd3048d3..a593e1bf4 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -13,60 +13,65 @@ import (
"github.com/stretchr/testify/require"
)
+type storage struct {
+ desc string
+ create func(string) common.Storage
+}
+
+func (s storage) open(b *testing.B) common.Storage {
+ dir, err := os.MkdirTemp(os.TempDir(), s.desc)
+ if err != nil {
+ b.Fatalf("creating %s root path: %v", s.desc, err)
+ }
+ st := s.create(dir)
+
+ require.NoError(b, st.Open(false))
+ require.NoError(b, st.Init())
+
+ b.Cleanup(func() {
+ require.NoError(b, st.Close())
+ require.NoError(b, os.RemoveAll(dir))
+ })
+
+ return st
+}
+
// The storages to benchmark. Each storage has a description and a function which returns the actual
// storage along with a cleanup function.
-var storages = []struct {
- desc string
- create func(*testing.B) (common.Storage, func())
-}{
+var storages = []storage{
{
desc: "memstore",
- create: func(*testing.B) (common.Storage, func()) {
- return memstore.New(), func() {}
+ create: func(string) common.Storage {
+ return memstore.New()
},
},
{
desc: "fstree_nosync",
- create: func(b *testing.B) (common.Storage, func()) {
- dir, err := os.MkdirTemp(os.TempDir(), "fstree_nosync")
- if err != nil {
- b.Fatalf("creating fstree_nosync root path: %v", err)
- }
- cleanup := func() { os.RemoveAll(dir) }
+ create: func(dir string) common.Storage {
return fstree.New(
fstree.WithPath(dir),
fstree.WithDepth(2),
fstree.WithDirNameLen(2),
fstree.WithNoSync(true),
- ), cleanup
+ )
},
},
{
desc: "fstree",
- create: func(b *testing.B) (common.Storage, func()) {
- dir, err := os.MkdirTemp(os.TempDir(), "fstree")
- if err != nil {
- b.Fatalf("creating fstree root path: %v", err)
- }
- cleanup := func() { os.RemoveAll(dir) }
+ create: func(dir string) common.Storage {
return fstree.New(
fstree.WithPath(dir),
fstree.WithDepth(2),
fstree.WithDirNameLen(2),
- ), cleanup
+ )
},
},
{
desc: "blobovniczatree",
- create: func(b *testing.B) (common.Storage, func()) {
- dir, err := os.MkdirTemp(os.TempDir(), "blobovniczatree")
- if err != nil {
- b.Fatalf("creating blobovniczatree root path: %v", err)
- }
- cleanup := func() { os.RemoveAll(dir) }
+ create: func(dir string) common.Storage {
return blobovniczatree.NewBlobovniczaTree(
blobovniczatree.WithRootPath(dir),
- ), cleanup
+ )
},
},
}
@@ -95,10 +100,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
for _, stEntry := range storages {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
- st, cleanup := stEntry.create(b)
-
- require.NoError(b, st.Open(false))
- require.NoError(b, st.Init())
+ st := stEntry.open(b)
// Fill database
for i := 0; i < tt.size; i++ {
@@ -123,9 +125,6 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
require.NoError(b, err)
}
})
-
- require.NoError(b, st.Close())
- cleanup()
})
}
}
@@ -150,10 +149,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
for _, stEntry := range storages {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
gen := genEntry.create()
- st, cleanup := stEntry.create(b)
-
- require.NoError(b, st.Open(false))
- require.NoError(b, st.Init())
+ st := stEntry.open(b)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
@@ -170,9 +166,6 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
}
}
})
-
- require.NoError(b, st.Close())
- cleanup()
})
}
}
@@ -194,10 +187,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
for _, stEntry := range storages {
b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
objGen := tt.objGen()
- st, cleanup := stEntry.create(b)
-
- require.NoError(b, st.Open(false))
- require.NoError(b, st.Init())
+ st := stEntry.open(b)
// Fill database
for i := 0; i < tt.size; i++ {
@@ -224,10 +214,6 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
})
require.NoError(b, err)
require.Equal(b, tt.size, cnt)
- b.StopTimer()
-
- require.NoError(b, st.Close())
- cleanup()
})
}
}
From 6ad5c38225adba16946991a68eb12a79bd3cf7e3 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 11 Apr 2023 20:30:27 +0300
Subject: [PATCH 0083/1943] [#236] testutil: Use random object id in
`RandObjGenerator`
Before this commit it was like this:
```
BenchmarkSubstorageWritePerf/memstore-rand10-8 227425 4859 ns/op
BenchmarkSubstorageWritePerf/fstree_nosync-rand10-8 --- FAIL: BenchmarkSubstorageWritePerf/fstree_nosync-rand10-8
perf_test.go:165: writing entry: file exists
perf_test.go:165: writing entry: file exists
perf_test.go:165: writing entry: file exists
BenchmarkSubstorageWritePerf/fstree-rand10-8 --- FAIL: BenchmarkSubstorageWritePerf/fstree-rand10-8
perf_test.go:165: writing entry: file exists
perf_test.go:165: writing entry: file exists
perf_test.go:165: writing entry: file exists
```
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/internal/testutil/generators.go | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go
index a45ea72aa..4ee92db88 100644
--- a/pkg/local_object_storage/internal/testutil/generators.go
+++ b/pkg/local_object_storage/internal/testutil/generators.go
@@ -81,7 +81,9 @@ type RandObjGenerator struct {
var _ ObjectGenerator = &RandObjGenerator{}
func (g *RandObjGenerator) Next() *object.Object {
- return generateObjectWithOIDWithCIDWithSize(oid.ID{}, cid.ID{}, g.ObjSize)
+ var id oid.ID
+ rand.Read(id[:])
+ return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
}
// OverwriteObjGenerator is an ObjectGenerator that generates entries with random payloads of size objSize and at most maxObjects distinct IDs.
From c4865783fc272f4219c075a14b897ffc2845b59d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 11 Apr 2023 20:51:58 +0300
Subject: [PATCH 0084/1943] [#236] blobstor/test: Prefill storage in parallel
in read benchmark
`blobovniczatree` takes a really long time to prefill, because each
batch takes at least 10ms, so for 10k iterations we have at least 100s of
prefill.
Signed-off-by: Evgenii Stratonikov
---
.../blobstor/perf_test.go | 22 ++++++++++++-------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index a593e1bf4..0351eb56e 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
)
type storage struct {
@@ -103,18 +104,23 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
st := stEntry.open(b)
// Fill database
+ var errG errgroup.Group
for i := 0; i < tt.size; i++ {
obj := objGen.Next()
addr := testutil.AddressFromObject(b, obj)
- raw, err := obj.Marshal()
- require.NoError(b, err)
- if _, err := st.Put(common.PutPrm{
- Address: addr,
- RawData: raw,
- }); err != nil {
- b.Fatalf("writing entry: %v", err)
- }
+ errG.Go(func() error {
+ raw, err := obj.Marshal()
+ if err != nil {
+ return fmt.Errorf("marshal: %v", err)
+ }
+ _, err = st.Put(common.PutPrm{
+ Address: addr,
+ RawData: raw,
+ })
+ return err
+ })
}
+ require.NoError(b, errG.Wait())
// Benchmark reading
addrGen := tt.addrGen()
From 72565a91ef67e093aff9200f97f70651c05da31f Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 11 Apr 2023 11:59:14 +0300
Subject: [PATCH 0085/1943] [#135] node: Update api-go version
Signed-off-by: Dmitrii Stepanov
---
go.mod | 26 +++++++++++++++------
go.sum | 71 +++++++++++++++++++++++++++++++++++++++++++++++-----------
2 files changed, 77 insertions(+), 20 deletions(-)
diff --git a/go.mod b/go.mod
index 6d9f04030..af85e69d5 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module git.frostfs.info/TrueCloudLab/frostfs-node
go 1.18
require (
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230315095236-9dc375346703
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.0
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230316081442-bec77f280a85
git.frostfs.info/TrueCloudLab/hrw v1.2.0
@@ -29,14 +29,14 @@ require (
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.15.0
- github.com/stretchr/testify v1.8.1
+ github.com/stretchr/testify v1.8.2
go.etcd.io/bbolt v1.3.6
go.uber.org/atomic v1.10.0
go.uber.org/zap v1.24.0
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2
golang.org/x/sync v0.1.0
- golang.org/x/term v0.3.0
- google.golang.org/grpc v1.52.0
+ golang.org/x/term v0.5.0
+ google.golang.org/grpc v1.53.0
google.golang.org/protobuf v1.28.1
gopkg.in/yaml.v3 v3.0.1
)
@@ -47,15 +47,19 @@ require (
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 // indirect
github.com/benbjohnson/clock v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/cenkalti/backoff/v4 v4.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
+ github.com/go-logr/logr v1.2.3 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.3 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
@@ -90,12 +94,20 @@ require (
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 // indirect
github.com/twmb/murmur3 v1.1.5 // indirect
github.com/urfave/cli v1.22.5 // indirect
+ go.opentelemetry.io/otel v1.14.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.14.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.14.0 // indirect
+ go.opentelemetry.io/otel/trace v1.14.0 // indirect
+ go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.4.0 // indirect
- golang.org/x/net v0.4.0 // indirect
+ golang.org/x/net v0.7.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
- google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
+ google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.1.7 // indirect
)
diff --git a/go.sum b/go.sum
index b6f1a1e0b..f65a7aeab 100644
--- a/go.sum
+++ b/go.sum
@@ -36,8 +36,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230315095236-9dc375346703 h1:lxe0DtZq/uFZVZu9apx6OcIXCJskQBMd/GVeYGKA3wA=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230315095236-9dc375346703/go.mod h1:gRd5iE5A84viily6AcNBsSlTx2XgoWrwRDz7z0MayDQ=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.0 h1:oZ0/KiaFeveXRLi5VVEpuLSHczeFyWx4HDl9wTJUtsE=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.0/go.mod h1:sPyITTmQT662ZI38ud2aoE1SUCAr1mO5xV8P4nzLkKI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb h1:S/TrbOOu9qEXZRZ9/Ddw7crnxbBUQLo68PSzQWYrc9M=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb/go.mod h1:nkR5gaGeez3Zv2SE7aceP0YwxG2FzIB5cGKpQO2vV2o=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -56,6 +56,7 @@ github.com/CityOfZion/neo-go v0.62.1-pre.0.20191114145240-e740fbe708f8/go.mod h1
github.com/CityOfZion/neo-go v0.70.1-pre.0.20191209120015-fccb0085941e/go.mod h1:0enZl0az8xA6PVkwzEOwPWVJGqlt/GO4hA4kmQ5Xzig=
github.com/CityOfZion/neo-go v0.70.1-pre.0.20191212173117-32ac01130d4c/go.mod h1:JtlHfeqLywZLswKIKFnAp+yzezY4Dji9qlfQKB2OD/I=
github.com/CityOfZion/neo-go v0.71.1-pre.0.20200129171427-f773ec69fb84/go.mod h1:FLI526IrRWHmcsO+mHsCbj64pJZhwQFTLJZu+A4PGOA=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA=
github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg=
github.com/abiosoft/ishell/v2 v2.0.2/go.mod h1:E4oTCXfo6QjoCart0QYa5m9w4S+deXs/P/9jA77A9Bs=
@@ -90,11 +91,15 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
+github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
+github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -110,7 +115,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
@@ -129,6 +138,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
@@ -156,12 +166,19 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-redis/redis v6.10.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -239,6 +256,8 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
@@ -445,6 +464,7 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
@@ -470,8 +490,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/syndtr/goleveldb v0.0.0-20180307113352-169b1b37be73/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
@@ -500,14 +521,30 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
+go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.14.0 h1:sEL90JjOO/4yhquXl5zTAkLLsZ5+MycAgX99SDsxGc8=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.14.0/go.mod h1:oCslUcizYdpKYyS9e8srZEqM6BB8fq41VJBjLAE6z1w=
+go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
+go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
+go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
+go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
@@ -607,13 +644,14 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -624,6 +662,7 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -692,8 +731,10 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -708,14 +749,15 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210429154555-c04ba851c2a4/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
-golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
@@ -846,8 +888,9 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
-google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
+google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -866,9 +909,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
-google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk=
-google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
From 5af9f5846981b7262ce941c417e470059ada8937 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 13 Mar 2023 14:01:43 +0300
Subject: [PATCH 0086/1943] [#135] tracing: Add tracing to node gRPC endpoints
Signed-off-by: Dmitrii Stepanov
---
CHANGELOG.md | 1 +
cmd/frostfs-cli/modules/tree/client.go | 12 +++++++--
cmd/frostfs-node/config.go | 9 +++++++
cmd/frostfs-node/config/tracing/config.go | 31 +++++++++++++++++++++++
cmd/frostfs-node/grpc.go | 7 +++++
cmd/frostfs-node/main.go | 2 ++
cmd/frostfs-node/tracing.go | 31 +++++++++++++++++++++++
config/example/node.env | 4 +++
config/example/node.json | 5 ++++
config/example/node.yaml | 6 +++++
pkg/services/tree/cache.go | 12 +++++++--
pkg/services/tree/sync.go | 10 +++++++-
12 files changed, 125 insertions(+), 5 deletions(-)
create mode 100644 cmd/frostfs-node/config/tracing/config.go
create mode 100644 cmd/frostfs-node/tracing.go
diff --git a/CHANGELOG.md b/CHANGELOG.md
index db846936f..0e07bb2f2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ Changelog for FrostFS Node
- Multiple configs support (#44)
- Parameters `nns-name` and `nns-zone` for command `frostfs-cli container create` (#37)
- Tree service now saves the last synchronization height which persists across restarts (#82)
+- Add tracing support (#135)
### Changed
- Change `frostfs_node_engine_container_size` to counting sizes of logical objects
diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go
index f379de41b..f25bff166 100644
--- a/cmd/frostfs-cli/modules/tree/client.go
+++ b/cmd/frostfs-cli/modules/tree/client.go
@@ -5,6 +5,7 @@ import (
"strings"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
@@ -22,8 +23,15 @@ func _client(ctx context.Context) (tree.TreeServiceClient, error) {
return nil, err
}
- opts := make([]grpc.DialOption, 1, 2)
- opts[0] = grpc.WithBlock()
+ opts := []grpc.DialOption{
+ grpc.WithBlock(),
+ grpc.WithChainUnaryInterceptor(
+ tracing.NewGRPCUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ tracing.NewGRPCStreamClientInterceptor(),
+ ),
+ }
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index ab615d340..d81e47b17 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -16,6 +16,7 @@ import (
"time"
netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient"
contractsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/contracts"
@@ -27,6 +28,7 @@ import (
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
+ tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -1055,6 +1057,13 @@ func (c *cfg) reloadConfig(ctx context.Context) {
}
components = append(components, dCmp{"logger", logPrm.Reload})
+ components = append(components, dCmp{"tracing", func() error {
+ updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
+ if updated {
+ c.log.Info("tracing configation updated")
+ }
+ return err
+ }})
if cmp, updated := metricsComponent(c); updated {
if cmp.enabled {
cmp.preReload = enableMetricsSvc
diff --git a/cmd/frostfs-node/config/tracing/config.go b/cmd/frostfs-node/config/tracing/config.go
new file mode 100644
index 000000000..76572cc31
--- /dev/null
+++ b/cmd/frostfs-node/config/tracing/config.go
@@ -0,0 +1,31 @@
+package tracing
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/misc"
+)
+
+const (
+ subsection = "tracing"
+)
+
+// ToTracingConfig extracts tracing config.
+func ToTracingConfig(c *config.Config) *tracing.Config {
+ return &tracing.Config{
+ Enabled: config.BoolSafe(c.Sub(subsection), "enabled"),
+ Exporter: tracing.Exporter(config.StringSafe(c.Sub(subsection), "exporter")),
+ Endpoint: config.StringSafe(c.Sub(subsection), "endpoint"),
+ Service: "frostfs-node",
+ InstanceID: getInstanceIDOrDefault(c),
+ Version: misc.Version,
+ }
+}
+
+func getInstanceIDOrDefault(c *config.Config) string {
+ s := config.StringSlice(c.Sub("node"), "addresses")
+ if len(s) > 0 {
+ return s[0]
+ }
+ return ""
+}
diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go
index a56c76606..f3943f3ff 100644
--- a/cmd/frostfs-node/grpc.go
+++ b/cmd/frostfs-node/grpc.go
@@ -7,6 +7,7 @@ import (
"net"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
@@ -19,6 +20,12 @@ func initGRPC(c *cfg) {
grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) {
serverOpts := []grpc.ServerOption{
grpc.MaxSendMsgSize(maxMsgSize),
+ grpc.ChainUnaryInterceptor(
+ tracing.NewGRPCUnaryServerInterceptor(),
+ ),
+ grpc.ChainStreamInterceptor(
+ tracing.NewGRPCStreamServerInterceptor(),
+ ),
}
tlsCfg := sc.TLS()
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index cddedabe9..2f4c9853f 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -87,6 +87,8 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(c, pprof.name, pprof.init)
initAndLog(c, metrics.name, metrics.init)
+ initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) })
+
initLocalStorage(c)
initAndLog(c, "storage engine", func(c *cfg) {
diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go
new file mode 100644
index 000000000..bbdb71c64
--- /dev/null
+++ b/cmd/frostfs-node/tracing.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+ "context"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
+ "go.uber.org/zap"
+)
+
+func initTracing(ctx context.Context, c *cfg) {
+ conf := tracingconfig.ToTracingConfig(c.appCfg)
+
+ _, err := tracing.Setup(ctx, *conf)
+ if err != nil {
+ c.log.Error("failed init tracing", zap.Error(err))
+ }
+
+ c.closers = append(c.closers, closer{
+ name: "tracing",
+ fn: func() {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+ err := tracing.Shutdown(ctx) //cfg context cancels before close
+ if err != nil {
+ c.log.Error("failed shutdown tracing", zap.Error(err))
+ }
+ },
+ })
+}
diff --git a/config/example/node.env b/config/example/node.env
index 8034fbb23..9a1a8b052 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -184,3 +184,7 @@ FROSTFS_STORAGE_SHARD_1_PILORAMA_MAX_BATCH_SIZE=100
FROSTFS_STORAGE_SHARD_1_GC_REMOVER_BATCH_SIZE=200
#### Sleep interval between data remover tacts
FROSTFS_STORAGE_SHARD_1_GC_REMOVER_SLEEP_INTERVAL=5m
+
+FROSTFS_TRACING_ENABLED=true
+FROSTFS_TRACING_ENDPOINT="localhost"
+FROSTFS_TRACING_EXPORTER="otlp_grpc"
\ No newline at end of file
diff --git a/config/example/node.json b/config/example/node.json
index e7bb375a5..8cfb5bb69 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -243,5 +243,10 @@
}
}
}
+ },
+ "tracing": {
+ "enabled": true,
+ "endpoint": "localhost:9090",
+ "exporter": "otlp_grpc"
}
}
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 6a5ea5f03..e3b41d413 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -214,3 +214,9 @@ storage:
path: tmp/1/blob/pilorama.db
no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
perm: 0644 # permission to use for the database file and intermediate directories
+
+tracing:
+ enabled: true
+ exporter: "otlp_grpc"
+ endpoint: "localhost"
+
\ No newline at end of file
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index 73745e1b1..ab9f509ac 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -8,6 +8,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/grpc"
@@ -84,8 +85,15 @@ func dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn,
return nil, err
}
- opts := make([]grpc.DialOption, 1, 2)
- opts[0] = grpc.WithBlock()
+ opts := []grpc.DialOption{
+ grpc.WithBlock(),
+ grpc.WithChainUnaryInterceptor(
+ tracing.NewGRPCUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ tracing.NewGRPCStreamClientInterceptor(),
+ ),
+ }
// FIXME(@fyrchik): ugly hack #1322
if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") {
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 32d088c01..47299d1c9 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -10,6 +10,7 @@ import (
"math/rand"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -146,7 +147,14 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return false
}
- cc, err := grpc.DialContext(egCtx, a.URIAddr(), grpc.WithTransportCredentials(insecure.NewCredentials()))
+ cc, err := grpc.DialContext(egCtx, a.URIAddr(),
+ grpc.WithChainUnaryInterceptor(
+ tracing.NewGRPCUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ tracing.NewGRPCStreamClientInterceptor(),
+ ),
+ grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
// Failed to connect, try the next address.
return false
From 0920d848d0ee8e069b95c8db5bf1043991221d8d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 13 Mar 2023 14:37:35 +0300
Subject: [PATCH 0087/1943] [#135] get-object: Add tracing spans
Signed-off-by: Dmitrii Stepanov
---
.../internal/blobovnicza/inspect.go | 2 +-
cmd/frostfs-node/main.go | 2 +-
cmd/frostfs-node/notificator.go | 11 +++--
go.mod | 6 +--
.../blobovnicza/blobovnicza_test.go | 3 +-
pkg/local_object_storage/blobovnicza/get.go | 12 ++++-
.../blobovnicza/get_test.go | 3 +-
.../blobstor/blobovniczatree/exists.go | 16 ++++++-
.../blobstor/blobovniczatree/exists_test.go | 5 +-
.../blobstor/blobovniczatree/get.go | 31 +++++++++----
.../blobstor/blobovniczatree/get_range.go | 33 +++++++++----
.../blobstor/blobstor_test.go | 5 +-
.../blobstor/common/storage.go | 12 +++--
pkg/local_object_storage/blobstor/exists.go | 21 +++++++--
.../blobstor/exists_test.go | 9 ++--
.../blobstor/fstree/fstree.go | 46 ++++++++++++++++---
pkg/local_object_storage/blobstor/get.go | 21 +++++++--
.../blobstor/get_range.go | 23 ++++++++--
.../blobstor/internal/blobstortest/control.go | 3 +-
.../blobstor/internal/blobstortest/delete.go | 9 ++--
.../blobstor/internal/blobstortest/exists.go | 7 +--
.../blobstor/internal/blobstortest/get.go | 9 ++--
.../internal/blobstortest/get_range.go | 15 +++---
.../blobstor/memstore/memstore.go | 9 ++--
.../blobstor/memstore/memstore_test.go | 9 ++--
.../blobstor/perf_test.go | 3 +-
.../blobstor/teststore/teststore.go | 13 +++---
.../engine/control_test.go | 6 +--
pkg/local_object_storage/engine/delete.go | 2 +-
.../engine/delete_test.go | 2 +-
pkg/local_object_storage/engine/error_test.go | 20 ++++----
pkg/local_object_storage/engine/evacuate.go | 19 ++++----
.../engine/evacuate_test.go | 20 ++++----
pkg/local_object_storage/engine/exists.go | 3 +-
pkg/local_object_storage/engine/get.go | 32 ++++++++-----
pkg/local_object_storage/engine/head.go | 21 +++++----
pkg/local_object_storage/engine/head_test.go | 3 +-
pkg/local_object_storage/engine/inhume.go | 2 +-
pkg/local_object_storage/engine/lock.go | 3 +-
pkg/local_object_storage/engine/put.go | 7 +--
pkg/local_object_storage/engine/range.go | 35 +++++++++-----
.../engine/remove_copies.go | 2 +-
.../shard/control_test.go | 10 ++--
pkg/local_object_storage/shard/delete_test.go | 7 +--
pkg/local_object_storage/shard/dump_test.go | 3 +-
pkg/local_object_storage/shard/exists.go | 6 ++-
pkg/local_object_storage/shard/gc_test.go | 3 +-
pkg/local_object_storage/shard/get.go | 18 ++++++--
pkg/local_object_storage/shard/get_test.go | 5 +-
pkg/local_object_storage/shard/head.go | 17 ++++++-
pkg/local_object_storage/shard/head_test.go | 7 +--
pkg/local_object_storage/shard/inhume_test.go | 2 +-
pkg/local_object_storage/shard/lock_test.go | 3 +-
pkg/local_object_storage/shard/range.go | 22 +++++++--
pkg/local_object_storage/shard/range_test.go | 3 +-
pkg/local_object_storage/shard/reload_test.go | 2 +-
pkg/local_object_storage/shard/shard_test.go | 1 +
.../shard/shutdown_test.go | 3 +-
.../writecache/flush_test.go | 11 +++--
pkg/local_object_storage/writecache/get.go | 25 ++++++++--
pkg/local_object_storage/writecache/init.go | 3 +-
.../writecache/options.go | 3 +-
.../writecache/writecache.go | 5 +-
pkg/services/control/server/evacuate.go | 4 +-
pkg/services/notificator/deps.go | 4 +-
pkg/services/notificator/service.go | 5 +-
pkg/services/object/acl/eacl/v2/eacl_test.go | 3 +-
pkg/services/object/acl/eacl/v2/headers.go | 5 +-
pkg/services/object/acl/eacl/v2/localstore.go | 5 +-
pkg/services/object/get/get_test.go | 2 +-
pkg/services/object/get/local.go | 8 +++-
pkg/services/object/get/remote.go | 4 ++
pkg/services/object/get/service.go | 2 +-
pkg/services/object/get/util.go | 8 ++--
pkg/services/object/get/v2/get_forwarder.go | 8 ++++
.../object/get/v2/get_range_forwarder.go | 8 ++++
pkg/services/object/get/v2/head_forwarder.go | 8 ++++
pkg/services/object/get/v2/service.go | 2 +-
pkg/services/object/get/v2/util.go | 2 +-
pkg/services/replicator/process.go | 2 +-
80 files changed, 523 insertions(+), 231 deletions(-)
diff --git a/cmd/frostfs-lens/internal/blobovnicza/inspect.go b/cmd/frostfs-lens/internal/blobovnicza/inspect.go
index 3f4e8cfe4..13442a4b8 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/inspect.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/inspect.go
@@ -33,7 +33,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
var prm blobovnicza.GetPrm
prm.SetAddress(addr)
- res, err := blz.Get(prm)
+ res, err := blz.Get(cmd.Context(), prm)
common.ExitOnErr(cmd, common.Errf("could not fetch object: %w", err))
data := res.Object()
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index 2f4c9853f..a97ad3879 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -102,7 +102,7 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(c, "session", initSessionService)
initAndLog(c, "reputation", func(c *cfg) { initReputationService(ctx, c) })
- initAndLog(c, "notification", initNotifications)
+ initAndLog(c, "notification", func(c *cfg) { initNotifications(ctx, c) })
initAndLog(c, "object", initObjectService)
initAndLog(c, "tree", initTreeService)
initAndLog(c, "control", initControlService)
diff --git a/cmd/frostfs-node/notificator.go b/cmd/frostfs-node/notificator.go
index d5cb1ded4..4a310e5b0 100644
--- a/cmd/frostfs-node/notificator.go
+++ b/cmd/frostfs-node/notificator.go
@@ -23,7 +23,7 @@ type notificationSource struct {
defaultTopic string
}
-func (n *notificationSource) Iterate(epoch uint64, handler func(topic string, addr oid.Address)) {
+func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler func(topic string, addr oid.Address)) {
log := n.l.With(zap.Uint64("epoch", epoch))
listRes, err := n.e.ListContainers(engine.ListContainersPrm{})
@@ -51,7 +51,7 @@ func (n *notificationSource) Iterate(epoch uint64, handler func(topic string, ad
}
for _, a := range selectRes.AddressList() {
- err = n.processAddress(a, handler)
+ err = n.processAddress(ctx, a, handler)
if err != nil {
log.Error("notificator: could not process object",
zap.Stringer("address", a),
@@ -66,13 +66,14 @@ func (n *notificationSource) Iterate(epoch uint64, handler func(topic string, ad
}
func (n *notificationSource) processAddress(
+ ctx context.Context,
a oid.Address,
h func(topic string, addr oid.Address),
) error {
var prm engine.HeadPrm
prm.WithAddress(a)
- res, err := n.e.Head(prm)
+ res, err := n.e.Head(ctx, prm)
if err != nil {
return err
}
@@ -108,7 +109,7 @@ func (n notificationWriter) Notify(topic string, address oid.Address) {
}
}
-func initNotifications(c *cfg) {
+func initNotifications(ctx context.Context, c *cfg) {
if nodeconfig.Notification(c.appCfg).Enabled() {
topic := nodeconfig.Notification(c.appCfg).DefaultTopic()
pubKey := hex.EncodeToString(c.cfgNodeInfo.localInfo.PublicKey())
@@ -151,7 +152,7 @@ func initNotifications(c *cfg) {
addNewEpochAsyncNotificationHandler(c, func(e event.Event) {
ev := e.(netmap.NewEpoch)
- n.ProcessEpoch(ev.EpochNumber())
+ n.ProcessEpoch(ctx, ev.EpochNumber())
})
}
}
diff --git a/go.mod b/go.mod
index af85e69d5..301be6934 100644
--- a/go.mod
+++ b/go.mod
@@ -20,7 +20,6 @@ require (
github.com/multiformats/go-multiaddr v0.8.0
github.com/nats-io/nats.go v1.22.1
github.com/nspcc-dev/neo-go v0.100.1
- github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb // indirect
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.4.0
github.com/paulmach/orb v0.2.2
@@ -31,6 +30,8 @@ require (
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.2
go.etcd.io/bbolt v1.3.6
+ go.opentelemetry.io/otel v1.14.0
+ go.opentelemetry.io/otel/trace v1.14.0
go.uber.org/atomic v1.10.0
go.uber.org/zap v1.24.0
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2
@@ -80,6 +81,7 @@ require (
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
+ github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb // indirect
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.7 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -94,13 +96,11 @@ require (
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 // indirect
github.com/twmb/murmur3 v1.1.5 // indirect
github.com/urfave/cli v1.22.5 // indirect
- go.opentelemetry.io/otel v1.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.14.0 // indirect
go.opentelemetry.io/otel/sdk v1.14.0 // indirect
- go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.4.0 // indirect
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
index 4499c5d14..853628fb4 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
@@ -1,6 +1,7 @@
package blobovnicza
import (
+ "context"
"errors"
"math/rand"
"os"
@@ -39,7 +40,7 @@ func testGet(t *testing.T, blz *Blobovnicza, addr oid.Address, expObj []byte, as
pGet.SetAddress(addr)
// try to read object from Blobovnicza
- res, err := blz.Get(pGet)
+ res, err := blz.Get(context.Background(), pGet)
if assertErr != nil {
require.True(t, assertErr(err))
} else {
diff --git a/pkg/local_object_storage/blobovnicza/get.go b/pkg/local_object_storage/blobovnicza/get.go
index 776f08d2b..c1cd19e53 100644
--- a/pkg/local_object_storage/blobovnicza/get.go
+++ b/pkg/local_object_storage/blobovnicza/get.go
@@ -1,12 +1,16 @@
package blobovnicza
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// GetPrm groups the parameters of Get operation.
@@ -39,7 +43,13 @@ var errInterruptForEach = errors.New("interrupt for-each")
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is not
// presented in Blobovnicza.
-func (b *Blobovnicza) Get(prm GetPrm) (GetRes, error) {
+func (b *Blobovnicza) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Get",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
var (
data []byte
addrKey = addressKey(prm.addr)
diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go
index 98097e9c8..ad30e8d94 100644
--- a/pkg/local_object_storage/blobovnicza/get_test.go
+++ b/pkg/local_object_storage/blobovnicza/get_test.go
@@ -1,6 +1,7 @@
package blobovnicza
import (
+ "context"
"os"
"path/filepath"
"testing"
@@ -56,7 +57,7 @@ func TestBlobovnicza_Get(t *testing.T) {
prmGet.SetAddress(addr)
checkObj := func() {
- res, err := blz.Get(prmGet)
+ res, err := blz.Get(context.Background(), prmGet)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
index e13e49351..748843ee9 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
@@ -1,15 +1,27 @@
package blobovniczatree
import (
+ "context"
+ "encoding/hex"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
// Exists implements common.Storage.
-func (b *Blobovniczas) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
+func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Exists",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID)
blz, err := b.openBlobovnicza(id.String())
@@ -32,7 +44,7 @@ func (b *Blobovniczas) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
_, ok := activeCache[dirPath]
- _, err := b.getObjectFromLevel(gPrm, p, !ok)
+ _, err := b.getObjectFromLevel(ctx, gPrm, p, !ok)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not get object from level",
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
index 4f466a81a..08fd2223f 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
@@ -1,6 +1,7 @@
package blobovniczatree
import (
+ "context"
"os"
"path/filepath"
"testing"
@@ -44,7 +45,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
storageID[0]--
}
- res, err := b.Exists(common.ExistsPrm{Address: addr, StorageID: storageID})
+ res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: storageID})
require.NoError(t, err)
require.False(t, res.Exists)
})
@@ -57,7 +58,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
require.NoError(t, os.Chmod(badDir, 0))
t.Cleanup(func() { _ = os.Chmod(filepath.Join(dir, "9"), os.ModePerm) })
- res, err := b.Exists(common.ExistsPrm{Address: addr, StorageID: storageID})
+ res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: storageID})
require.Error(t, err)
require.False(t, res.Exists)
})
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
index 89ea9b641..bb84db086 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
@@ -1,14 +1,19 @@
package blobovniczatree
import (
+ "context"
+ "encoding/hex"
"fmt"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -16,7 +21,15 @@ import (
//
// If blobovnicza ID is specified, only this blobovnicza is processed.
// Otherwise, all Blobovniczas are processed descending weight.
-func (b *Blobovniczas) Get(prm common.GetPrm) (res common.GetRes, err error) {
+func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.GetRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Get",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ attribute.Bool("raw", prm.Raw),
+ ))
+ defer span.End()
+
var bPrm blobovnicza.GetPrm
bPrm.SetAddress(prm.Address)
@@ -27,7 +40,7 @@ func (b *Blobovniczas) Get(prm common.GetPrm) (res common.GetRes, err error) {
return res, err
}
- return b.getObject(blz, bPrm)
+ return b.getObject(ctx, blz, bPrm)
}
activeCache := make(map[string]struct{})
@@ -37,7 +50,7 @@ func (b *Blobovniczas) Get(prm common.GetPrm) (res common.GetRes, err error) {
_, ok := activeCache[dirPath]
- res, err = b.getObjectFromLevel(bPrm, p, !ok)
+ res, err = b.getObjectFromLevel(ctx, bPrm, p, !ok)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not get object from level",
@@ -64,7 +77,7 @@ func (b *Blobovniczas) Get(prm common.GetPrm) (res common.GetRes, err error) {
// tries to read object from particular blobovnicza.
//
// returns error if object could not be read from any blobovnicza of the same level.
-func (b *Blobovniczas) getObjectFromLevel(prm blobovnicza.GetPrm, blzPath string, tryActive bool) (common.GetRes, error) {
+func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string, tryActive bool) (common.GetRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to read from blobovnicza if it is opened
@@ -72,7 +85,7 @@ func (b *Blobovniczas) getObjectFromLevel(prm blobovnicza.GetPrm, blzPath string
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
- if res, err := b.getObject(v, prm); err == nil {
+ if res, err := b.getObject(ctx, v, prm); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not read object from opened blobovnicza",
@@ -92,7 +105,7 @@ func (b *Blobovniczas) getObjectFromLevel(prm blobovnicza.GetPrm, blzPath string
b.activeMtx.RUnlock()
if ok && tryActive {
- if res, err := b.getObject(active.blz, prm); err == nil {
+ if res, err := b.getObject(ctx, active.blz, prm); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not get object from active blobovnicza",
@@ -117,12 +130,12 @@ func (b *Blobovniczas) getObjectFromLevel(prm blobovnicza.GetPrm, blzPath string
return common.GetRes{}, err
}
- return b.getObject(blz, prm)
+ return b.getObject(ctx, blz, prm)
}
// reads object from blobovnicza and returns GetSmallRes.
-func (b *Blobovniczas) getObject(blz *blobovnicza.Blobovnicza, prm blobovnicza.GetPrm) (common.GetRes, error) {
- res, err := blz.Get(prm)
+func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.GetPrm) (common.GetRes, error) {
+ res, err := blz.Get(ctx, prm)
if err != nil {
return common.GetRes{}, err
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
index 29df23944..b12cb32d4 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
@@ -1,14 +1,20 @@
package blobovniczatree
import (
+ "context"
+ "encoding/hex"
"fmt"
"path/filepath"
+ "strconv"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -16,7 +22,16 @@ import (
//
// If blobovnicza ID is specified, only this blobovnicza is processed.
// Otherwise, all Blobovniczas are processed descending weight.
-func (b *Blobovniczas) GetRange(prm common.GetRangePrm) (res common.GetRangeRes, err error) {
+func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (res common.GetRangeRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.GetRange",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)),
+ attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)),
+ ))
+ defer span.End()
+
if prm.StorageID != nil {
id := blobovnicza.NewIDFromBytes(prm.StorageID)
blz, err := b.openBlobovnicza(id.String())
@@ -24,7 +39,7 @@ func (b *Blobovniczas) GetRange(prm common.GetRangePrm) (res common.GetRangeRes,
return common.GetRangeRes{}, err
}
- return b.getObjectRange(blz, prm)
+ return b.getObjectRange(ctx, blz, prm)
}
activeCache := make(map[string]struct{})
@@ -35,7 +50,7 @@ func (b *Blobovniczas) GetRange(prm common.GetRangePrm) (res common.GetRangeRes,
_, ok := activeCache[dirPath]
- res, err = b.getRangeFromLevel(prm, p, !ok)
+ res, err = b.getRangeFromLevel(ctx, prm, p, !ok)
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !blobovnicza.IsErrNotFound(err) {
@@ -68,7 +83,7 @@ func (b *Blobovniczas) GetRange(prm common.GetRangePrm) (res common.GetRangeRes,
// tries to read range of object payload data from particular blobovnicza.
//
// returns error if object could not be read from any blobovnicza of the same level.
-func (b *Blobovniczas) getRangeFromLevel(prm common.GetRangePrm, blzPath string, tryActive bool) (common.GetRangeRes, error) {
+func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string, tryActive bool) (common.GetRangeRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to read from blobovnicza if it is opened
@@ -76,7 +91,7 @@ func (b *Blobovniczas) getRangeFromLevel(prm common.GetRangePrm, blzPath string,
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
- res, err := b.getObjectRange(v, prm)
+ res, err := b.getObjectRange(ctx, v, prm)
switch {
case err == nil,
isErrOutOfRange(err):
@@ -101,7 +116,7 @@ func (b *Blobovniczas) getRangeFromLevel(prm common.GetRangePrm, blzPath string,
b.activeMtx.RUnlock()
if ok && tryActive {
- res, err := b.getObjectRange(active.blz, prm)
+ res, err := b.getObjectRange(ctx, active.blz, prm)
switch {
case err == nil,
isErrOutOfRange(err):
@@ -131,11 +146,11 @@ func (b *Blobovniczas) getRangeFromLevel(prm common.GetRangePrm, blzPath string,
return common.GetRangeRes{}, err
}
- return b.getObjectRange(blz, prm)
+ return b.getObjectRange(ctx, blz, prm)
}
// reads range of object payload data from blobovnicza and returns GetRangeSmallRes.
-func (b *Blobovniczas) getObjectRange(blz *blobovnicza.Blobovnicza, prm common.GetRangePrm) (common.GetRangeRes, error) {
+func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blobovnicza, prm common.GetRangePrm) (common.GetRangeRes, error) {
var gPrm blobovnicza.GetPrm
gPrm.SetAddress(prm.Address)
@@ -143,7 +158,7 @@ func (b *Blobovniczas) getObjectRange(blz *blobovnicza.Blobovnicza, prm common.G
// stores data that is compressed on BlobStor side.
// If blobovnicza learns to do the compression itself,
// we can start using GetRange.
- res, err := blz.Get(gPrm)
+ res, err := blz.Get(ctx, gPrm)
if err != nil {
return common.GetRangeRes{}, err
}
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index df001a365..738cd7eee 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -1,6 +1,7 @@
package blobstor
import (
+ "context"
"path/filepath"
"testing"
@@ -62,11 +63,11 @@ func TestCompression(t *testing.T) {
}
testGet := func(t *testing.T, b *BlobStor, i int) {
- res1, err := b.Get(common.GetPrm{Address: object.AddressOf(smallObj[i])})
+ res1, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(smallObj[i])})
require.NoError(t, err)
require.Equal(t, smallObj[i], res1.Object)
- res2, err := b.Get(common.GetPrm{Address: object.AddressOf(bigObj[i])})
+ res2, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(bigObj[i])})
require.NoError(t, err)
require.Equal(t, bigObj[i], res2.Object)
}
diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go
index 76dd6d96e..b5d186242 100644
--- a/pkg/local_object_storage/blobstor/common/storage.go
+++ b/pkg/local_object_storage/blobstor/common/storage.go
@@ -1,6 +1,10 @@
package common
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
+)
// Storage represents key-value object storage.
// It is used as a building block for a blobstor of a shard.
@@ -16,9 +20,9 @@ type Storage interface {
// This function MUST be called before Open.
SetReportErrorFunc(f func(string, error))
- Get(GetPrm) (GetRes, error)
- GetRange(GetRangePrm) (GetRangeRes, error)
- Exists(ExistsPrm) (ExistsRes, error)
+ Get(context.Context, GetPrm) (GetRes, error)
+ GetRange(context.Context, GetRangePrm) (GetRangeRes, error)
+ Exists(context.Context, ExistsPrm) (ExistsRes, error)
Put(PutPrm) (PutRes, error)
Delete(DeletePrm) (DeleteRes, error)
Iterate(IteratePrm) (IterateRes, error)
diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go
index 7a5a00957..5882c33e0 100644
--- a/pkg/local_object_storage/blobstor/exists.go
+++ b/pkg/local_object_storage/blobstor/exists.go
@@ -1,7 +1,13 @@
package blobstor
import (
+ "context"
+ "encoding/hex"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -9,15 +15,22 @@ import (
//
// Returns any error encountered that did not allow
// to completely check object existence.
-func (b *BlobStor) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
+func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Exists",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID != nil {
if len(prm.StorageID) == 0 {
- return b.storage[len(b.storage)-1].Storage.Exists(prm)
+ return b.storage[len(b.storage)-1].Storage.Exists(ctx, prm)
}
- return b.storage[0].Storage.Exists(prm)
+ return b.storage[0].Storage.Exists(ctx, prm)
}
// If there was an error during existence check below,
@@ -31,7 +44,7 @@ func (b *BlobStor) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
// error | error | log the first error, return the second
var errors []error
for i := range b.storage {
- res, err := b.storage[i].Storage.Exists(prm)
+ res, err := b.storage[i].Storage.Exists(ctx, prm)
if err == nil && res.Exists {
return res, nil
} else if err != nil {
diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go
index 4bbc256ab..805d78297 100644
--- a/pkg/local_object_storage/blobstor/exists_test.go
+++ b/pkg/local_object_storage/blobstor/exists_test.go
@@ -1,6 +1,7 @@
package blobstor
import (
+ "context"
"os"
"testing"
@@ -43,13 +44,13 @@ func TestExists(t *testing.T) {
for i := range objects {
prm.Address = objectCore.AddressOf(objects[i])
- res, err := b.Exists(prm)
+ res, err := b.Exists(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Exists)
}
prm.Address = oidtest.Address()
- res, err := b.Exists(prm)
+ res, err := b.Exists(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Exists)
@@ -60,13 +61,13 @@ func TestExists(t *testing.T) {
// Object exists, first error is logged.
prm.Address = objectCore.AddressOf(objects[0])
- res, err := b.Exists(prm)
+ res, err := b.Exists(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Exists)
// Object doesn't exist, first error is returned.
prm.Address = objectCore.AddressOf(objects[1])
- _, err = b.Exists(prm)
+ _, err = b.Exists(context.Background(), prm)
require.Error(t, err)
require.ErrorIs(t, err, teststore.ErrDiskExploded)
})
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 99484860a..462fbd63f 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -1,6 +1,7 @@
package fstree
import (
+ "context"
"crypto/sha256"
"errors"
"fmt"
@@ -11,6 +12,7 @@ import (
"strings"
"syscall"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -19,6 +21,8 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// FSTree represents an object storage as a filesystem tree.
@@ -208,7 +212,13 @@ func (t *FSTree) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
// Exists returns the path to the file with object contents if it exists in the storage
// and an error otherwise.
-func (t *FSTree) Exists(prm common.ExistsPrm) (common.ExistsRes, error) {
+func (t *FSTree) Exists(ctx context.Context, prm common.ExistsPrm) (common.ExistsRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.Exists",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ ))
+ defer span.End()
+
p := t.treePath(prm.Address)
_, err := os.Stat(p)
@@ -336,16 +346,30 @@ func (t *FSTree) PutStream(addr oid.Address, handler func(*os.File) error) error
}
// Get returns an object from the storage by address.
-func (t *FSTree) Get(prm common.GetPrm) (common.GetRes, error) {
+func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "FSTree.Get",
+ trace.WithAttributes(
+ attribute.Bool("raw", prm.Raw),
+ attribute.String("address", prm.Address.EncodeToString()),
+ ))
+ defer span.End()
+
p := t.treePath(prm.Address)
if _, err := os.Stat(p); os.IsNotExist(err) {
return common.GetRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
}
- data, err := os.ReadFile(p)
- if err != nil {
- return common.GetRes{}, err
+ var data []byte
+ var err error
+ {
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.Get.ReadFile")
+ defer span.End()
+
+ data, err = os.ReadFile(p)
+ if err != nil {
+ return common.GetRes{}, err
+ }
}
data, err = t.Decompress(data)
@@ -362,8 +386,16 @@ func (t *FSTree) Get(prm common.GetPrm) (common.GetRes, error) {
}
// GetRange implements common.Storage.
-func (t *FSTree) GetRange(prm common.GetRangePrm) (common.GetRangeRes, error) {
- res, err := t.Get(common.GetPrm{Address: prm.Address})
+func (t *FSTree) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "FSTree.GetRange",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)),
+ attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)),
+ ))
+ defer span.End()
+
+ res, err := t.Get(ctx, common.GetPrm{Address: prm.Address})
if err != nil {
return common.GetRangeRes{}, err
}
diff --git a/pkg/local_object_storage/blobstor/get.go b/pkg/local_object_storage/blobstor/get.go
index 6caa61b84..65bc87c07 100644
--- a/pkg/local_object_storage/blobstor/get.go
+++ b/pkg/local_object_storage/blobstor/get.go
@@ -1,23 +1,36 @@
package blobstor
import (
+ "context"
+ "encoding/hex"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Get reads the object from b.
// If the descriptor is present, only one sub-storage is tried,
// Otherwise, each sub-storage is tried in order.
-func (b *BlobStor) Get(prm common.GetPrm) (common.GetRes, error) {
+func (b *BlobStor) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Get",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("raw", prm.Raw),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID == nil {
for i := range b.storage {
- res, err := b.storage[i].Storage.Get(prm)
+ res, err := b.storage[i].Storage.Get(ctx, prm)
if err == nil || !errors.As(err, new(apistatus.ObjectNotFound)) {
return res, err
}
@@ -26,7 +39,7 @@ func (b *BlobStor) Get(prm common.GetPrm) (common.GetRes, error) {
return common.GetRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
}
if len(prm.StorageID) == 0 {
- return b.storage[len(b.storage)-1].Storage.Get(prm)
+ return b.storage[len(b.storage)-1].Storage.Get(ctx, prm)
}
- return b.storage[0].Storage.Get(prm)
+ return b.storage[0].Storage.Get(ctx, prm)
}
diff --git a/pkg/local_object_storage/blobstor/get_range.go b/pkg/local_object_storage/blobstor/get_range.go
index 93939cabb..ff9e72e97 100644
--- a/pkg/local_object_storage/blobstor/get_range.go
+++ b/pkg/local_object_storage/blobstor/get_range.go
@@ -1,23 +1,38 @@
package blobstor
import (
+ "context"
+ "encoding/hex"
"errors"
+ "strconv"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// GetRange reads object payload data from b.
// If the descriptor is present, only one sub-storage is tried,
// Otherwise, each sub-storage is tried in order.
-func (b *BlobStor) GetRange(prm common.GetRangePrm) (common.GetRangeRes, error) {
+func (b *BlobStor) GetRange(ctx context.Context, prm common.GetRangePrm) (common.GetRangeRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.GetRange",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ attribute.String("offset", strconv.FormatUint(prm.Range.GetOffset(), 10)),
+ attribute.String("length", strconv.FormatUint(prm.Range.GetLength(), 10)),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID == nil {
for i := range b.storage {
- res, err := b.storage[i].Storage.GetRange(prm)
+ res, err := b.storage[i].Storage.GetRange(ctx, prm)
if err == nil || !errors.As(err, new(apistatus.ObjectNotFound)) {
return res, err
}
@@ -26,7 +41,7 @@ func (b *BlobStor) GetRange(prm common.GetRangePrm) (common.GetRangeRes, error)
return common.GetRangeRes{}, logicerr.Wrap(apistatus.ObjectNotFound{})
}
if len(prm.StorageID) == 0 {
- return b.storage[len(b.storage)-1].Storage.GetRange(prm)
+ return b.storage[len(b.storage)-1].Storage.GetRange(ctx, prm)
}
- return b.storage[0].Storage.GetRange(prm)
+ return b.storage[0].Storage.GetRange(ctx, prm)
}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
index 0a74495d7..350bea96a 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
@@ -1,6 +1,7 @@
package blobstortest
import (
+ "context"
"math/rand"
"testing"
@@ -26,7 +27,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) {
prm.StorageID = objects[i].storageID
prm.Raw = true
- _, err := s.Get(prm)
+ _, err := s.Get(context.Background(), prm)
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
index f3bb4c3f2..ad0045316 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
@@ -1,6 +1,7 @@
package blobstortest
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -35,18 +36,18 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
t.Run("exists fail", func(t *testing.T) {
prm := common.ExistsPrm{Address: oidtest.Address()}
- res, err := s.Exists(prm)
+ res, err := s.Exists(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Exists)
})
t.Run("get fail", func(t *testing.T) {
prm := common.GetPrm{Address: oidtest.Address()}
- _, err := s.Get(prm)
+ _, err := s.Get(context.Background(), prm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})
t.Run("getrange fail", func(t *testing.T) {
prm := common.GetRangePrm{Address: oidtest.Address()}
- _, err := s.GetRange(prm)
+ _, err := s.GetRange(context.Background(), prm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})
})
@@ -75,7 +76,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[3].addr
prm.Raw = true
- res, err := s.Get(prm)
+ res, err := s.Get(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, objects[3].raw, res.RawData)
})
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
index ee16ddcb3..99f6a79e8 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go
@@ -1,6 +1,7 @@
package blobstortest
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -18,7 +19,7 @@ func TestExists(t *testing.T, cons Constructor, min, max uint64) {
t.Run("missing object", func(t *testing.T) {
prm := common.ExistsPrm{Address: oidtest.Address()}
- res, err := s.Exists(prm)
+ res, err := s.Exists(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Exists)
})
@@ -29,7 +30,7 @@ func TestExists(t *testing.T, cons Constructor, min, max uint64) {
t.Run("without storage ID", func(t *testing.T) {
prm.StorageID = nil
- res, err := s.Exists(prm)
+ res, err := s.Exists(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Exists)
})
@@ -37,7 +38,7 @@ func TestExists(t *testing.T, cons Constructor, min, max uint64) {
t.Run("with storage ID", func(t *testing.T) {
prm.StorageID = objects[0].storageID
- res, err := s.Exists(prm)
+ res, err := s.Exists(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Exists)
})
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
index cc3da6b4b..c5755dfba 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go
@@ -1,6 +1,7 @@
package blobstortest
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -19,7 +20,7 @@ func TestGet(t *testing.T, cons Constructor, min, max uint64) {
t.Run("missing object", func(t *testing.T) {
gPrm := common.GetPrm{Address: oidtest.Address()}
- _, err := s.Get(gPrm)
+ _, err := s.Get(context.Background(), gPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})
@@ -29,13 +30,13 @@ func TestGet(t *testing.T, cons Constructor, min, max uint64) {
// With storage ID.
gPrm.StorageID = objects[i].storageID
- res, err := s.Get(gPrm)
+ res, err := s.Get(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, objects[i].obj, res.Object)
// Without storage ID.
gPrm.StorageID = nil
- res, err = s.Get(gPrm)
+ res, err = s.Get(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, objects[i].obj, res.Object)
@@ -43,7 +44,7 @@ func TestGet(t *testing.T, cons Constructor, min, max uint64) {
gPrm.StorageID = objects[i].storageID
gPrm.Raw = true
- res, err = s.Get(gPrm)
+ res, err = s.Get(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, objects[i].raw, res.RawData)
}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
index e105fe6e8..b0c8aa95a 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go
@@ -1,6 +1,7 @@
package blobstortest
import (
+ "context"
"math"
"testing"
@@ -20,7 +21,7 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
t.Run("missing object", func(t *testing.T) {
gPrm := common.GetRangePrm{Address: oidtest.Address()}
- _, err := s.GetRange(gPrm)
+ _, err := s.GetRange(context.Background(), gPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})
@@ -38,14 +39,14 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
t.Run("without storage ID", func(t *testing.T) {
// Without storage ID.
- res, err := s.GetRange(gPrm)
+ res, err := s.GetRange(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, payload[start:stop], res.Data)
})
t.Run("with storage ID", func(t *testing.T) {
gPrm.StorageID = objects[0].storageID
- res, err := s.GetRange(gPrm)
+ res, err := s.GetRange(context.Background(), gPrm)
require.NoError(t, err)
require.Equal(t, payload[start:stop], res.Data)
})
@@ -54,7 +55,7 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
gPrm.Range.SetOffset(uint64(len(payload) + 10))
gPrm.Range.SetLength(10)
- _, err := s.GetRange(gPrm)
+ _, err := s.GetRange(context.Background(), gPrm)
require.ErrorAs(t, err, new(apistatus.ObjectOutOfRange))
})
@@ -62,7 +63,7 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
gPrm.Range.SetOffset(10)
gPrm.Range.SetLength(uint64(len(payload)))
- _, err := s.GetRange(gPrm)
+ _, err := s.GetRange(context.Background(), gPrm)
require.ErrorAs(t, err, new(apistatus.ObjectOutOfRange))
})
@@ -70,7 +71,7 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
gPrm.Range.SetOffset(0)
gPrm.Range.SetLength(1 << 63)
- _, err := s.GetRange(gPrm)
+ _, err := s.GetRange(context.Background(), gPrm)
require.ErrorAs(t, err, new(apistatus.ObjectOutOfRange))
})
@@ -78,7 +79,7 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
gPrm.Range.SetOffset(10)
gPrm.Range.SetLength(math.MaxUint64 - 2)
- _, err := s.GetRange(gPrm)
+ _, err := s.GetRange(context.Background(), gPrm)
require.ErrorAs(t, err, new(apistatus.ObjectOutOfRange))
})
}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go
index 5f6238476..4068d742e 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore.go
@@ -2,6 +2,7 @@
package memstore
import (
+ "context"
"fmt"
"sync"
@@ -32,7 +33,7 @@ func New(opts ...Option) common.Storage {
return st
}
-func (s *memstoreImpl) Get(req common.GetPrm) (common.GetRes, error) {
+func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes, error) {
key := req.Address.EncodeToString()
s.mu.RLock()
@@ -58,8 +59,8 @@ func (s *memstoreImpl) Get(req common.GetPrm) (common.GetRes, error) {
return common.GetRes{Object: obj, RawData: data}, nil
}
-func (s *memstoreImpl) GetRange(req common.GetRangePrm) (common.GetRangeRes, error) {
- getResp, err := s.Get(common.GetPrm{
+func (s *memstoreImpl) GetRange(ctx context.Context, req common.GetRangePrm) (common.GetRangeRes, error) {
+ getResp, err := s.Get(ctx, common.GetPrm{
Address: req.Address,
StorageID: req.StorageID,
})
@@ -80,7 +81,7 @@ func (s *memstoreImpl) GetRange(req common.GetRangePrm) (common.GetRangeRes, err
}, nil
}
-func (s *memstoreImpl) Exists(req common.ExistsPrm) (common.ExistsRes, error) {
+func (s *memstoreImpl) Exists(_ context.Context, req common.ExistsPrm) (common.ExistsRes, error) {
key := req.Address.EncodeToString()
s.mu.RLock()
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
index 531a7d9e7..6482b2cff 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
@@ -1,6 +1,7 @@
package memstore
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -32,13 +33,13 @@ func TestSimpleLifecycle(t *testing.T) {
}
{
- resp, err := s.Exists(common.ExistsPrm{Address: addr})
+ resp, err := s.Exists(context.Background(), common.ExistsPrm{Address: addr})
require.NoError(t, err)
require.True(t, resp.Exists)
}
{
- resp, err := s.Get(common.GetPrm{Address: addr})
+ resp, err := s.Get(context.Background(), common.GetPrm{Address: addr})
require.NoError(t, err)
require.Equal(t, obj.Payload(), resp.Object.Payload())
}
@@ -47,7 +48,7 @@ func TestSimpleLifecycle(t *testing.T) {
var objRange objectSDK.Range
objRange.SetOffset(256)
objRange.SetLength(512)
- resp, err := s.GetRange(common.GetRangePrm{
+ resp, err := s.GetRange(context.Background(), common.GetRangePrm{
Address: addr,
Range: objRange,
})
@@ -61,7 +62,7 @@ func TestSimpleLifecycle(t *testing.T) {
}
{
- resp, err := s.Exists(common.ExistsPrm{Address: addr})
+ resp, err := s.Exists(context.Background(), common.ExistsPrm{Address: addr})
require.NoError(t, err)
require.False(t, resp.Exists)
}
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index 0351eb56e..d2359335f 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -1,6 +1,7 @@
package blobstor
import (
+ "context"
"fmt"
"os"
"testing"
@@ -127,7 +128,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- _, err := st.Get(common.GetPrm{Address: addrGen.Next()})
+ _, err := st.Get(context.Background(), common.GetPrm{Address: addrGen.Next()})
require.NoError(b, err)
}
})
diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go
index f6ab4607c..03f64f0f1 100644
--- a/pkg/local_object_storage/blobstor/teststore/teststore.go
+++ b/pkg/local_object_storage/blobstor/teststore/teststore.go
@@ -13,6 +13,7 @@
package teststore
import (
+ "context"
"errors"
"fmt"
"sync"
@@ -140,36 +141,36 @@ func (s *TestStore) SetReportErrorFunc(f func(string, error)) {
}
}
-func (s *TestStore) Get(req common.GetPrm) (common.GetRes, error) {
+func (s *TestStore) Get(ctx context.Context, req common.GetPrm) (common.GetRes, error) {
switch {
case s.overrides.Get != nil:
return s.overrides.Get(req)
case s.st != nil:
- return s.st.Get(req)
+ return s.st.Get(ctx, req)
default:
panic(fmt.Sprintf("unexpected storage call: Get(%+v)", req))
}
}
-func (s *TestStore) GetRange(req common.GetRangePrm) (common.GetRangeRes, error) {
+func (s *TestStore) GetRange(ctx context.Context, req common.GetRangePrm) (common.GetRangeRes, error) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.GetRange != nil:
return s.overrides.GetRange(req)
case s.st != nil:
- return s.st.GetRange(req)
+ return s.st.GetRange(ctx, req)
default:
panic(fmt.Sprintf("unexpected storage call: GetRange(%+v)", req))
}
}
-func (s *TestStore) Exists(req common.ExistsPrm) (common.ExistsRes, error) {
+func (s *TestStore) Exists(ctx context.Context, req common.ExistsPrm) (common.ExistsRes, error) {
switch {
case s.overrides.Exists != nil:
return s.overrides.Exists(req)
case s.st != nil:
- return s.st.Exists(req)
+ return s.st.Exists(ctx, req)
default:
panic(fmt.Sprintf("unexpected storage call: Exists(%+v)", req))
}
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index 12771340b..91bec63a6 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -212,20 +212,20 @@ func TestExecBlocks(t *testing.T) {
require.NoError(t, e.BlockExecution(errBlock))
// try to exec some op
- _, err := Head(e, addr)
+ _, err := Head(context.Background(), e, addr)
require.ErrorIs(t, err, errBlock)
// resume executions
require.NoError(t, e.ResumeExecution())
- _, err = Head(e, addr) // can be any data-related op
+ _, err = Head(context.Background(), e, addr) // can be any data-related op
require.NoError(t, err)
// close
require.NoError(t, e.Close())
// try exec after close
- _, err = Head(e, addr)
+ _, err = Head(context.Background(), e, addr)
require.Error(t, err)
// try to resume
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 6ea5728bb..2105c452f 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -72,7 +72,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e
var existsPrm shard.ExistsPrm
existsPrm.SetAddress(prm.addr)
- resExists, err := sh.Exists(existsPrm)
+ resExists, err := sh.Exists(ctx, existsPrm)
if err != nil {
if shard.IsErrRemoved(err) || shard.IsErrObjectExpired(err) {
return true
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index 54d73cee8..259a40a7c 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -93,7 +93,7 @@ func checkGetError(t *testing.T, e *StorageEngine, addr oid.Address, expected an
var getPrm GetPrm
getPrm.WithAddress(addr)
- _, err := e.Get(getPrm)
+ _, err := e.Get(context.Background(), getPrm)
if expected != nil {
require.ErrorAs(t, err, expected)
} else {
diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go
index c9b194f6f..4ff019e4d 100644
--- a/pkg/local_object_storage/engine/error_test.go
+++ b/pkg/local_object_storage/engine/error_test.go
@@ -102,7 +102,7 @@ func TestErrorReporting(t *testing.T) {
te.ng.mtx.RUnlock()
require.NoError(t, err)
- _, err = te.ng.Get(GetPrm{addr: object.AddressOf(obj)})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.NoError(t, err)
checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
@@ -115,7 +115,7 @@ func TestErrorReporting(t *testing.T) {
}
for i := uint32(1); i < 3; i++ {
- _, err = te.ng.Get(GetPrm{addr: object.AddressOf(obj)})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.Error(t, err)
checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite)
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
@@ -136,7 +136,7 @@ func TestErrorReporting(t *testing.T) {
te.ng.mtx.RUnlock()
require.NoError(t, err)
- _, err = te.ng.Get(GetPrm{addr: object.AddressOf(obj)})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.NoError(t, err)
checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
@@ -149,14 +149,14 @@ func TestErrorReporting(t *testing.T) {
}
for i := uint32(1); i < errThreshold; i++ {
- _, err = te.ng.Get(GetPrm{addr: object.AddressOf(obj)})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.Error(t, err)
checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite)
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
}
for i := uint32(0); i < 2; i++ {
- _, err = te.ng.Get(GetPrm{addr: object.AddressOf(obj)})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
require.Error(t, err)
checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.DegradedReadOnly)
checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
@@ -193,9 +193,9 @@ func TestBlobstorFailback(t *testing.T) {
for i := range objs {
addr := object.AddressOf(objs[i])
- _, err = te.ng.Get(GetPrm{addr: addr})
+ _, err = te.ng.Get(context.Background(), GetPrm{addr: addr})
require.NoError(t, err)
- _, err = te.ng.GetRange(RngPrm{addr: addr})
+ _, err = te.ng.GetRange(context.Background(), RngPrm{addr: addr})
require.NoError(t, err)
}
@@ -213,15 +213,15 @@ func TestBlobstorFailback(t *testing.T) {
for i := range objs {
addr := object.AddressOf(objs[i])
- getRes, err := te.ng.Get(GetPrm{addr: addr})
+ getRes, err := te.ng.Get(context.Background(), GetPrm{addr: addr})
require.NoError(t, err)
require.Equal(t, objs[i], getRes.Object())
- rngRes, err := te.ng.GetRange(RngPrm{addr: addr, off: 1, ln: 10})
+ rngRes, err := te.ng.GetRange(context.Background(), RngPrm{addr: addr, off: 1, ln: 10})
require.NoError(t, err)
require.Equal(t, objs[i].Payload()[1:11], rngRes.Object().Payload())
- _, err = te.ng.GetRange(RngPrm{addr: addr, off: errSmallSize + 10, ln: 1})
+ _, err = te.ng.GetRange(context.Background(), RngPrm{addr: addr, off: errSmallSize + 10, ln: 1})
require.ErrorAs(t, err, &apistatus.ObjectOutOfRange{})
}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 457228bb2..f16413ea2 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"errors"
"fmt"
@@ -58,7 +59,7 @@ var errMustHaveTwoShards = errors.New("must have at least 1 spare shard")
// Evacuate moves data from one shard to the others.
// The shard being moved must be in read-only mode.
-func (e *StorageEngine) Evacuate(prm EvacuateShardPrm) (EvacuateShardRes, error) {
+func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (EvacuateShardRes, error) {
shardIDs := make([]string, len(prm.shardID))
for i := range prm.shardID {
shardIDs[i] = prm.shardID[i].String()
@@ -83,7 +84,7 @@ func (e *StorageEngine) Evacuate(prm EvacuateShardPrm) (EvacuateShardRes, error)
var res EvacuateShardRes
for _, shardID := range shardIDs {
- if err = e.evacuateShard(shardID, prm, &res, shards, weights, shardsToEvacuate); err != nil {
+ if err = e.evacuateShard(ctx, shardID, prm, &res, shards, weights, shardsToEvacuate); err != nil {
return res, err
}
}
@@ -92,7 +93,7 @@ func (e *StorageEngine) Evacuate(prm EvacuateShardPrm) (EvacuateShardRes, error)
return res, nil
}
-func (e *StorageEngine) evacuateShard(shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
var listPrm shard.ListWithCursorPrm
listPrm.WithCount(defaultEvacuateBatchSize)
@@ -113,7 +114,7 @@ func (e *StorageEngine) evacuateShard(shardID string, prm EvacuateShardPrm, res
return err
}
- if err = e.evacuateObjects(sh, listRes.AddressList(), prm, res, shards, weights, shardsToEvacuate); err != nil {
+ if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, weights, shardsToEvacuate); err != nil {
return err
}
@@ -160,7 +161,7 @@ func (e *StorageEngine) getActualShards(shardIDs []string, handlerDefined bool)
return shards, weights, nil
}
-func (e *StorageEngine) evacuateObjects(sh *shard.Shard, toEvacuate []object.AddressWithType, prm EvacuateShardPrm, res *EvacuateShardRes,
+func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.AddressWithType, prm EvacuateShardPrm, res *EvacuateShardRes,
shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) error {
for i := range toEvacuate {
addr := toEvacuate[i].Address
@@ -168,7 +169,7 @@ func (e *StorageEngine) evacuateObjects(sh *shard.Shard, toEvacuate []object.Add
var getPrm shard.GetPrm
getPrm.SetAddress(addr)
- getRes, err := sh.Get(getPrm)
+ getRes, err := sh.Get(ctx, getPrm)
if err != nil {
if prm.ignoreErrors {
continue
@@ -176,7 +177,7 @@ func (e *StorageEngine) evacuateObjects(sh *shard.Shard, toEvacuate []object.Add
return err
}
- if e.tryEvacuateObject(addr, getRes.Object(), sh, res, shards, weights, shardsToEvacuate) {
+ if e.tryEvacuateObject(ctx, addr, getRes.Object(), sh, res, shards, weights, shardsToEvacuate) {
continue
}
@@ -195,14 +196,14 @@ func (e *StorageEngine) evacuateObjects(sh *shard.Shard, toEvacuate []object.Add
return nil
}
-func (e *StorageEngine) tryEvacuateObject(addr oid.Address, object *objectSDK.Object, sh *shard.Shard, res *EvacuateShardRes,
+func (e *StorageEngine) tryEvacuateObject(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard, res *EvacuateShardRes,
shards []pooledShard, weights []float64, shardsToEvacuate map[string]*shard.Shard) bool {
hrw.SortHasherSliceByWeightValue(shards, weights, hrw.Hash([]byte(addr.EncodeToString())))
for j := range shards {
if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
continue
}
- putDone, exists := e.putToShard(shards[j].hashedShard, j, shards[j].pool, addr, object)
+ putDone, exists := e.putToShard(ctx, shards[j].hashedShard, j, shards[j].pool, addr, object)
if putDone || exists {
if putDone {
e.log.Debug("object is moved to another shard",
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 51abc4b1c..c116aeff9 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -91,7 +91,7 @@ func TestEvacuateShard(t *testing.T) {
var prm GetPrm
prm.WithAddress(objectCore.AddressOf(objects[i]))
- _, err := e.Get(prm)
+ _, err := e.Get(context.Background(), prm)
require.NoError(t, err)
}
}
@@ -102,14 +102,14 @@ func TestEvacuateShard(t *testing.T) {
prm.WithShardIDList(ids[2:3])
t.Run("must be read-only", func(t *testing.T) {
- res, err := e.Evacuate(prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, shard.ErrMustBeReadOnly)
require.Equal(t, 0, res.Count())
})
require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly))
- res, err := e.Evacuate(prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, objPerShard, res.count)
@@ -120,7 +120,7 @@ func TestEvacuateShard(t *testing.T) {
checkHasObjects(t)
// Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
- res, err = e.Evacuate(prm)
+ res, err = e.Evacuate(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, 0, res.count)
@@ -165,13 +165,13 @@ func TestEvacuateNetwork(t *testing.T) {
var prm EvacuateShardPrm
prm.shardID = ids[0:1]
- res, err := e.Evacuate(prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, errMustHaveTwoShards)
require.Equal(t, 0, res.Count())
prm.handler = acceptOneOf(objects, 2)
- res, err = e.Evacuate(prm)
+ res, err = e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, errReplication)
require.Equal(t, 2, res.Count())
})
@@ -185,14 +185,14 @@ func TestEvacuateNetwork(t *testing.T) {
prm.shardID = ids[1:2]
prm.handler = acceptOneOf(objects, 2)
- res, err := e.Evacuate(prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, errReplication)
require.Equal(t, 2, res.Count())
t.Run("no errors", func(t *testing.T) {
prm.handler = acceptOneOf(objects, 3)
- res, err := e.Evacuate(prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, 3, res.Count())
})
@@ -217,14 +217,14 @@ func TestEvacuateNetwork(t *testing.T) {
prm.shardID = evacuateIDs
prm.handler = acceptOneOf(objects, totalCount-1)
- res, err := e.Evacuate(prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.ErrorIs(t, err, errReplication)
require.Equal(t, totalCount-1, res.Count())
t.Run("no errors", func(t *testing.T) {
prm.handler = acceptOneOf(objects, totalCount)
- res, err := e.Evacuate(prm)
+ res, err := e.Evacuate(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, totalCount, res.Count())
})
diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go
index a43c7f23f..3a8e09a6d 100644
--- a/pkg/local_object_storage/engine/exists.go
+++ b/pkg/local_object_storage/engine/exists.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
@@ -16,7 +17,7 @@ func (e *StorageEngine) exists(addr oid.Address) (bool, error) {
exists := false
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
- res, err := sh.Exists(shPrm)
+ res, err := sh.Exists(context.TODO(), shPrm)
if err != nil {
if shard.IsErrRemoved(err) {
alreadyRemoved = true
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 4d0a30bc8..7d17b50fa 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -1,14 +1,18 @@
package engine
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -43,16 +47,22 @@ func (r GetRes) Object() *objectSDK.Object {
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Get(prm GetPrm) (res GetRes, err error) {
+func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
err = e.execIfNotBlocked(func() error {
- res, err = e.get(prm)
+ res, err = e.get(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) get(prm GetPrm) (GetRes, error) {
+func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.get",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
if e.metrics != nil {
defer elapsed(e.metrics.AddGetDuration)()
}
@@ -69,7 +79,7 @@ func (e *StorageEngine) get(prm GetPrm) (GetRes, error) {
Engine: e,
}
- it.tryGetWithMeta()
+ it.tryGetWithMeta(ctx)
if it.SplitInfo != nil {
return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
@@ -84,7 +94,7 @@ func (e *StorageEngine) get(prm GetPrm) (GetRes, error) {
return GetRes{}, it.OutError
}
- it.tryGetFromBlobstore()
+ it.tryGetFromBlobstore(ctx)
if it.Object == nil {
return GetRes{}, it.OutError
@@ -116,14 +126,14 @@ type getShardIterator struct {
splitInfoErr *objectSDK.SplitInfoError
}
-func (i *getShardIterator) tryGetWithMeta() {
+func (i *getShardIterator) tryGetWithMeta(ctx context.Context) {
i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
noMeta := sh.GetMode().NoMetabase()
i.ShardPrm.SetIgnoreMeta(noMeta)
i.HasDegraded = i.HasDegraded || noMeta
- res, err := sh.Get(i.ShardPrm)
+ res, err := sh.Get(ctx, i.ShardPrm)
if err == nil {
i.Object = res.Object()
return true
@@ -162,7 +172,7 @@ func (i *getShardIterator) tryGetWithMeta() {
})
}
-func (i *getShardIterator) tryGetFromBlobstore() {
+func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) {
// If the object is not found but is present in metabase,
// try to fetch it from blobstor directly. If it is found in any
// blobstor, increase the error counter for the shard which contains the meta.
@@ -174,18 +184,18 @@ func (i *getShardIterator) tryGetFromBlobstore() {
return false
}
- res, err := sh.Get(i.ShardPrm)
+ res, err := sh.Get(ctx, i.ShardPrm)
i.Object = res.Object()
return err == nil
})
}
// Get reads object from local storage by provided address.
-func Get(storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
+func Get(ctx context.Context, storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
var getPrm GetPrm
getPrm.WithAddress(addr)
- res, err := storage.Get(getPrm)
+ res, err := storage.Get(ctx, getPrm)
if err != nil {
return nil, err
}
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index 689b46de8..130e76c3d 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -1,8 +1,10 @@
package engine
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -52,16 +54,19 @@ func (r HeadRes) Header() *objectSDK.Object {
// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object was inhumed.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Head(prm HeadPrm) (res HeadRes, err error) {
+func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err error) {
err = e.execIfNotBlocked(func() error {
- res, err = e.head(prm)
+ res, err = e.head(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) head(prm HeadPrm) (HeadRes, error) {
+func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head")
+ defer span.End()
+
if e.metrics != nil {
defer elapsed(e.metrics.AddHeadDuration)()
}
@@ -81,7 +86,7 @@ func (e *StorageEngine) head(prm HeadPrm) (HeadRes, error) {
shPrm.SetRaw(prm.raw)
e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) {
- res, err := sh.Head(shPrm)
+ res, err := sh.Head(ctx, shPrm)
if err != nil {
switch {
case shard.IsErrNotFound(err):
@@ -139,11 +144,11 @@ func (e *StorageEngine) head(prm HeadPrm) (HeadRes, error) {
}
// Head reads object header from local storage by provided address.
-func Head(storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
+func Head(ctx context.Context, storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
var headPrm HeadPrm
headPrm.WithAddress(addr)
- res, err := storage.Head(headPrm)
+ res, err := storage.Head(ctx, headPrm)
if err != nil {
return nil, err
}
@@ -153,12 +158,12 @@ func Head(storage *StorageEngine, addr oid.Address) (*objectSDK.Object, error) {
// HeadRaw reads object header from local storage by provided address and raw
// flag.
-func HeadRaw(storage *StorageEngine, addr oid.Address, raw bool) (*objectSDK.Object, error) {
+func HeadRaw(ctx context.Context, storage *StorageEngine, addr oid.Address, raw bool) (*objectSDK.Object, error) {
var headPrm HeadPrm
headPrm.WithAddress(addr)
headPrm.WithRaw(raw)
- res, err := storage.Head(headPrm)
+ res, err := storage.Head(ctx, headPrm)
if err != nil {
return nil, err
}
diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go
index e2a1edc98..e5fd4b04f 100644
--- a/pkg/local_object_storage/engine/head_test.go
+++ b/pkg/local_object_storage/engine/head_test.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"os"
"testing"
@@ -66,7 +67,7 @@ func TestHeadRaw(t *testing.T) {
headPrm.WithAddress(parentAddr)
headPrm.WithRaw(true)
- _, err = e.Head(headPrm)
+ _, err = e.Head(context.Background(), headPrm)
require.Error(t, err)
var si *object.SplitInfoError
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index 2ecca5256..db9988338 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -134,7 +134,7 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh
if checkExists {
existPrm.SetAddress(addr)
- exRes, err := sh.Exists(existPrm)
+ exRes, err := sh.Exists(ctx, existPrm)
if err != nil {
if shard.IsErrRemoved(err) || shard.IsErrObjectExpired(err) {
// inhumed once - no need to be inhumed again
diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go
index 20a4d68e8..60a1d9c9f 100644
--- a/pkg/local_object_storage/engine/lock.go
+++ b/pkg/local_object_storage/engine/lock.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
@@ -69,7 +70,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi
var existsPrm shard.ExistsPrm
existsPrm.SetAddress(addrLocked)
- exRes, err := sh.Exists(existsPrm)
+ exRes, err := sh.Exists(context.TODO(), existsPrm)
if err != nil {
var siErr *objectSDK.SplitInfoError
if !errors.As(err, &siErr) {
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 68a4467f2..5f9105efc 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -72,7 +73,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
return false
}
- putDone, exists := e.putToShard(sh, ind, pool, addr, prm.obj)
+ putDone, exists := e.putToShard(context.TODO(), sh, ind, pool, addr, prm.obj)
finished = putDone || exists
return finished
})
@@ -87,7 +88,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
// putToShard puts object to sh.
// First return value is true iff put has been successfully done.
// Second return value is true iff object already exists.
-func (e *StorageEngine) putToShard(sh hashedShard, ind int, pool util.WorkerPool, addr oid.Address, obj *objectSDK.Object) (bool, bool) {
+func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int, pool util.WorkerPool, addr oid.Address, obj *objectSDK.Object) (bool, bool) {
var putSuccess, alreadyExists bool
exitCh := make(chan struct{})
@@ -98,7 +99,7 @@ func (e *StorageEngine) putToShard(sh hashedShard, ind int, pool util.WorkerPool
var existPrm shard.ExistsPrm
existPrm.SetAddress(addr)
- exists, err := sh.Exists(existPrm)
+ exists, err := sh.Exists(ctx, existPrm)
if err != nil {
if shard.IsErrObjectExpired(err) {
// object is already found but
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index 25b533bd4..3d119ac6f 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -1,14 +1,19 @@
package engine
import (
+ "context"
"errors"
+ "strconv"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -56,16 +61,24 @@ func (r RngRes) Object() *objectSDK.Object {
// Returns ErrRangeOutOfBounds if the requested object range is out of bounds.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) GetRange(prm RngPrm) (res RngRes, err error) {
+func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, err error) {
err = e.execIfNotBlocked(func() error {
- res, err = e.getRange(prm)
+ res, err = e.getRange(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) getRange(prm RngPrm) (RngRes, error) {
+func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.String("offset", strconv.FormatUint(prm.off, 10)),
+ attribute.String("length", strconv.FormatUint(prm.ln, 10)),
+ ))
+ defer span.End()
+
if e.metrics != nil {
defer elapsed(e.metrics.AddRangeDuration)()
}
@@ -83,7 +96,7 @@ func (e *StorageEngine) getRange(prm RngPrm) (RngRes, error) {
Engine: e,
}
- it.tryGetWithMeta()
+ it.tryGetWithMeta(ctx)
if it.SplitInfo != nil {
return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo))
@@ -96,7 +109,7 @@ func (e *StorageEngine) getRange(prm RngPrm) (RngRes, error) {
return RngRes{}, it.OutError
}
- it.tryGetFromBlobstor()
+ it.tryGetFromBlobstor(ctx)
if it.Object == nil {
return RngRes{}, it.OutError
@@ -114,12 +127,12 @@ func (e *StorageEngine) getRange(prm RngPrm) (RngRes, error) {
}
// GetRange reads object payload range from local storage by provided address.
-func GetRange(storage *StorageEngine, addr oid.Address, rng *objectSDK.Range) ([]byte, error) {
+func GetRange(ctx context.Context, storage *StorageEngine, addr oid.Address, rng *objectSDK.Range) ([]byte, error) {
var rangePrm RngPrm
rangePrm.WithAddress(addr)
rangePrm.WithPayloadRange(rng)
- res, err := storage.GetRange(rangePrm)
+ res, err := storage.GetRange(ctx, rangePrm)
if err != nil {
return nil, err
}
@@ -141,13 +154,13 @@ type getRangeShardIterator struct {
Engine *StorageEngine
}
-func (i *getRangeShardIterator) tryGetWithMeta() {
+func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) {
i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) {
noMeta := sh.GetMode().NoMetabase()
i.HasDegraded = i.HasDegraded || noMeta
i.ShardPrm.SetIgnoreMeta(noMeta)
- res, err := sh.GetRange(i.ShardPrm)
+ res, err := sh.GetRange(ctx, i.ShardPrm)
if err == nil {
i.Object = res.Object()
return true
@@ -185,7 +198,7 @@ func (i *getRangeShardIterator) tryGetWithMeta() {
})
}
-func (i *getRangeShardIterator) tryGetFromBlobstor() {
+func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) {
// If the object is not found but is present in metabase,
// try to fetch it from blobstor directly. If it is found in any
// blobstor, increase the error counter for the shard which contains the meta.
@@ -197,7 +210,7 @@ func (i *getRangeShardIterator) tryGetFromBlobstor() {
return false
}
- res, err := sh.GetRange(i.ShardPrm)
+ res, err := sh.GetRange(ctx, i.ShardPrm)
if shard.IsErrOutOfRange(err) {
var errOutOfRange apistatus.ObjectOutOfRange
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
index d881a52d1..c50c0844c 100644
--- a/pkg/local_object_storage/engine/remove_copies.go
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -116,7 +116,7 @@ func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address
var existsPrm shard.ExistsPrm
existsPrm.SetAddress(addr)
- res, err := shards[i].Exists(existsPrm)
+ res, err := shards[i].Exists(ctx, existsPrm)
if err != nil {
return err
} else if !res.Exists() {
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index df7e536cb..50ea20bb8 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -63,6 +63,7 @@ func TestShardOpen(t *testing.T) {
newShard := func() *Shard {
return New(
+ WithID(NewIDFromBytes([]byte{})),
WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
@@ -146,6 +147,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
require.NoError(t, err)
sh = New(
+ WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))),
WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta_new")), meta.WithEpochState(epochState{})),
@@ -155,7 +157,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
var getPrm GetPrm
getPrm.SetAddress(addr)
- _, err = sh.Get(getPrm)
+ _, err = sh.Get(context.Background(), getPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
require.NoError(t, sh.Close())
}
@@ -176,6 +178,7 @@ func TestRefillMetabase(t *testing.T) {
}
sh := New(
+ WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithMetaBaseOptions(
meta.WithPath(filepath.Join(p, "meta")),
@@ -277,7 +280,7 @@ func TestRefillMetabase(t *testing.T) {
checkObj := func(addr oid.Address, expObj *objectSDK.Object) {
headPrm.SetAddress(addr)
- res, err := sh.Head(headPrm)
+ res, err := sh.Head(context.Background(), headPrm)
if expObj == nil {
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
@@ -302,7 +305,7 @@ func TestRefillMetabase(t *testing.T) {
for _, member := range tombMembers {
headPrm.SetAddress(member)
- _, err := sh.Head(headPrm)
+ _, err := sh.Head(context.Background(), headPrm)
if exists {
require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
@@ -343,6 +346,7 @@ func TestRefillMetabase(t *testing.T) {
require.NoError(t, err)
sh = New(
+ WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithMetaBaseOptions(
meta.WithPath(filepath.Join(p, "meta_restored")),
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index 9115f3e0d..c37dfa285 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -1,6 +1,7 @@
package shard_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -51,7 +52,7 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
_, err = sh.Delete(delPrm)
require.NoError(t, err)
- _, err = sh.Get(getPrm)
+ _, err = sh.Get(context.Background(), getPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})
@@ -69,13 +70,13 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
_, err := sh.Put(putPrm)
require.NoError(t, err)
- _, err = sh.Get(getPrm)
+ _, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
_, err = sh.Delete(delPrm)
require.NoError(t, err)
- _, err = sh.Get(getPrm)
+ _, err = sh.Get(context.Background(), getPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})
}
diff --git a/pkg/local_object_storage/shard/dump_test.go b/pkg/local_object_storage/shard/dump_test.go
index 65427dd5c..9d585cc06 100644
--- a/pkg/local_object_storage/shard/dump_test.go
+++ b/pkg/local_object_storage/shard/dump_test.go
@@ -2,6 +2,7 @@ package shard_test
import (
"bytes"
+ "context"
"io"
"math/rand"
"os"
@@ -276,7 +277,7 @@ func checkRestore(t *testing.T, sh *shard.Shard, prm shard.RestorePrm, objects [
for i := range objects {
getPrm.SetAddress(object.AddressOf(objects[i]))
- res, err := sh.Get(getPrm)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.Equal(t, objects[i], res.Object())
}
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index 35b9cba9b..76e4347d4 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -1,6 +1,8 @@
package shard
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -33,7 +35,7 @@ func (p ExistsRes) Exists() bool {
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been marked as removed.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (s *Shard) Exists(prm ExistsPrm) (ExistsRes, error) {
+func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
var exists bool
var err error
@@ -45,7 +47,7 @@ func (s *Shard) Exists(prm ExistsPrm) (ExistsRes, error) {
p.Address = prm.addr
var res common.ExistsRes
- res, err = s.blobStor.Exists(p)
+ res, err = s.blobStor.Exists(ctx, p)
exists = res.Exists
} else {
var existsPrm meta.ExistsPrm
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index e7aa3614e..8012e60f8 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -33,6 +33,7 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) {
rootPath := t.TempDir()
opts := []shard.Option{
+ shard.WithID(shard.NewIDFromBytes([]byte{})),
shard.WithLogger(&logger.Logger{Logger: zap.NewNop()}),
shard.WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
@@ -115,7 +116,7 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) {
var getPrm shard.GetPrm
getPrm.SetAddress(objectCore.AddressOf(obj))
require.Eventually(t, func() bool {
- _, err = sh.Get(getPrm)
+ _, err = sh.Get(context.Background(), getPrm)
return shard.IsErrNotFound(err)
}, 3*time.Second, 1*time.Second, "expired object must be deleted")
}
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 4231c01db..3406b9338 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -1,8 +1,10 @@
package shard
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -11,6 +13,8 @@ import (
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -61,7 +65,15 @@ func (r GetRes) HasMeta() bool {
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in shard.
// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (s *Shard) Get(prm GetPrm) (GetRes, error) {
+func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Get",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("skip_meta", prm.skipMeta),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -70,7 +82,7 @@ func (s *Shard) Get(prm GetPrm) (GetRes, error) {
getPrm.Address = prm.addr
getPrm.StorageID = id
- res, err := stor.Get(getPrm)
+ res, err := stor.Get(ctx, getPrm)
if err != nil {
return nil, err
}
@@ -79,7 +91,7 @@ func (s *Shard) Get(prm GetPrm) (GetRes, error) {
}
wc := func(c writecache.Cache) (*objectSDK.Object, error) {
- return c.Get(prm.addr)
+ return c.Get(ctx, prm.addr)
}
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index 9d1975331..f670b2864 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -2,6 +2,7 @@ package shard_test
import (
"bytes"
+ "context"
"errors"
"testing"
"time"
@@ -111,11 +112,11 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
}
func testGet(t *testing.T, sh *shard.Shard, getPrm shard.GetPrm, hasWriteCache bool) (shard.GetRes, error) {
- res, err := sh.Get(getPrm)
+ res, err := sh.Get(context.Background(), getPrm)
if hasWriteCache {
require.Eventually(t, func() bool {
if shard.IsErrNotFound(err) {
- res, err = sh.Get(getPrm)
+ res, err = sh.Get(context.Background(), getPrm)
}
return !shard.IsErrNotFound(err)
}, time.Second, time.Millisecond*100)
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index 6913d3162..8e8ff9433 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -1,9 +1,14 @@
package shard
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// HeadPrm groups the parameters of Head operation.
@@ -43,7 +48,15 @@ func (r HeadRes) Object() *objectSDK.Object {
// Returns an error of type apistatus.ObjectNotFound if object is missing in Shard.
// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (s *Shard) Head(prm HeadPrm) (HeadRes, error) {
+func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Head",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("raw", prm.raw),
+ ))
+ defer span.End()
+
var obj *objectSDK.Object
var err error
if s.GetMode().NoMetabase() {
@@ -52,7 +65,7 @@ func (s *Shard) Head(prm HeadPrm) (HeadRes, error) {
getPrm.SetIgnoreMeta(true)
var res GetRes
- res, err = s.Get(getPrm)
+ res, err = s.Get(ctx, getPrm)
obj = res.Object()
} else {
var headParams meta.GetPrm
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index 36c8915b5..449626e93 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -1,6 +1,7 @@
package shard_test
import (
+ "context"
"errors"
"testing"
"time"
@@ -75,18 +76,18 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
headPrm.SetAddress(object.AddressOf(parent))
headPrm.SetRaw(false)
- head, err := sh.Head(headPrm)
+ head, err := sh.Head(context.Background(), headPrm)
require.NoError(t, err)
require.Equal(t, parent.CutPayload(), head.Object())
})
}
func testHead(t *testing.T, sh *shard.Shard, headPrm shard.HeadPrm, hasWriteCache bool) (shard.HeadRes, error) {
- res, err := sh.Head(headPrm)
+ res, err := sh.Head(context.Background(), headPrm)
if hasWriteCache {
require.Eventually(t, func() bool {
if shard.IsErrNotFound(err) {
- res, err = sh.Head(headPrm)
+ res, err = sh.Head(context.Background(), headPrm)
}
return !shard.IsErrNotFound(err)
}, time.Second, time.Millisecond*100)
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 191afab01..41845c414 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -51,6 +51,6 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
_, err = sh.Inhume(context.Background(), inhPrm)
require.NoError(t, err)
- _, err = sh.Get(getPrm)
+ _, err = sh.Get(context.Background(), getPrm)
require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
}
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 995aa1473..2bee66298 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -27,6 +27,7 @@ func TestShard_Lock(t *testing.T) {
rootPath := t.TempDir()
opts := []shard.Option{
+ shard.WithID(shard.NewIDFromBytes([]byte{})),
shard.WithLogger(&logger.Logger{Logger: zap.NewNop()}),
shard.WithBlobStorOptions(
blobstor.WithStorages([]blobstor.SubStorage{
@@ -137,7 +138,7 @@ func TestShard_Lock(t *testing.T) {
var getPrm shard.GetPrm
getPrm.SetAddress(objectcore.AddressOf(obj))
- _, err = sh.Get(getPrm)
+ _, err = sh.Get(context.Background(), getPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})
}
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index c9106c235..4355c31a3 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -1,6 +1,10 @@
package shard
import (
+ "context"
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -8,6 +12,8 @@ import (
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// RngPrm groups the parameters of GetRange operation.
@@ -66,7 +72,17 @@ func (r RngRes) HasMeta() bool {
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing.
// Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (s *Shard) GetRange(prm RngPrm) (RngRes, error) {
+func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetRange",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("skip_meta", prm.skipMeta),
+ attribute.String("offset", strconv.FormatUint(prm.off, 10)),
+ attribute.String("length", strconv.FormatUint(prm.ln, 10)),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -77,7 +93,7 @@ func (s *Shard) GetRange(prm RngPrm) (RngRes, error) {
getRngPrm.Range.SetLength(prm.ln)
getRngPrm.StorageID = id
- res, err := stor.GetRange(getRngPrm)
+ res, err := stor.GetRange(ctx, getRngPrm)
if err != nil {
return nil, err
}
@@ -89,7 +105,7 @@ func (s *Shard) GetRange(prm RngPrm) (RngRes, error) {
}
wc := func(c writecache.Cache) (*object.Object, error) {
- res, err := c.Get(prm.addr)
+ res, err := c.Get(ctx, prm.addr)
if err != nil {
return nil, err
}
diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go
index 6782dca1e..164181214 100644
--- a/pkg/local_object_storage/shard/range_test.go
+++ b/pkg/local_object_storage/shard/range_test.go
@@ -1,6 +1,7 @@
package shard_test
import (
+ "context"
"math"
"path/filepath"
"testing"
@@ -105,7 +106,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
rngPrm.SetAddress(addr)
rngPrm.SetRange(tc.rng.GetOffset(), tc.rng.GetLength())
- res, err := sh.GetRange(rngPrm)
+ res, err := sh.GetRange(context.Background(), rngPrm)
if tc.hasErr {
require.ErrorAs(t, err, &apistatus.ObjectOutOfRange{})
} else {
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index 7aa331c7f..1bfa33dd7 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -66,7 +66,7 @@ func TestShardReload(t *testing.T) {
var prm ExistsPrm
prm.SetAddress(objects[i].addr)
- res, err := sh.Exists(prm)
+ res, err := sh.Exists(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, exists, res.Exists(), "object #%d is missing", i)
}
diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go
index 027e6ca7b..fea342766 100644
--- a/pkg/local_object_storage/shard/shard_test.go
+++ b/pkg/local_object_storage/shard/shard_test.go
@@ -63,6 +63,7 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
}
opts := []shard.Option{
+ shard.WithID(shard.NewIDFromBytes([]byte{})),
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
shard.WithBlobStorOptions(bsOpts...),
shard.WithMetaBaseOptions(
diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go
index 57a982684..5fd13221a 100644
--- a/pkg/local_object_storage/shard/shutdown_test.go
+++ b/pkg/local_object_storage/shard/shutdown_test.go
@@ -1,6 +1,7 @@
package shard_test
import (
+ "context"
"math/rand"
"testing"
@@ -55,7 +56,7 @@ func TestWriteCacheObjectLoss(t *testing.T) {
for i := range objects {
getPrm.SetAddress(object.AddressOf(objects[i]))
- _, err := sh.Get(getPrm)
+ _, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err, i)
}
}
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index a6c2035db..9dc216fb3 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -1,6 +1,7 @@
package writecache
import (
+ "context"
"os"
"path/filepath"
"testing"
@@ -95,7 +96,7 @@ func TestFlush(t *testing.T) {
prm.Address = objects[i].addr
prm.StorageID = mRes.StorageID()
- res, err := bs.Get(prm)
+ res, err := bs.Get(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, objects[i].obj, res.Object)
}
@@ -119,7 +120,7 @@ func TestFlush(t *testing.T) {
_, err := mb.Get(mPrm)
require.Error(t, err)
- _, err = bs.Get(common.GetPrm{Address: objects[i].addr})
+ _, err = bs.Get(context.Background(), common.GetPrm{Address: objects[i].addr})
require.Error(t, err)
}
@@ -149,7 +150,7 @@ func TestFlush(t *testing.T) {
_, err := mb.Get(mPrm)
require.Error(t, err)
- _, err = bs.Get(common.GetPrm{Address: objects[i].addr})
+ _, err = bs.Get(context.Background(), common.GetPrm{Address: objects[i].addr})
require.Error(t, err)
}
@@ -266,7 +267,7 @@ func TestFlush(t *testing.T) {
require.NoError(t, wc.Open(true))
initWC(t, wc)
for i := range objects {
- _, err := wc.Get(objects[i].addr)
+ _, err := wc.Get(context.Background(), objects[i].addr)
require.NoError(t, err, i)
}
require.NoError(t, wc.Close())
@@ -275,7 +276,7 @@ func TestFlush(t *testing.T) {
require.NoError(t, wc.Open(false))
initWC(t, wc)
for i := range objects {
- _, err := wc.Get(objects[i].addr)
+ _, err := wc.Get(context.Background(), objects[i].addr)
if i < 2 {
require.ErrorAs(t, err, new(apistatus.ObjectNotFound), i)
} else {
diff --git a/pkg/local_object_storage/writecache/get.go b/pkg/local_object_storage/writecache/get.go
index a15f42e18..6af1bd181 100644
--- a/pkg/local_object_storage/writecache/get.go
+++ b/pkg/local_object_storage/writecache/get.go
@@ -1,6 +1,9 @@
package writecache
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -8,14 +11,22 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Get returns object from write-cache.
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in write-cache.
-func (c *cache) Get(addr oid.Address) (*objectSDK.Object, error) {
+func (c *cache) Get(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
saddr := addr.EncodeToString()
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Get",
+ trace.WithAttributes(
+ attribute.String("address", saddr),
+ ))
+ defer span.End()
+
value, err := Get(c.db, []byte(saddr))
if err == nil {
obj := objectSDK.New()
@@ -23,7 +34,7 @@ func (c *cache) Get(addr oid.Address) (*objectSDK.Object, error) {
return obj, obj.Unmarshal(value)
}
- res, err := c.fsTree.Get(common.GetPrm{Address: addr})
+ res, err := c.fsTree.Get(ctx, common.GetPrm{Address: addr})
if err != nil {
return nil, logicerr.Wrap(apistatus.ObjectNotFound{})
}
@@ -35,8 +46,14 @@ func (c *cache) Get(addr oid.Address) (*objectSDK.Object, error) {
// Head returns object header from write-cache.
//
// Returns an error of type apistatus.ObjectNotFound if the requested object is missing in write-cache.
-func (c *cache) Head(addr oid.Address) (*objectSDK.Object, error) {
- obj, err := c.Get(addr)
+func (c *cache) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Head",
+ trace.WithAttributes(
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
+ obj, err := c.Get(ctx, addr)
if err != nil {
return nil, err
}
diff --git a/pkg/local_object_storage/writecache/init.go b/pkg/local_object_storage/writecache/init.go
index 56b27ec4b..ffe7a0129 100644
--- a/pkg/local_object_storage/writecache/init.go
+++ b/pkg/local_object_storage/writecache/init.go
@@ -1,6 +1,7 @@
package writecache
import (
+ "context"
"errors"
"sync"
@@ -177,6 +178,6 @@ func (c *cache) flushStatus(addr oid.Address) (bool, bool) {
prm.SetAddress(addr)
mRes, _ := c.metabase.StorageID(prm)
- res, err := c.blobstor.Exists(common.ExistsPrm{Address: addr, StorageID: mRes.StorageID()})
+ res, err := c.blobstor.Exists(context.TODO(), common.ExistsPrm{Address: addr, StorageID: mRes.StorageID()})
return err == nil && res.Exists, false
}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 59c7c9d47..cca8986b3 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,6 +1,7 @@
package writecache
import (
+ "context"
"io/fs"
"os"
"time"
@@ -27,7 +28,7 @@ type metabase interface {
type blob interface {
Put(common.PutPrm) (common.PutRes, error)
NeedsCompression(obj *objectSDK.Object) bool
- Exists(res common.ExistsPrm) (common.ExistsRes, error)
+ Exists(ctx context.Context, res common.ExistsPrm) (common.ExistsRes, error)
}
type options struct {
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index a5b8ff0a3..24070dbda 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -1,6 +1,7 @@
package writecache
import (
+ "context"
"os"
"sync"
@@ -23,8 +24,8 @@ type Info struct {
// Cache represents write-cache for objects.
type Cache interface {
- Get(address oid.Address) (*object.Object, error)
- Head(oid.Address) (*object.Object, error)
+ Get(ctx context.Context, address oid.Address) (*object.Object, error)
+ Head(context.Context, oid.Address) (*object.Object, error)
// Delete removes object referenced by the given oid.Address from the
// Cache. Returns any error encountered that prevented the object to be
// removed.
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
index 6c064efa3..b64a91883 100644
--- a/pkg/services/control/server/evacuate.go
+++ b/pkg/services/control/server/evacuate.go
@@ -19,7 +19,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) EvacuateShard(_ context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
+func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -30,7 +30,7 @@ func (s *Server) EvacuateShard(_ context.Context, req *control.EvacuateShardRequ
prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
prm.WithFaultHandler(s.replicate)
- res, err := s.s.Evacuate(prm)
+ res, err := s.s.Evacuate(ctx, prm)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/notificator/deps.go b/pkg/services/notificator/deps.go
index ded4b4b7d..d6330f788 100644
--- a/pkg/services/notificator/deps.go
+++ b/pkg/services/notificator/deps.go
@@ -1,6 +1,8 @@
package notificator
import (
+ "context"
+
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -8,7 +10,7 @@ import (
type NotificationSource interface {
// Iterate must iterate over all notifications for the
// provided epoch and call handler for all of them.
- Iterate(epoch uint64, handler func(topic string, addr oid.Address))
+ Iterate(ctx context.Context, epoch uint64, handler func(topic string, addr oid.Address))
}
// NotificationWriter notifies all the subscribers
diff --git a/pkg/services/notificator/service.go b/pkg/services/notificator/service.go
index 096618300..0a8a5d96d 100644
--- a/pkg/services/notificator/service.go
+++ b/pkg/services/notificator/service.go
@@ -1,6 +1,7 @@
package notificator
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -71,11 +72,11 @@ func New(prm *Prm) *Notificator {
// ProcessEpoch looks for all objects with defined epoch in the storage
// and passes their addresses to the NotificationWriter.
-func (n *Notificator) ProcessEpoch(epoch uint64) {
+func (n *Notificator) ProcessEpoch(ctx context.Context, epoch uint64) {
logger := n.l.With(zap.Uint64("epoch", epoch))
logger.Debug("notificator: start processing object notifications")
- n.ns.Iterate(epoch, func(topic string, addr oid.Address) {
+ n.ns.Iterate(ctx, epoch, func(topic string, addr oid.Address) {
n.l.Debug("notificator: processing object notification",
zap.String("topic", topic),
zap.Stringer("address", addr),
diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go
index 4570e271a..ce5d98d5d 100644
--- a/pkg/services/object/acl/eacl/v2/eacl_test.go
+++ b/pkg/services/object/acl/eacl/v2/eacl_test.go
@@ -1,6 +1,7 @@
package v2
import (
+ "context"
"crypto/ecdsa"
"errors"
"testing"
@@ -26,7 +27,7 @@ type testLocalStorage struct {
err error
}
-func (s *testLocalStorage) Head(addr oid.Address) (*object.Object, error) {
+func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*object.Object, error) {
require.True(s.t, addr.Container().Equals(s.expAddr.Container()))
require.True(s.t, addr.Object().Equals(s.expAddr.Object()))
diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go
index 736c05763..095810848 100644
--- a/pkg/services/object/acl/eacl/v2/headers.go
+++ b/pkg/services/object/acl/eacl/v2/headers.go
@@ -1,6 +1,7 @@
package v2
import (
+ "context"
"errors"
"fmt"
@@ -27,7 +28,7 @@ type cfg struct {
}
type ObjectStorage interface {
- Head(oid.Address) (*object.Object, error)
+ Head(context.Context, oid.Address) (*object.Object, error)
}
type Request interface {
@@ -207,7 +208,7 @@ func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, b
addr.SetContainer(cnr)
addr.SetObject(*idObj)
- obj, err := h.storage.Head(addr)
+ obj, err := h.storage.Head(context.TODO(), addr)
if err == nil {
return headersFromObject(obj, cnr, idObj), true
}
diff --git a/pkg/services/object/acl/eacl/v2/localstore.go b/pkg/services/object/acl/eacl/v2/localstore.go
index 40271f1cd..0f23e9881 100644
--- a/pkg/services/object/acl/eacl/v2/localstore.go
+++ b/pkg/services/object/acl/eacl/v2/localstore.go
@@ -1,6 +1,7 @@
package v2
import (
+ "context"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
@@ -12,10 +13,10 @@ type localStorage struct {
ls *engine.StorageEngine
}
-func (s *localStorage) Head(addr oid.Address) (*objectSDK.Object, error) {
+func (s *localStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
if s.ls == nil {
return nil, io.ErrUnexpectedEOF
}
- return engine.Head(s.ls, addr)
+ return engine.Head(ctx, s.ls, addr)
}
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 3d1a95cbb..319bc6b58 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -139,7 +139,7 @@ func (c *testClient) addResult(addr oid.Address, obj *objectSDK.Object, err erro
}{obj: obj, err: err}
}
-func (s *testStorage) get(exec *execCtx) (*objectSDK.Object, error) {
+func (s *testStorage) get(_ context.Context, exec *execCtx) (*objectSDK.Object, error) {
var (
ok bool
obj *objectSDK.Object
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index a6a77729c..82ed911e4 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -4,15 +4,21 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
func (exec *execCtx) executeLocal(ctx context.Context) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "getService.executeLocal")
+ defer func() {
+ span.End()
+ }()
+
var err error
- exec.collectedObject, err = exec.svc.localStorage.get(exec)
+ exec.collectedObject, err = exec.svc.localStorage.get(ctx, exec)
var errSplitInfo *objectSDK.SplitInfoError
var errRemoved apistatus.ObjectAlreadyRemoved
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 1532bade0..697e48ee2 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -11,6 +12,9 @@ import (
)
func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
+ defer span.End()
+
exec.log.Debug("processing node...")
client, ok := exec.remoteClient(info)
diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go
index dfa3b48ac..a9391d016 100644
--- a/pkg/services/object/get/service.go
+++ b/pkg/services/object/get/service.go
@@ -31,7 +31,7 @@ type cfg struct {
log *logger.Logger
localStorage interface {
- get(*execCtx) (*object.Object, error)
+ get(context.Context, *execCtx) (*object.Object, error)
}
clientCache interface {
diff --git a/pkg/services/object/get/util.go b/pkg/services/object/get/util.go
index 09d8c67af..dd4ace407 100644
--- a/pkg/services/object/get/util.go
+++ b/pkg/services/object/get/util.go
@@ -200,13 +200,13 @@ func (c *clientWrapper) get(ctx context.Context, exec *execCtx, key *ecdsa.Priva
return res.Object(), nil
}
-func (e *storageEngineWrapper) get(exec *execCtx) (*object.Object, error) {
+func (e *storageEngineWrapper) get(ctx context.Context, exec *execCtx) (*object.Object, error) {
if exec.headOnly() {
var headPrm engine.HeadPrm
headPrm.WithAddress(exec.address())
headPrm.WithRaw(exec.isRaw())
- r, err := e.engine.Head(headPrm)
+ r, err := e.engine.Head(ctx, headPrm)
if err != nil {
return nil, err
}
@@ -217,7 +217,7 @@ func (e *storageEngineWrapper) get(exec *execCtx) (*object.Object, error) {
getRange.WithAddress(exec.address())
getRange.WithPayloadRange(rng)
- r, err := e.engine.GetRange(getRange)
+ r, err := e.engine.GetRange(ctx, getRange)
if err != nil {
return nil, err
}
@@ -227,7 +227,7 @@ func (e *storageEngineWrapper) get(exec *execCtx) (*object.Object, error) {
var getPrm engine.GetPrm
getPrm.WithAddress(exec.address())
- r, err := e.engine.Get(getPrm)
+ r, err := e.engine.Get(ctx, getPrm)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
index 330a0642f..8163ae928 100644
--- a/pkg/services/object/get/v2/get_forwarder.go
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -9,6 +9,7 @@ import (
"sync"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
@@ -18,6 +19,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type getRequestForwarder struct {
@@ -30,6 +33,11 @@ type getRequestForwarder struct {
}
func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "getRequestForwarder.forwardRequestToNode",
+ trace.WithAttributes(attribute.String("address", addr.String())),
+ )
+ defer span.End()
+
var err error
// once compose and resign forwarding request
diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go
index 5893f8de3..9cf6384ed 100644
--- a/pkg/services/object/get/v2/get_range_forwarder.go
+++ b/pkg/services/object/get/v2/get_range_forwarder.go
@@ -9,6 +9,7 @@ import (
"sync"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
@@ -18,6 +19,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type getRangeRequestForwarder struct {
@@ -29,6 +32,11 @@ type getRangeRequestForwarder struct {
}
func (f *getRangeRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "getRangeRequestForwarder.forwardRequestToNode",
+ trace.WithAttributes(attribute.String("address", addr.String())),
+ )
+ defer span.End()
+
var err error
// once compose and resign forwarding request
diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go
index 45c0174fd..e1d4c02db 100644
--- a/pkg/services/object/get/v2/head_forwarder.go
+++ b/pkg/services/object/get/v2/head_forwarder.go
@@ -8,6 +8,7 @@ import (
"sync"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
@@ -19,6 +20,8 @@ import (
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type headRequestForwarder struct {
@@ -30,6 +33,11 @@ type headRequestForwarder struct {
}
func (f *headRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*object.Object, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "headRequestForwarder.forwardRequestToNode",
+ trace.WithAttributes(attribute.String("address", addr.String())),
+ )
+ defer span.End()
+
var err error
// once compose and resign forwarding request
diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go
index 3fd8cd04a..1bd8befaf 100644
--- a/pkg/services/object/get/v2/service.go
+++ b/pkg/services/object/get/v2/service.go
@@ -96,7 +96,7 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV
resp := new(objectV2.HeadResponse)
resp.SetBody(new(objectV2.HeadResponseBody))
- p, err := s.toHeadPrm(ctx, req, resp)
+ p, err := s.toHeadPrm(req, resp)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go
index 3a50a6ca5..69bed23f4 100644
--- a/pkg/services/object/get/v2/util.go
+++ b/pkg/services/object/get/v2/util.go
@@ -215,7 +215,7 @@ func (w *headResponseWriter) WriteHeader(_ context.Context, hdr *object.Object)
return nil
}
-func (s *Service) toHeadPrm(ctx context.Context, req *objectV2.HeadRequest, resp *objectV2.HeadResponse) (*getsvc.HeadPrm, error) {
+func (s *Service) toHeadPrm(req *objectV2.HeadRequest, resp *objectV2.HeadResponse) (*getsvc.HeadPrm, error) {
body := req.GetBody()
addrV2 := body.GetAddress()
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 476a5bc0a..53df81b77 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -27,7 +27,7 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult)
if task.obj == nil {
var err error
- task.obj, err = engine.Get(p.localStorage, task.addr)
+ task.obj, err = engine.Get(ctx, p.localStorage, task.addr)
if err != nil {
p.log.Error("could not get object from local storage",
zap.Stringer("object", task.addr),
From b2ca73054722fcc4f73746d75b1599377c82040a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 11 Apr 2023 18:03:57 +0300
Subject: [PATCH 0088/1943] [#135] acl: Add tracing spans
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/acl/v2/service.go | 78 +++++++++++++++++++++++++++
1 file changed, 78 insertions(+)
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index 6544d78d7..93c1c65f8 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -6,6 +6,7 @@ import (
"fmt"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@@ -111,6 +112,14 @@ func New(opts ...Option) Service {
// Get implements ServiceServer interface, makes ACL checks and calls
// next Get method in the ServiceServer pipeline.
func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
+ spanClosed := false
+ _, span := tracing.StartSpanFromContext(stream.Context(), "checkACL")
+ defer func() {
+ if !spanClosed {
+ span.End()
+ }
+ }()
+
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return err
@@ -158,6 +167,9 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream
return eACLErr(reqInfo, err)
}
+ span.End()
+ spanClosed = true
+
return b.next.Get(request, &getStreamBasicChecker{
GetObjectStream: stream,
info: reqInfo,
@@ -177,6 +189,14 @@ func (b Service) Put() (object.PutObjectStream, error) {
func (b Service) Head(
ctx context.Context,
request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
+ spanClosed := false
+ _, span := tracing.StartSpanFromContext(ctx, "checkACL")
+ defer func() {
+ if !spanClosed {
+ span.End()
+ }
+ }()
+
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
@@ -224,6 +244,9 @@ func (b Service) Head(
return nil, eACLErr(reqInfo, err)
}
+ span.End()
+ spanClosed = true
+
resp, err := b.next.Head(ctx, request)
if err == nil {
if err = b.checker.CheckEACL(resp, reqInfo); err != nil {
@@ -235,6 +258,14 @@ func (b Service) Head(
}
func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
+ spanClosed := false
+ _, span := tracing.StartSpanFromContext(stream.Context(), "checkACL")
+ defer func() {
+ if !spanClosed {
+ span.End()
+ }
+ }()
+
id, err := getContainerIDFromRequest(request)
if err != nil {
return err
@@ -275,6 +306,9 @@ func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStr
return eACLErr(reqInfo, err)
}
+ span.End()
+ spanClosed = true
+
return b.next.Search(request, &searchStreamBasicChecker{
checker: b.checker,
SearchStream: stream,
@@ -285,6 +319,14 @@ func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStr
func (b Service) Delete(
ctx context.Context,
request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
+ spanClosed := false
+ _, span := tracing.StartSpanFromContext(ctx, "checkACL")
+ defer func() {
+ if !spanClosed {
+ span.End()
+ }
+ }()
+
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
@@ -332,10 +374,21 @@ func (b Service) Delete(
return nil, eACLErr(reqInfo, err)
}
+ span.End()
+ spanClosed = true
+
return b.next.Delete(ctx, request)
}
func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
+ spanClosed := false
+ _, span := tracing.StartSpanFromContext(stream.Context(), "checkACL")
+ defer func() {
+ if !spanClosed {
+ span.End()
+ }
+ }()
+
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return err
@@ -383,6 +436,9 @@ func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetOb
return eACLErr(reqInfo, err)
}
+ span.End()
+ spanClosed = true
+
return b.next.GetRange(request, &rangeStreamBasicChecker{
checker: b.checker,
GetObjectRangeStream: stream,
@@ -393,6 +449,14 @@ func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetOb
func (b Service) GetRangeHash(
ctx context.Context,
request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
+ spanClosed := false
+ _, span := tracing.StartSpanFromContext(ctx, "checkACL")
+ defer func() {
+ if !spanClosed {
+ span.End()
+ }
+ }()
+
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
@@ -440,10 +504,21 @@ func (b Service) GetRangeHash(
return nil, eACLErr(reqInfo, err)
}
+ span.End()
+ spanClosed = true
+
return b.next.GetRangeHash(ctx, request)
}
func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
+ spanClosed := false
+ _, span := tracing.StartSpanFromContext(ctx, "checkACL")
+ defer func() {
+ if !spanClosed {
+ span.End()
+ }
+ }()
+
body := request.GetBody()
if body == nil {
return errEmptyBody
@@ -512,6 +587,9 @@ func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRe
}
}
+ span.End()
+ spanClosed = true
+
return p.next.Send(ctx, request)
}
From 57789802527e848577bea4e9567f6bb6587bef4d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 11 Apr 2023 18:21:13 +0300
Subject: [PATCH 0089/1943] [#135] signature: Add tracing
Add tracing spans to request verification and response signing.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/sign.go | 6 +++---
pkg/services/util/sign.go | 31 ++++++++++++++++++++-----------
2 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 9d66c76ba..4eb5be365 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -54,7 +54,7 @@ func (s *getStreamSigner) Send(resp *object.GetResponse) error {
}
func (s *SignService) Get(req *object.GetRequest, stream GetObjectStream) error {
- return s.sigSvc.HandleServerStreamRequest(req,
+ return s.sigSvc.HandleServerStreamRequest(stream.Context(), req,
func(resp util.ResponseMessage) error {
return stream.Send(resp.(*object.GetResponse))
},
@@ -126,7 +126,7 @@ func (s *searchStreamSigner) Send(resp *object.SearchResponse) error {
}
func (s *SignService) Search(req *object.SearchRequest, stream SearchStream) error {
- return s.sigSvc.HandleServerStreamRequest(req,
+ return s.sigSvc.HandleServerStreamRequest(stream.Context(), req,
func(resp util.ResponseMessage) error {
return stream.Send(resp.(*object.SearchResponse))
},
@@ -176,7 +176,7 @@ func (s *getRangeStreamSigner) Send(resp *object.GetRangeResponse) error {
}
func (s *SignService) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error {
- return s.sigSvc.HandleServerStreamRequest(req,
+ return s.sigSvc.HandleServerStreamRequest(stream.Context(), req,
func(resp util.ResponseMessage) error {
return stream.Send(resp.(*object.GetRangeResponse))
},
diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go
index cb4be3084..dbfde7051 100644
--- a/pkg/services/util/sign.go
+++ b/pkg/services/util/sign.go
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -67,8 +68,7 @@ func (s *RequestMessageStreamer) Send(ctx context.Context, req any) error {
var err error
- // verify request signatures
- if err = signature.VerifyServiceMessage(req); err != nil {
+ if err = verifyRequestSignature(ctx, req); err != nil {
err = fmt.Errorf("could not verify request: %w", err)
} else {
err = s.send(ctx, req)
@@ -112,7 +112,7 @@ func (s *RequestMessageStreamer) CloseAndRecv(ctx context.Context) (ResponseMess
setStatusV2(resp, err)
}
- if err = signResponse(s.key, resp, s.statusSupported); err != nil {
+ if err = signResponse(ctx, s.key, resp, s.statusSupported); err != nil {
return nil, err
}
@@ -130,6 +130,7 @@ func (s *SignService) CreateRequestStreamer(sender RequestMessageWriter, closer
}
func (s *SignService) HandleServerStreamRequest(
+ ctx context.Context,
req any,
respWriter ResponseMessageWriter,
blankResp ResponseConstructor,
@@ -142,12 +143,11 @@ func (s *SignService) HandleServerStreamRequest(
var err error
- // verify request signatures
- if err = signature.VerifyServiceMessage(req); err != nil {
+ if err = verifyRequestSignature(ctx, req); err != nil {
err = fmt.Errorf("could not verify request: %w", err)
} else {
err = respWriterCaller(func(resp ResponseMessage) error {
- if err := signResponse(s.key, resp, statusSupported); err != nil {
+ if err := signResponse(ctx, s.key, resp, statusSupported); err != nil {
return err
}
@@ -164,7 +164,7 @@ func (s *SignService) HandleServerStreamRequest(
setStatusV2(resp, err)
- _ = signResponse(s.key, resp, false) // panics or returns nil with false arg
+ _ = signResponse(ctx, s.key, resp, false) // panics or returns nil with false arg
return respWriter(resp)
}
@@ -183,8 +183,7 @@ func (s *SignService) HandleUnaryRequest(ctx context.Context, req any, handler U
err error
)
- // verify request signatures
- if err = signature.VerifyServiceMessage(req); err != nil {
+ if err = verifyRequestSignature(ctx, req); err != nil {
var sigErr apistatus.SignatureVerification
sigErr.SetMessage(err.Error())
@@ -205,7 +204,7 @@ func (s *SignService) HandleUnaryRequest(ctx context.Context, req any, handler U
}
// sign the response
- if err = signResponse(s.key, resp, statusSupported); err != nil {
+ if err = signResponse(ctx, s.key, resp, statusSupported); err != nil {
return nil, err
}
@@ -233,7 +232,10 @@ func setStatusV2(resp ResponseMessage, err error) {
// The signature error affects the result depending on the protocol version:
// - if status return is supported, panics since we cannot return the failed status, because it will not be signed;
// - otherwise, returns error in order to transport it directly.
-func signResponse(key *ecdsa.PrivateKey, resp any, statusSupported bool) error {
+func signResponse(ctx context.Context, key *ecdsa.PrivateKey, resp any, statusSupported bool) error {
+ _, span := tracing.StartSpanFromContext(ctx, "signResponse")
+ defer span.End()
+
err := signature.SignServiceMessage(key, resp)
if err != nil {
err = fmt.Errorf("could not sign response: %w", err)
@@ -247,3 +249,10 @@ func signResponse(key *ecdsa.PrivateKey, resp any, statusSupported bool) error {
return err
}
+
+func verifyRequestSignature(ctx context.Context, req any) error {
+ _, span := tracing.StartSpanFromContext(ctx, "verifyRequestSignature")
+ defer span.End()
+
+ return signature.VerifyServiceMessage(req)
+}
From 5d2affa5cd104104d266e2bbff9534e71125789e Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 12 Apr 2023 10:59:57 +0300
Subject: [PATCH 0090/1943] testutil: Fix linter warning
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/internal/testutil/generators.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go
index 4ee92db88..1a1f3cf9e 100644
--- a/pkg/local_object_storage/internal/testutil/generators.go
+++ b/pkg/local_object_storage/internal/testutil/generators.go
@@ -82,7 +82,7 @@ var _ ObjectGenerator = &RandObjGenerator{}
func (g *RandObjGenerator) Next() *object.Object {
var id oid.ID
- rand.Read(id[:])
+ _, _ = rand.Read(id[:])
return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize)
}
From 08769f413f207f8df65f9a92f4ce877ee7acb36b Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 12 Apr 2023 16:54:04 +0300
Subject: [PATCH 0091/1943] Revert "[#135] acl: Add tracing spans"
This reverts commit b2ca73054722fcc4f73746d75b1599377c82040a.
---
pkg/services/object/acl/v2/service.go | 78 ---------------------------
1 file changed, 78 deletions(-)
diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go
index 93c1c65f8..6544d78d7 100644
--- a/pkg/services/object/acl/v2/service.go
+++ b/pkg/services/object/acl/v2/service.go
@@ -6,7 +6,6 @@ import (
"fmt"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
@@ -112,14 +111,6 @@ func New(opts ...Option) Service {
// Get implements ServiceServer interface, makes ACL checks and calls
// next Get method in the ServiceServer pipeline.
func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error {
- spanClosed := false
- _, span := tracing.StartSpanFromContext(stream.Context(), "checkACL")
- defer func() {
- if !spanClosed {
- span.End()
- }
- }()
-
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return err
@@ -167,9 +158,6 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream
return eACLErr(reqInfo, err)
}
- span.End()
- spanClosed = true
-
return b.next.Get(request, &getStreamBasicChecker{
GetObjectStream: stream,
info: reqInfo,
@@ -189,14 +177,6 @@ func (b Service) Put() (object.PutObjectStream, error) {
func (b Service) Head(
ctx context.Context,
request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) {
- spanClosed := false
- _, span := tracing.StartSpanFromContext(ctx, "checkACL")
- defer func() {
- if !spanClosed {
- span.End()
- }
- }()
-
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
@@ -244,9 +224,6 @@ func (b Service) Head(
return nil, eACLErr(reqInfo, err)
}
- span.End()
- spanClosed = true
-
resp, err := b.next.Head(ctx, request)
if err == nil {
if err = b.checker.CheckEACL(resp, reqInfo); err != nil {
@@ -258,14 +235,6 @@ func (b Service) Head(
}
func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error {
- spanClosed := false
- _, span := tracing.StartSpanFromContext(stream.Context(), "checkACL")
- defer func() {
- if !spanClosed {
- span.End()
- }
- }()
-
id, err := getContainerIDFromRequest(request)
if err != nil {
return err
@@ -306,9 +275,6 @@ func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStr
return eACLErr(reqInfo, err)
}
- span.End()
- spanClosed = true
-
return b.next.Search(request, &searchStreamBasicChecker{
checker: b.checker,
SearchStream: stream,
@@ -319,14 +285,6 @@ func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStr
func (b Service) Delete(
ctx context.Context,
request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) {
- spanClosed := false
- _, span := tracing.StartSpanFromContext(ctx, "checkACL")
- defer func() {
- if !spanClosed {
- span.End()
- }
- }()
-
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
@@ -374,21 +332,10 @@ func (b Service) Delete(
return nil, eACLErr(reqInfo, err)
}
- span.End()
- spanClosed = true
-
return b.next.Delete(ctx, request)
}
func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error {
- spanClosed := false
- _, span := tracing.StartSpanFromContext(stream.Context(), "checkACL")
- defer func() {
- if !spanClosed {
- span.End()
- }
- }()
-
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return err
@@ -436,9 +383,6 @@ func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetOb
return eACLErr(reqInfo, err)
}
- span.End()
- spanClosed = true
-
return b.next.GetRange(request, &rangeStreamBasicChecker{
checker: b.checker,
GetObjectRangeStream: stream,
@@ -449,14 +393,6 @@ func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetOb
func (b Service) GetRangeHash(
ctx context.Context,
request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) {
- spanClosed := false
- _, span := tracing.StartSpanFromContext(ctx, "checkACL")
- defer func() {
- if !spanClosed {
- span.End()
- }
- }()
-
cnr, err := getContainerIDFromRequest(request)
if err != nil {
return nil, err
@@ -504,21 +440,10 @@ func (b Service) GetRangeHash(
return nil, eACLErr(reqInfo, err)
}
- span.End()
- spanClosed = true
-
return b.next.GetRangeHash(ctx, request)
}
func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error {
- spanClosed := false
- _, span := tracing.StartSpanFromContext(ctx, "checkACL")
- defer func() {
- if !spanClosed {
- span.End()
- }
- }()
-
body := request.GetBody()
if body == nil {
return errEmptyBody
@@ -587,9 +512,6 @@ func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRe
}
}
- span.End()
- spanClosed = true
-
return p.next.Send(ctx, request)
}
From 04727ce1d6fd96646698a7c3357d1477ed24836b Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 12 Apr 2023 16:56:56 +0300
Subject: [PATCH 0092/1943] Revert "[#135] signature: Add tracing"
This reverts commit 57789802527e848577bea4e9567f6bb6587bef4d.
---
pkg/services/object/sign.go | 6 +++---
pkg/services/util/sign.go | 31 +++++++++++--------------------
2 files changed, 14 insertions(+), 23 deletions(-)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 4eb5be365..9d66c76ba 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -54,7 +54,7 @@ func (s *getStreamSigner) Send(resp *object.GetResponse) error {
}
func (s *SignService) Get(req *object.GetRequest, stream GetObjectStream) error {
- return s.sigSvc.HandleServerStreamRequest(stream.Context(), req,
+ return s.sigSvc.HandleServerStreamRequest(req,
func(resp util.ResponseMessage) error {
return stream.Send(resp.(*object.GetResponse))
},
@@ -126,7 +126,7 @@ func (s *searchStreamSigner) Send(resp *object.SearchResponse) error {
}
func (s *SignService) Search(req *object.SearchRequest, stream SearchStream) error {
- return s.sigSvc.HandleServerStreamRequest(stream.Context(), req,
+ return s.sigSvc.HandleServerStreamRequest(req,
func(resp util.ResponseMessage) error {
return stream.Send(resp.(*object.SearchResponse))
},
@@ -176,7 +176,7 @@ func (s *getRangeStreamSigner) Send(resp *object.GetRangeResponse) error {
}
func (s *SignService) GetRange(req *object.GetRangeRequest, stream GetObjectRangeStream) error {
- return s.sigSvc.HandleServerStreamRequest(stream.Context(), req,
+ return s.sigSvc.HandleServerStreamRequest(req,
func(resp util.ResponseMessage) error {
return stream.Send(resp.(*object.GetRangeResponse))
},
diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go
index dbfde7051..cb4be3084 100644
--- a/pkg/services/util/sign.go
+++ b/pkg/services/util/sign.go
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -68,7 +67,8 @@ func (s *RequestMessageStreamer) Send(ctx context.Context, req any) error {
var err error
- if err = verifyRequestSignature(ctx, req); err != nil {
+ // verify request signatures
+ if err = signature.VerifyServiceMessage(req); err != nil {
err = fmt.Errorf("could not verify request: %w", err)
} else {
err = s.send(ctx, req)
@@ -112,7 +112,7 @@ func (s *RequestMessageStreamer) CloseAndRecv(ctx context.Context) (ResponseMess
setStatusV2(resp, err)
}
- if err = signResponse(ctx, s.key, resp, s.statusSupported); err != nil {
+ if err = signResponse(s.key, resp, s.statusSupported); err != nil {
return nil, err
}
@@ -130,7 +130,6 @@ func (s *SignService) CreateRequestStreamer(sender RequestMessageWriter, closer
}
func (s *SignService) HandleServerStreamRequest(
- ctx context.Context,
req any,
respWriter ResponseMessageWriter,
blankResp ResponseConstructor,
@@ -143,11 +142,12 @@ func (s *SignService) HandleServerStreamRequest(
var err error
- if err = verifyRequestSignature(ctx, req); err != nil {
+ // verify request signatures
+ if err = signature.VerifyServiceMessage(req); err != nil {
err = fmt.Errorf("could not verify request: %w", err)
} else {
err = respWriterCaller(func(resp ResponseMessage) error {
- if err := signResponse(ctx, s.key, resp, statusSupported); err != nil {
+ if err := signResponse(s.key, resp, statusSupported); err != nil {
return err
}
@@ -164,7 +164,7 @@ func (s *SignService) HandleServerStreamRequest(
setStatusV2(resp, err)
- _ = signResponse(ctx, s.key, resp, false) // panics or returns nil with false arg
+ _ = signResponse(s.key, resp, false) // panics or returns nil with false arg
return respWriter(resp)
}
@@ -183,7 +183,8 @@ func (s *SignService) HandleUnaryRequest(ctx context.Context, req any, handler U
err error
)
- if err = verifyRequestSignature(ctx, req); err != nil {
+ // verify request signatures
+ if err = signature.VerifyServiceMessage(req); err != nil {
var sigErr apistatus.SignatureVerification
sigErr.SetMessage(err.Error())
@@ -204,7 +205,7 @@ func (s *SignService) HandleUnaryRequest(ctx context.Context, req any, handler U
}
// sign the response
- if err = signResponse(ctx, s.key, resp, statusSupported); err != nil {
+ if err = signResponse(s.key, resp, statusSupported); err != nil {
return nil, err
}
@@ -232,10 +233,7 @@ func setStatusV2(resp ResponseMessage, err error) {
// The signature error affects the result depending on the protocol version:
// - if status return is supported, panics since we cannot return the failed status, because it will not be signed;
// - otherwise, returns error in order to transport it directly.
-func signResponse(ctx context.Context, key *ecdsa.PrivateKey, resp any, statusSupported bool) error {
- _, span := tracing.StartSpanFromContext(ctx, "signResponse")
- defer span.End()
-
+func signResponse(key *ecdsa.PrivateKey, resp any, statusSupported bool) error {
err := signature.SignServiceMessage(key, resp)
if err != nil {
err = fmt.Errorf("could not sign response: %w", err)
@@ -249,10 +247,3 @@ func signResponse(ctx context.Context, key *ecdsa.PrivateKey, resp any, statusSu
return err
}
-
-func verifyRequestSignature(ctx context.Context, req any) error {
- _, span := tracing.StartSpanFromContext(ctx, "verifyRequestSignature")
- defer span.End()
-
- return signature.VerifyServiceMessage(req)
-}
From 7d39fecc6a84b19bb45af2b2cbf7298aedf8fa1f Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 12 Apr 2023 10:45:43 +0300
Subject: [PATCH 0093/1943] Release v0.36.0
Signed-off-by: Evgenii Stratonikov
---
CHANGELOG.md | 9 +++++++++
VERSION | 2 +-
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0e07bb2f2..b4d6e7ca0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,15 @@ Changelog for FrostFS Node
## [Unreleased]
+### Added
+### Changed
+### Fixed
+### Removed
+### Updated
+### Updating from v0.36.0
+
+## [v0.36.0] - 2023-04-12 - Furtwängler
+
### Added
- Add GAS pouring mechanism for a configurable list of wallets (#128)
- Separate batching for replicated operations over the same container in pilorama (#1621)
diff --git a/VERSION b/VERSION
index ab4e51c67..e1d6235d3 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v0.35.0
+v0.36.0
From 01c0c90a869ef9f77ed412bd5a26c539579dacf7 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Thu, 6 Apr 2023 18:22:59 +0300
Subject: [PATCH 0094/1943] [#113] cli: add "name" option for "get container"
command
* Make get container command filter out the container by attribute name
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
---
cmd/frostfs-cli/modules/container/list.go | 47 +++++++++++++++--------
1 file changed, 31 insertions(+), 16 deletions(-)
diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go
index 9565748c3..33dd17943 100644
--- a/cmd/frostfs-cli/modules/container/list.go
+++ b/cmd/frostfs-cli/modules/container/list.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
)
@@ -16,12 +17,14 @@ import (
const (
flagListPrintAttr = "with-attr"
flagListContainerOwner = "owner"
+ flagListName = "name"
)
// flag vars of list command.
var (
flagVarListPrintAttr bool
flagVarListContainerOwner string
+ flagVarListName string
)
var listContainersCmd = &cobra.Command{
@@ -52,24 +55,33 @@ var listContainersCmd = &cobra.Command{
var prmGet internalclient.GetContainerPrm
prmGet.SetClient(cli)
- list := res.IDList()
- for i := range list {
- cmd.Println(list[i].String())
+ containerIDs := res.IDList()
+ for _, cnrID := range containerIDs {
+ if flagVarListName == "" && !flagVarListPrintAttr {
+ cmd.Println(cnrID.String())
+ continue
+ }
+
+ prmGet.SetContainer(cnrID)
+ res, err := internalclient.GetContainer(prmGet)
+ if err != nil {
+ cmd.Printf(" failed to read attributes: %v\n", err)
+ continue
+ }
+
+ cnr := res.Container()
+ if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
+ continue
+ }
+ cmd.Println(cnrID.String())
if flagVarListPrintAttr {
- prmGet.SetContainer(list[i])
-
- res, err := internalclient.GetContainer(prmGet)
- if err == nil {
- res.Container().IterateAttributes(func(key, val string) {
- if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
- // FIXME(@cthulhu-rider): neofs-sdk-go#314 use dedicated method to skip system attributes
- cmd.Printf(" %s: %s\n", key, val)
- }
- })
- } else {
- cmd.Printf(" failed to read attributes: %v\n", err)
- }
+ cnr.IterateAttributes(func(key, val string) {
+ if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
+ // FIXME(@cthulhu-rider): neofs-sdk-go#314 use dedicated method to skip system attributes
+ cmd.Printf(" %s: %s\n", key, val)
+ }
+ })
}
}
},
@@ -80,6 +92,9 @@ func initContainerListContainersCmd() {
flags := listContainersCmd.Flags()
+ flags.StringVar(&flagVarListName, flagListName, "",
+ "List containers by the attribute name",
+ )
flags.StringVar(&flagVarListContainerOwner, flagListContainerOwner, "",
"Owner of containers (omit to use owner from private key)",
)
From 2f1beddfd3b9ef3d649321eda2d11b32abf3475e Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 12 Apr 2023 13:52:14 +0300
Subject: [PATCH 0095/1943] [#202] adm: Remove deprecated warnings in tests
`VerifyBlocks` is now `SkipBlockVerification` and is false by default.
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-adm/internal/modules/morph/initialize_test.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize_test.go
index 39a35b12e..fb2dc3e3f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_test.go
@@ -6,6 +6,7 @@ import (
"path/filepath"
"strconv"
"testing"
+ "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/config"
@@ -101,11 +102,10 @@ func generateTestData(t *testing.T, dir string, size int) {
cfg := config.Config{}
cfg.ProtocolConfiguration.Magic = 12345
cfg.ProtocolConfiguration.ValidatorsCount = size
- cfg.ProtocolConfiguration.SecondsPerBlock = 1 //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
+ cfg.ProtocolConfiguration.TimePerBlock = time.Second
cfg.ProtocolConfiguration.StandbyCommittee = pubs // sorted by glagolic letters
cfg.ProtocolConfiguration.P2PSigExtensions = true
cfg.ProtocolConfiguration.VerifyTransactions = true
- cfg.ProtocolConfiguration.VerifyBlocks = true //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
data, err := yaml.Marshal(cfg)
require.NoError(t, err)
From 96b38f7e86afbb8c9ad13fb174c34f76633b259f Mon Sep 17 00:00:00 2001
From: Roman Khimov
Date: Fri, 17 Feb 2023 21:28:35 +0300
Subject: [PATCH 0096/1943] [#239] morph/client: Add CalledByEntry into the
"grouped" scope
Fixes #2230, fixes #2263. CustomGroups are nice while we're only calling NeoFS
contracts, but they don't work at all for standard ones like GAS or Notary.
Signed-off-by: Roman Khimov
Signed-off-by: Evgenii Stratonikov
---
CHANGELOG.md | 1 +
pkg/morph/client/nns.go | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b4d6e7ca0..b13469d06 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -64,6 +64,7 @@ Changelog for FrostFS Node
- Iterating over just removed files by FSTree (#98)
- Parts of a locked object could not be removed anymore (#141)
- Non-alphabet nodes do not try to handle alphabet events (#181)
+- Failing SN and IR transactions because of incorrect scopes (#2230, #2263)
### Removed
### Updated
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index 0a23aa47a..c3b900ddb 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -208,7 +208,7 @@ func (c *Client) SetGroupSignerScope() error {
return err
}
- c.signer.Scopes = transaction.CustomGroups
+ c.signer.Scopes = transaction.CustomGroups | transaction.CalledByEntry
c.signer.AllowedGroups = []*keys.PublicKey{pub}
return nil
}
From be4df989e5151b0031f64c09f26f9c49325fa199 Mon Sep 17 00:00:00 2001
From: Roman Khimov
Date: Fri, 17 Feb 2023 22:27:29 +0300
Subject: [PATCH 0097/1943] [#239] morph/client: Deduplicate signers in Client
a bit
One signer in the cfg is enough.
Signed-off-by: Roman Khimov
Signed-off-by: Evgenii Stratonikov
---
pkg/morph/client/client.go | 2 --
pkg/morph/client/constructor.go | 1 -
pkg/morph/client/nns.go | 6 ++++--
pkg/morph/client/notary.go | 12 ++++++------
4 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index b93c5f75f..5e98211c4 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -57,8 +57,6 @@ type Client struct {
acc *wallet.Account // neo account
accAddr util.Uint160 // account's address
- signer *transaction.Signer
-
notary *notaryInfo
cfg cfg
diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go
index 9ed275029..c4ec70171 100644
--- a/pkg/morph/client/constructor.go
+++ b/pkg/morph/client/constructor.go
@@ -105,7 +105,6 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er
logger: cfg.logger,
acc: acc,
accAddr: accAddr,
- signer: cfg.signer,
cfg: *cfg,
switchLock: &sync.RWMutex{},
notifications: make(chan rpcclient.Notification),
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index c3b900ddb..e61cb8e15 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -208,8 +208,10 @@ func (c *Client) SetGroupSignerScope() error {
return err
}
- c.signer.Scopes = transaction.CustomGroups | transaction.CalledByEntry
- c.signer.AllowedGroups = []*keys.PublicKey{pub}
+ c.cfg.signer = &transaction.Signer{
+ Scopes: transaction.CustomGroups | transaction.CalledByEntry,
+ AllowedGroups: []*keys.PublicKey{pub},
+ }
return nil
}
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 96dca0319..069c35782 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -596,18 +596,18 @@ func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, comm
s = append(s, transaction.Signer{
Account: hash.Hash160(multisigScript),
- Scopes: c.signer.Scopes,
- AllowedContracts: c.signer.AllowedContracts,
- AllowedGroups: c.signer.AllowedGroups,
+ Scopes: c.cfg.signer.Scopes,
+ AllowedContracts: c.cfg.signer.AllowedContracts,
+ AllowedGroups: c.cfg.signer.AllowedGroups,
})
if !invokedByAlpha {
// then we have invoker signature
s = append(s, transaction.Signer{
Account: hash.Hash160(c.acc.GetVerificationScript()),
- Scopes: c.signer.Scopes,
- AllowedContracts: c.signer.AllowedContracts,
- AllowedGroups: c.signer.AllowedGroups,
+ Scopes: c.cfg.signer.Scopes,
+ AllowedContracts: c.cfg.signer.AllowedContracts,
+ AllowedGroups: c.cfg.signer.AllowedGroups,
})
}
From f41ad9d419d925195656ccf56ff764ad3ef57f75 Mon Sep 17 00:00:00 2001
From: Roman Khimov
Date: Fri, 17 Feb 2023 22:32:31 +0300
Subject: [PATCH 0098/1943] [#239] morph/client: Recreate actor/wrappers in
SetGroupSignerScope
That's the reason #2230 and #2263 were not detected earlier, we actually had
Global scope being used before reconnection to RPC node.
Signed-off-by: Roman Khimov
Signed-off-by: Evgenii Stratonikov
---
CHANGELOG.md | 1 +
pkg/morph/client/nns.go | 10 +++++++++-
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b13469d06..270d0265e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -65,6 +65,7 @@ Changelog for FrostFS Node
- Parts of a locked object could not be removed anymore (#141)
- Non-alphabet nodes do not try to handle alphabet events (#181)
- Failing SN and IR transactions because of incorrect scopes (#2230, #2263)
+- Global scope used for some transactions (#2230, #2263)
### Removed
### Updated
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index e61cb8e15..473b3500b 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -208,10 +208,18 @@ func (c *Client) SetGroupSignerScope() error {
return err
}
- c.cfg.signer = &transaction.Signer{
+ // Don't change c before everything is OK.
+ cfg := c.cfg
+ cfg.signer = &transaction.Signer{
Scopes: transaction.CustomGroups | transaction.CalledByEntry,
AllowedGroups: []*keys.PublicKey{pub},
}
+ rpcActor, err := newActor(c.client, c.acc, cfg)
+ if err != nil {
+ return err
+ }
+ c.cfg = cfg
+ c.setActor(rpcActor)
return nil
}
From d686ab49e80f33fa91b213ff9ceeb3caecff2597 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 12 Apr 2023 13:58:54 +0300
Subject: [PATCH 0099/1943] [#202] adm: Remove deprecated RPC client methods
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-adm/internal/modules/morph/initialize_nns.go | 8 +++++---
.../internal/modules/morph/initialize_register.go | 8 +++++---
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go
index edb7d6de5..15657a6d9 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_nns.go
@@ -15,6 +15,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
+ nnsClient "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -284,10 +285,11 @@ func parseNNSResolveResult(res stackitem.Item) (util.Uint160, error) {
}
func nnsIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) {
- switch ct := c.(type) {
+ switch c.(type) {
case *rpcclient.Client:
- //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
- return ct.NNSIsAvailable(nnsHash, name)
+ inv := invoker.New(c, nil)
+ reader := nnsClient.NewReader(inv, nnsHash)
+ return reader.IsAvailable(name)
default:
b, err := unwrap.Bool(invokeFunction(c, nnsHash, "isAvailable", []any{name}, nil))
if err != nil {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize_register.go
index 27e1590cf..b1542cc92 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_register.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_register.go
@@ -9,6 +9,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/neo"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
@@ -116,10 +117,11 @@ func (c *initializeContext) transferNEOFinished(neoHash util.Uint160) (bool, err
var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response")
func (c *initializeContext) getCandidateRegisterPrice() (int64, error) {
- switch ct := c.Client.(type) {
+ switch c.Client.(type) {
case *rpcclient.Client:
- //lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
- return ct.GetCandidateRegisterPrice()
+ inv := invoker.New(c.Client, nil)
+ reader := neo.NewReader(inv)
+ return reader.GetRegisterPrice()
default:
neoHash := neo.Hash
res, err := invokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil)
From d29b13454f1837202e597b5179626ceee50160bb Mon Sep 17 00:00:00 2001
From: Roman Khimov
Date: Fri, 17 Feb 2023 23:43:22 +0300
Subject: [PATCH 0100/1943] [#239] morph/client: Simplify code interacting with
magic numbers
It can't be uint64 in fact, but this error is buried deeply in the NetworkInfo
API structure, so we're not touching MagicNumber() for now.
Signed-off-by: Roman Khimov
Signed-off-by: Evgenii Stratonikov
---
pkg/morph/client/notary.go | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 069c35782..7399c19cd 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -412,16 +412,11 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return err
}
- // error appears only if client
- // is in inactive mode; that has
- // been already checked above
- magicNumber, _ := c.MagicNumber()
-
// mainTX is expected to be pre-validated: second witness must exist and be empty
mainTx.Scripts[1].VerificationScript = multiaddrAccount.GetVerificationScript()
mainTx.Scripts[1].InvocationScript = append(
[]byte{byte(opcode.PUSHDATA1), 64},
- multiaddrAccount.PrivateKey().SignHashable(uint32(magicNumber), mainTx)...,
+ multiaddrAccount.SignHashable(c.rpcActor.GetNetwork(), mainTx)...,
)
//lint:ignore SA1019 https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/202
@@ -667,12 +662,12 @@ func (c *Client) notaryWitnesses(invokedByAlpha bool, multiaddr *wallet.Account,
// to pass Notary module verification
var invokeScript []byte
- magicNumber, _ := c.MagicNumber()
+ magicNumber := c.rpcActor.GetNetwork()
if invokedByAlpha {
invokeScript = append(
[]byte{byte(opcode.PUSHDATA1), 64},
- multiaddr.PrivateKey().SignHashable(uint32(magicNumber), tx)...,
+ multiaddr.SignHashable(magicNumber, tx)...,
)
} else {
// we can't provide alphabet node signature
@@ -694,7 +689,7 @@ func (c *Client) notaryWitnesses(invokedByAlpha bool, multiaddr *wallet.Account,
// then we have invoker witness
invokeScript = append(
[]byte{byte(opcode.PUSHDATA1), 64},
- c.acc.PrivateKey().SignHashable(uint32(magicNumber), tx)...,
+ c.acc.SignHashable(magicNumber, tx)...,
)
w = append(w, transaction.Witness{
From 0e31c12e63d1082cca5775b661104c5b47825326 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 12 Apr 2023 17:35:10 +0300
Subject: [PATCH 0101/1943] [#240] logs: Move log messages to constants
Drop duplicate entities.
Format entities.
Signed-off-by: Dmitrii Stepanov
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-ir/main.go | 9 +-
cmd/frostfs-node/config.go | 41 +-
cmd/frostfs-node/container.go | 13 +-
cmd/frostfs-node/control.go | 3 +-
cmd/frostfs-node/grpc.go | 15 +-
cmd/frostfs-node/main.go | 5 +-
cmd/frostfs-node/morph.go | 21 +-
cmd/frostfs-node/netmap.go | 9 +-
cmd/frostfs-node/notificator.go | 11 +-
cmd/frostfs-node/object.go | 7 +-
cmd/frostfs-node/reputation.go | 9 +-
cmd/frostfs-node/reputation/common/remote.go | 7 +-
.../reputation/intermediate/consumers.go | 3 +-
.../reputation/intermediate/contract.go | 3 +-
.../reputation/intermediate/daughters.go | 3 +-
.../reputation/intermediate/remote.go | 3 +-
cmd/frostfs-node/reputation/local/remote.go | 3 +-
cmd/frostfs-node/reputation/local/storage.go | 3 +-
cmd/frostfs-node/tracing.go | 5 +-
cmd/frostfs-node/tree.go | 11 +-
internal/logs/logs.go | 643 ++++++++++++++++++
pkg/innerring/blocktimer.go | 5 +-
pkg/innerring/initialization.go | 13 +-
pkg/innerring/innerring.go | 13 +-
pkg/innerring/notary.go | 7 +-
pkg/innerring/processors/alphabet/handlers.go | 5 +-
.../processors/alphabet/process_emit.go | 19 +-
.../processors/alphabet/processor.go | 3 +-
pkg/innerring/processors/audit/handlers.go | 5 +-
pkg/innerring/processors/audit/process.go | 21 +-
pkg/innerring/processors/audit/scheduler.go | 3 +-
pkg/innerring/processors/balance/handlers.go | 5 +-
.../processors/balance/process_assets.go | 5 +-
pkg/innerring/processors/balance/processor.go | 3 +-
.../processors/container/handlers.go | 13 +-
.../processors/container/process_container.go | 13 +-
.../processors/container/process_eacl.go | 7 +-
.../processors/container/processor.go | 3 +-
pkg/innerring/processors/frostfs/handlers.go | 25 +-
.../processors/frostfs/process_assets.go | 23 +-
.../processors/frostfs/process_bind.go | 7 +-
.../processors/frostfs/process_config.go | 5 +-
pkg/innerring/processors/frostfs/processor.go | 3 +-
.../processors/governance/handlers.go | 5 +-
.../processors/governance/process_update.go | 29 +-
pkg/innerring/processors/netmap/handlers.go | 27 +-
.../processors/netmap/process_cleanup.go | 11 +-
.../processors/netmap/process_epoch.go | 17 +-
.../processors/netmap/process_peers.go | 35 +-
pkg/innerring/processors/netmap/processor.go | 3 +-
.../processors/reputation/handlers.go | 5 +-
.../processors/reputation/process_put.go | 11 +-
.../processors/reputation/processor.go | 3 +-
.../processors/settlement/audit/calculate.go | 47 +-
.../processors/settlement/basic/collect.go | 9 +-
.../processors/settlement/basic/distribute.go | 5 +-
pkg/innerring/processors/settlement/calls.go | 29 +-
.../processors/settlement/handlers.go | 9 +-
.../processors/settlement/processor.go | 3 +-
pkg/innerring/rpc.go | 5 +-
pkg/innerring/settlement.go | 5 +-
pkg/innerring/state.go | 13 +-
pkg/innerring/subnet.go | 23 +-
.../blobovnicza/control.go | 13 +-
.../blobovnicza/delete.go | 3 +-
.../blobstor/blobovniczatree/blobovnicza.go | 11 +-
.../blobstor/blobovniczatree/control.go | 11 +-
.../blobstor/blobovniczatree/delete.go | 7 +-
.../blobstor/blobovniczatree/exists.go | 3 +-
.../blobstor/blobovniczatree/get.go | 7 +-
.../blobstor/blobovniczatree/get_range.go | 7 +-
.../blobstor/blobovniczatree/put.go | 15 +-
pkg/local_object_storage/blobstor/control.go | 9 +-
pkg/local_object_storage/blobstor/exists.go | 3 +-
pkg/local_object_storage/blobstor/iterate.go | 3 +-
pkg/local_object_storage/engine/control.go | 15 +-
pkg/local_object_storage/engine/delete.go | 5 +-
pkg/local_object_storage/engine/engine.go | 11 +-
pkg/local_object_storage/engine/evacuate.go | 7 +-
pkg/local_object_storage/engine/inhume.go | 7 +-
pkg/local_object_storage/engine/put.go | 5 +-
.../engine/remove_copies.go | 9 +-
pkg/local_object_storage/engine/shards.go | 5 +-
pkg/local_object_storage/metabase/control.go | 7 +-
pkg/local_object_storage/metabase/select.go | 17 +-
pkg/local_object_storage/shard/control.go | 15 +-
pkg/local_object_storage/shard/delete.go | 7 +-
pkg/local_object_storage/shard/gc.go | 43 +-
pkg/local_object_storage/shard/get.go | 7 +-
pkg/local_object_storage/shard/inhume.go | 3 +-
pkg/local_object_storage/shard/list.go | 3 +-
pkg/local_object_storage/shard/mode.go | 5 +-
pkg/local_object_storage/shard/move.go | 3 +-
pkg/local_object_storage/shard/put.go | 3 +-
pkg/local_object_storage/shard/shard.go | 7 +-
pkg/local_object_storage/writecache/flush.go | 3 +-
pkg/local_object_storage/writecache/init.go | 9 +-
pkg/local_object_storage/writecache/mode.go | 3 +-
.../writecache/storage.go | 7 +-
pkg/morph/client/client.go | 11 +-
pkg/morph/client/multi.go | 15 +-
pkg/morph/client/notary.go | 9 +-
pkg/morph/client/notifications.go | 7 +-
pkg/morph/event/listener.go | 69 +-
pkg/morph/event/utils.go | 3 +-
pkg/morph/subscriber/subscriber.go | 15 +-
pkg/services/audit/auditor/context.go | 7 +-
pkg/services/audit/auditor/pdp.go | 5 +-
pkg/services/audit/auditor/pop.go | 5 +-
pkg/services/audit/auditor/por.go | 11 +-
pkg/services/audit/taskmanager/listen.go | 13 +-
.../announcement/load/controller/calls.go | 33 +-
.../announcement/load/route/calls.go | 9 +-
pkg/services/notificator/nats/service.go | 7 +-
pkg/services/notificator/service.go | 5 +-
pkg/services/object/acl/v2/classifier.go | 5 +-
pkg/services/object/delete/container.go | 4 +-
pkg/services/object/delete/delete.go | 7 +-
pkg/services/object/delete/exec.go | 21 +-
pkg/services/object/delete/local.go | 13 +-
pkg/services/object/get/assemble.go | 11 +-
pkg/services/object/get/container.go | 13 +-
pkg/services/object/get/exec.go | 11 +-
pkg/services/object/get/get.go | 13 +-
pkg/services/object/get/local.go | 3 +-
pkg/services/object/get/remote.go | 5 +-
pkg/services/object/put/distributed.go | 3 +-
pkg/services/object/search/container.go | 17 +-
pkg/services/object/search/exec.go | 7 +-
pkg/services/object/search/local.go | 3 +-
pkg/services/object/search/search.go | 7 +-
pkg/services/object/util/log.go | 5 +-
.../object_manager/tombstone/checker.go | 3 +-
pkg/services/policer/check.go | 17 +-
pkg/services/policer/process.go | 9 +-
pkg/services/replicator/process.go | 9 +-
pkg/services/reputation/common/managers.go | 3 +-
.../reputation/common/router/calls.go | 9 +-
.../reputation/eigentrust/calculator/calls.go | 37 +-
.../reputation/eigentrust/controller/calls.go | 5 +-
.../reputation/eigentrust/routes/calls.go | 3 +-
.../reputation/local/controller/calls.go | 19 +-
pkg/services/reputation/local/routes/calls.go | 3 +-
pkg/services/session/executor.go | 3 +-
.../session/storage/persistent/storage.go | 7 +-
pkg/services/tree/redirect.go | 3 +-
pkg/services/tree/replicator.go | 9 +-
pkg/services/tree/signature.go | 3 +-
pkg/services/tree/sync.go | 29 +-
149 files changed, 1481 insertions(+), 687 deletions(-)
create mode 100644 internal/logs/logs.go
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index e4386a083..5db1db6b6 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -9,6 +9,7 @@ import (
"os/signal"
"syscall"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
@@ -80,13 +81,13 @@ func main() {
err = innerRing.Start(ctx, intErr)
exitErr(err)
- log.Info("application started",
+ log.Info(logs.CommonApplicationStarted,
zap.String("version", misc.Version))
select {
case <-ctx.Done():
case err := <-intErr:
- log.Info("internal error", zap.String("msg", err.Error()))
+ log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
}
innerRing.Stop()
@@ -98,14 +99,14 @@ func main() {
go func() {
err := srv.Shutdown()
if err != nil {
- log.Debug("could not shutdown HTTP server",
+ log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
)
}
}()
}
- log.Info("application stopped")
+ log.Info(logs.FrostFSIRApplicationStopped)
}
func initHTTPServers(cfg *viper.Viper, log *logger.Logger) []*httputil.Server {
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index d81e47b17..d110665f5 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -29,6 +29,7 @@ import (
objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -342,13 +343,13 @@ type internals struct {
func (c *cfg) startMaintenance() {
c.isMaintenance.Store(true)
c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
- c.log.Info("started local node's maintenance")
+ c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
}
// stops node's maintenance.
func (c *internals) stopMaintenance() {
c.isMaintenance.Store(false)
- c.log.Info("stopped local node's maintenance")
+ c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
}
// IsMaintenance checks if storage node is under maintenance.
@@ -881,10 +882,10 @@ func initLocalStorage(c *cfg) {
for _, optsWithMeta := range c.shardOpts() {
id, err := ls.AddShard(append(optsWithMeta.shOpts, shard.WithTombstoneSource(tombstoneSource))...)
if err != nil {
- c.log.Error("failed to attach shard to engine", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
shardsAttached++
- c.log.Info("shard attached to engine", zap.Stringer("id", id))
+ c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
}
}
if shardsAttached == 0 {
@@ -894,15 +895,15 @@ func initLocalStorage(c *cfg) {
c.cfgObject.cfgLocalStorage.localStorage = ls
c.onShutdown(func() {
- c.log.Info("closing components of the storage engine...")
+ c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
err := ls.Close()
if err != nil {
- c.log.Info("storage engine closing failure",
+ c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
zap.String("error", err.Error()),
)
} else {
- c.log.Info("all components of the storage engine closed successfully")
+ c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
}
})
}
@@ -976,11 +977,11 @@ func (c *cfg) bootstrap() error {
// switch to online except when under maintenance
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
- c.log.Info("bootstrapping with the maintenance state")
+ c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
}
- c.log.Info("bootstrapping with online state",
+ c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
zap.Stringer("previous", st),
)
@@ -1015,32 +1016,32 @@ func (c *cfg) signalWatcher(ctx context.Context) {
case syscall.SIGHUP:
c.reloadConfig(ctx)
case syscall.SIGTERM, syscall.SIGINT:
- c.log.Info("termination signal has been received, stopping...")
+ c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
// TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING)
c.shutdown()
- c.log.Info("termination signal processing is complete")
+ c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
return
}
case err := <-c.internalErr: // internal application error
- c.log.Warn("internal application error",
+ c.log.Warn(logs.FrostFSNodeInternalApplicationError,
zap.String("message", err.Error()))
c.shutdown()
- c.log.Info("internal error processing is complete")
+ c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
return
}
}
}
func (c *cfg) reloadConfig(ctx context.Context) {
- c.log.Info("SIGHUP has been received, rereading configuration...")
+ c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
err := c.readConfig(c.appCfg)
if err != nil {
- c.log.Error("configuration reading", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
return
}
@@ -1052,7 +1053,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
logPrm, err := c.loggerPrm()
if err != nil {
- c.log.Error("logger configuration preparation", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
return
}
@@ -1060,7 +1061,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
components = append(components, dCmp{"tracing", func() error {
updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
if updated {
- c.log.Info("tracing configation updated")
+ c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
}
return err
}})
@@ -1085,20 +1086,20 @@ func (c *cfg) reloadConfig(ctx context.Context) {
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
if err != nil {
- c.log.Error("storage engine configuration update", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
return
}
for _, component := range components {
err = component.reloadFunc()
if err != nil {
- c.log.Error("updated configuration applying",
+ c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
zap.String("component", component.name),
zap.Error(err))
}
}
- c.log.Info("configuration has been reloaded successfully")
+ c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
func (c *cfg) shutdown() {
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index 6c864431d..d5d8601e3 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -11,6 +11,7 @@ import (
containerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -136,13 +137,13 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
} else {
// unlike removal, we expect successful receive of the container
// after successful creation, so logging can be useful
- c.log.Error("read newly created container after the notification",
+ c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
zap.Stringer("id", ev.ID),
zap.Error(err),
)
}
- c.log.Debug("container creation event's receipt",
+ c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
zap.Stringer("id", ev.ID),
)
})
@@ -161,7 +162,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
cachedContainerStorage.handleRemoval(ev.ID)
- c.log.Debug("container removal event's receipt",
+ c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
zap.Stringer("id", ev.ID),
)
})
@@ -295,7 +296,7 @@ type morphLoadWriter struct {
}
func (w *morphLoadWriter) Put(a containerSDK.SizeEstimation) error {
- w.log.Debug("save used space announcement in contract",
+ w.log.Debug(logs.FrostFSNodeSaveUsedSpaceAnnouncementInContract,
zap.Uint64("epoch", a.Epoch()),
zap.Stringer("cid", a.Container()),
zap.Uint64("size", a.Value()),
@@ -458,7 +459,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
for i := range idList {
sz, err := engine.ContainerSize(d.engine, idList[i])
if err != nil {
- d.log.Debug("failed to calculate container size in storage engine",
+ d.log.Debug(logs.FrostFSNodeFailedToCalculateContainerSizeInStorageEngine,
zap.Stringer("cid", idList[i]),
zap.String("error", err.Error()),
)
@@ -466,7 +467,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
continue
}
- d.log.Debug("container size in storage engine calculated successfully",
+ d.log.Debug(logs.FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully,
zap.Uint64("size", sz),
zap.Stringer("cid", idList[i]),
)
diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go
index 5492f585f..f4b068419 100644
--- a/cmd/frostfs-node/control.go
+++ b/cmd/frostfs-node/control.go
@@ -5,6 +5,7 @@ import (
"net"
controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
@@ -52,7 +53,7 @@ func initControlService(c *cfg) {
lis, err := net.Listen("tcp", endpoint)
if err != nil {
- c.log.Error("can't listen gRPC endpoint (control)", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
return
}
diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go
index f3943f3ff..b0a587782 100644
--- a/cmd/frostfs-node/grpc.go
+++ b/cmd/frostfs-node/grpc.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
"google.golang.org/grpc"
@@ -33,7 +34,7 @@ func initGRPC(c *cfg) {
if tlsCfg != nil {
cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
if err != nil {
- c.log.Error("could not read certificate from file", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
return
}
@@ -63,7 +64,7 @@ func initGRPC(c *cfg) {
lis, err := net.Listen("tcp", sc.Endpoint())
if err != nil {
- c.log.Error("can't listen gRPC endpoint", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
return
}
@@ -93,14 +94,14 @@ func serveGRPC(c *cfg) {
go func() {
defer func() {
- c.log.Info("stop listening gRPC endpoint",
+ c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
zap.String("endpoint", lis.Addr().String()),
)
c.wg.Done()
}()
- c.log.Info("start listening gRPC endpoint",
+ c.log.Info(logs.FrostFSNodeStartListeningGRPCEndpoint,
zap.String("endpoint", lis.Addr().String()),
)
@@ -114,7 +115,7 @@ func serveGRPC(c *cfg) {
func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
l = &logger.Logger{Logger: l.With(zap.String("name", name))}
- l.Info("stopping gRPC server...")
+ l.Info(logs.FrostFSNodeStoppingGRPCServer)
// GracefulStop() may freeze forever, see #1270
done := make(chan struct{})
@@ -126,9 +127,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
select {
case <-done:
case <-time.After(1 * time.Minute):
- l.Info("gRPC cannot shutdown gracefully, forcing stop")
+ l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
s.Stop()
}
- l.Info("gRPC server stopped successfully")
+ l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
}
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index a97ad3879..786843b0b 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -8,6 +8,7 @@ import (
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
"go.uber.org/zap"
@@ -142,14 +143,14 @@ func bootUp(ctx context.Context, c *cfg) {
}
func wait(c *cfg, cancel func()) {
- c.log.Info("application started",
+ c.log.Info(logs.CommonApplicationStarted,
zap.String("version", misc.Version))
<-c.done // graceful shutdown
cancel()
- c.log.Debug("waiting for all processes to stop")
+ c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
c.wg.Wait()
}
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 2db865ca3..72378d8f3 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -7,6 +7,7 @@ import (
"time"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -49,7 +50,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
)
if err != nil {
- c.log.Info("failed to create neo RPC client",
+ c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
zap.String("error", err.Error()),
)
@@ -58,12 +59,12 @@ func initMorphComponents(ctx context.Context, c *cfg) {
}
c.onShutdown(func() {
- c.log.Info("closing morph components...")
+ c.log.Info(logs.FrostFSNodeClosingMorphComponents)
cli.Close()
})
if err := cli.SetGroupSignerScope(); err != nil {
- c.log.Info("failed to set group signer scope, continue with Global", zap.Error(err))
+ c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
}
c.cfgMorph.client = cli
@@ -80,7 +81,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
fatalOnErr(err)
}
- c.log.Info("notary support",
+ c.log.Info(logs.FrostFSNodeNotarySupport,
zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
)
@@ -95,7 +96,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
msPerBlock, err := c.cfgMorph.client.MsPerBlock()
fatalOnErr(err)
c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
- c.log.Debug("morph.cache_ttl fetched from network", zap.Duration("value", c.cfgMorph.cacheTTL))
+ c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
}
if c.cfgMorph.cacheTTL < 0 {
@@ -122,7 +123,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
- c.log.Info("notary deposit has already been made")
+ c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
return
}
@@ -190,7 +191,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
- c.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
+ c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
subs, err = subscriber.New(ctx, &subscriber.Params{
@@ -215,7 +216,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
res, err := netmapEvent.ParseNewEpoch(src)
if err == nil {
- c.log.Info("new epoch event from sidechain",
+ c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
)
}
@@ -226,11 +227,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
registerBlockHandler(lis, func(block *block.Block) {
- c.log.Debug("new block", zap.Uint32("index", block.Index))
+ c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
if err != nil {
- c.log.Warn("can't update persistent state",
+ c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", block.Index))
}
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index d9b1c9208..76cceeb6d 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -8,6 +8,7 @@ import (
netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -193,7 +194,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
if (n-c.cfgNetmap.startEpoch)%reBootstrapInterval == 0 {
err := c.bootstrap()
if err != nil {
- c.log.Warn("can't send re-bootstrap tx", zap.Error(err))
+ c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
}
}
})
@@ -203,7 +204,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
ni, err := c.netmapLocalNodeState(e)
if err != nil {
- c.log.Error("could not update node state on new epoch",
+ c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", e),
zap.String("error", err.Error()),
)
@@ -218,7 +219,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
_, err := makeNotaryDeposit(c)
if err != nil {
- c.log.Error("could not make notary deposit",
+ c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
)
}
@@ -298,7 +299,7 @@ func initNetmapState(c *cfg) {
}
}
- c.log.Info("initial network state",
+ c.log.Info(logs.FrostFSNodeInitialNetworkState,
zap.Uint64("epoch", epoch),
zap.String("state", stateWord),
)
diff --git a/cmd/frostfs-node/notificator.go b/cmd/frostfs-node/notificator.go
index 4a310e5b0..9c90e052c 100644
--- a/cmd/frostfs-node/notificator.go
+++ b/cmd/frostfs-node/notificator.go
@@ -6,6 +6,7 @@ import (
"fmt"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -28,7 +29,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
listRes, err := n.e.ListContainers(engine.ListContainersPrm{})
if err != nil {
- log.Error("notificator: could not list containers", zap.Error(err))
+ log.Error(logs.FrostFSNodeNotificatorCouldNotListContainers, zap.Error(err))
return
}
@@ -43,7 +44,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
selectRes, err := n.e.Select(selectPrm)
if err != nil {
- log.Error("notificator: could not select objects from container",
+ log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer,
zap.Stringer("cid", c),
zap.Error(err),
)
@@ -53,7 +54,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
for _, a := range selectRes.AddressList() {
err = n.processAddress(ctx, a, handler)
if err != nil {
- log.Error("notificator: could not process object",
+ log.Error(logs.FrostFSNodeNotificatorCouldNotProcessObject,
zap.Stringer("address", a),
zap.Error(err),
)
@@ -62,7 +63,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
}
}
- log.Debug("notificator: finished processing object notifications")
+ log.Debug(logs.FrostFSNodeNotificatorFinishedProcessingObjectNotifications)
}
func (n *notificationSource) processAddress(
@@ -101,7 +102,7 @@ type notificationWriter struct {
func (n notificationWriter) Notify(topic string, address oid.Address) {
if err := n.w.Notify(topic, address); err != nil {
- n.l.Warn("could not write object notification",
+ n.l.Warn(logs.FrostFSNodeCouldNotWriteObjectNotification,
zap.Stringer("address", address),
zap.String("topic", topic),
zap.Error(err),
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index ff4335ff9..8f5a83eb0 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -11,6 +11,7 @@ import (
metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -62,7 +63,7 @@ type objectSvc struct {
func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
- c.log.Error("could not get max object size value",
+ c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.String("error", err.Error()),
)
}
@@ -259,7 +260,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputati
_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
- c.log.Warn("could not inhume mark redundant copy as garbage",
+ c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.String("error", err.Error()),
)
}
@@ -600,7 +601,7 @@ func (c *reputationClientConstructor) Get(info coreclient.NodeInfo) (coreclient.
}
}
} else {
- c.log.Warn("could not get latest network map to overload the client",
+ c.log.Warn(logs.FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient,
zap.String("error", err.Error()),
)
}
diff --git a/cmd/frostfs-node/reputation.go b/cmd/frostfs-node/reputation.go
index a96bd066e..b3acf7eb0 100644
--- a/cmd/frostfs-node/reputation.go
+++ b/cmd/frostfs-node/reputation.go
@@ -11,6 +11,7 @@ import (
intermediatereputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/intermediate"
localreputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/local"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -105,7 +106,7 @@ func addReputationReportHandler(ctx context.Context, c *cfg) {
addNewEpochAsyncNotificationHandler(
c,
func(ev event.Event) {
- c.log.Debug("start reporting reputation on new epoch event")
+ c.log.Debug(logs.FrostFSNodeStartReportingReputationOnNewEpochEvent)
var reportPrm localtrustcontroller.ReportPrm
@@ -127,13 +128,13 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController
duration, err := c.cfgNetmap.wrapper.EpochDuration()
if err != nil {
- log.Debug("could not fetch epoch duration", zap.Error(err))
+ log.Debug(logs.FrostFSNodeCouldNotFetchEpochDuration, zap.Error(err))
return
}
iterations, err := c.cfgNetmap.wrapper.EigenTrustIterations()
if err != nil {
- log.Debug("could not fetch iteration number", zap.Error(err))
+ log.Debug(logs.FrostFSNodeCouldNotFetchIterationNumber, zap.Error(err))
return
}
@@ -145,7 +146,7 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController
)
})
if err != nil {
- log.Debug("could not create fixed epoch timer", zap.Error(err))
+ log.Debug(logs.FrostFSNodeCouldNotCreateFixedEpochTimer, zap.Error(err))
return
}
diff --git a/cmd/frostfs-node/reputation/common/remote.go b/cmd/frostfs-node/reputation/common/remote.go
index cd0a024a9..f1982301f 100644
--- a/cmd/frostfs-node/reputation/common/remote.go
+++ b/cmd/frostfs-node/reputation/common/remote.go
@@ -3,6 +3,7 @@ package common
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -71,16 +72,16 @@ func NewRemoteTrustProvider(prm RemoteProviderPrm) *RemoteTrustProvider {
}
func (rtp *RemoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) {
- rtp.log.Debug("initializing remote writer provider")
+ rtp.log.Debug(logs.CommonInitializingRemoteWriterProvider)
if srv == nil {
- rtp.log.Debug("route has reached dead-end provider")
+ rtp.log.Debug(logs.CommonRouteHasReachedDeadendProvider)
return rtp.deadEndProvider, nil
}
if rtp.netmapKeys.IsLocalKey(srv.PublicKey()) {
// if local => return no-op writer
- rtp.log.Debug("initializing no-op writer provider")
+ rtp.log.Debug(logs.CommonInitializingNoopWriterProvider)
return trustcontroller.SimpleWriterProvider(new(NopReputationWriter)), nil
}
diff --git a/cmd/frostfs-node/reputation/intermediate/consumers.go b/cmd/frostfs-node/reputation/intermediate/consumers.go
index 33eab605b..02cdb2a2b 100644
--- a/cmd/frostfs-node/reputation/intermediate/consumers.go
+++ b/cmd/frostfs-node/reputation/intermediate/consumers.go
@@ -3,6 +3,7 @@ package intermediate
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
@@ -31,7 +32,7 @@ type ConsumerTrustWriter struct {
}
func (w *ConsumerTrustWriter) Write(_ context.Context, t reputation.Trust) error {
- w.log.Debug("writing received consumer's trusts",
+ w.log.Debug(logs.IntermediateWritingReceivedConsumersTrusts,
zap.Uint64("epoch", w.iterInfo.Epoch()),
zap.Uint32("iteration", w.iterInfo.I()),
zap.Stringer("trusting_peer", t.TrustingPeer()),
diff --git a/cmd/frostfs-node/reputation/intermediate/contract.go b/cmd/frostfs-node/reputation/intermediate/contract.go
index 6303b1219..2d83598bc 100644
--- a/cmd/frostfs-node/reputation/intermediate/contract.go
+++ b/cmd/frostfs-node/reputation/intermediate/contract.go
@@ -4,6 +4,7 @@ import (
"crypto/ecdsa"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
@@ -71,7 +72,7 @@ type FinalWriter struct {
}
func (fw FinalWriter) WriteIntermediateTrust(t eigentrust.IterationTrust) error {
- fw.l.Debug("start writing global trusts to contract")
+ fw.l.Debug(logs.IntermediateStartWritingGlobalTrustsToContract)
args := repClient.PutPrm{}
diff --git a/cmd/frostfs-node/reputation/intermediate/daughters.go b/cmd/frostfs-node/reputation/intermediate/daughters.go
index d72eead43..30237537c 100644
--- a/cmd/frostfs-node/reputation/intermediate/daughters.go
+++ b/cmd/frostfs-node/reputation/intermediate/daughters.go
@@ -3,6 +3,7 @@ package intermediate
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
@@ -27,7 +28,7 @@ type DaughterTrustWriter struct {
}
func (w *DaughterTrustWriter) Write(_ context.Context, t reputation.Trust) error {
- w.log.Debug("writing received daughter's trusts",
+ w.log.Debug(logs.IntermediateWritingReceivedDaughtersTrusts,
zap.Uint64("epoch", w.ep.Epoch()),
zap.Stringer("trusting_peer", t.TrustingPeer()),
zap.Stringer("trusted_peer", t.Peer()),
diff --git a/cmd/frostfs-node/reputation/intermediate/remote.go b/cmd/frostfs-node/reputation/intermediate/remote.go
index b1a218b94..8087463b5 100644
--- a/cmd/frostfs-node/reputation/intermediate/remote.go
+++ b/cmd/frostfs-node/reputation/intermediate/remote.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -92,7 +93,7 @@ func (rtp *RemoteTrustWriter) Write(ctx context.Context, t reputation.Trust) err
epoch := rtp.iterInfo.Epoch()
i := rtp.iterInfo.I()
- rtp.log.Debug("announcing trust",
+ rtp.log.Debug(logs.IntermediateAnnouncingTrust,
zap.Uint64("epoch", epoch),
zap.Uint32("iteration", i),
zap.Stringer("trusting_peer", t.TrustingPeer()),
diff --git a/cmd/frostfs-node/reputation/local/remote.go b/cmd/frostfs-node/reputation/local/remote.go
index 3c929a9ca..6197c6d69 100644
--- a/cmd/frostfs-node/reputation/local/remote.go
+++ b/cmd/frostfs-node/reputation/local/remote.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -96,7 +97,7 @@ func (rtp *RemoteTrustWriter) Write(_ context.Context, t reputation.Trust) error
func (rtp *RemoteTrustWriter) Close(ctx context.Context) error {
epoch := rtp.ep.Epoch()
- rtp.log.Debug("announcing trusts",
+ rtp.log.Debug(logs.LocalAnnouncingTrusts,
zap.Uint64("epoch", epoch),
)
diff --git a/cmd/frostfs-node/reputation/local/storage.go b/cmd/frostfs-node/reputation/local/storage.go
index 861151871..a0dc3d4ce 100644
--- a/cmd/frostfs-node/reputation/local/storage.go
+++ b/cmd/frostfs-node/reputation/local/storage.go
@@ -4,6 +4,7 @@ import (
"bytes"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -27,7 +28,7 @@ type TrustStorage struct {
func (s *TrustStorage) InitIterator(ep reputationcommon.EpochProvider) (trustcontroller.Iterator, error) {
epoch := ep.Epoch()
- s.Log.Debug("initializing iterator over trusts",
+ s.Log.Debug(logs.LocalInitializingIteratorOverTrusts,
zap.Uint64("epoch", epoch),
)
diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go
index bbdb71c64..d963ba866 100644
--- a/cmd/frostfs-node/tracing.go
+++ b/cmd/frostfs-node/tracing.go
@@ -6,6 +6,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
@@ -14,7 +15,7 @@ func initTracing(ctx context.Context, c *cfg) {
_, err := tracing.Setup(ctx, *conf)
if err != nil {
- c.log.Error("failed init tracing", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
}
c.closers = append(c.closers, closer{
@@ -24,7 +25,7 @@ func initTracing(ctx context.Context, c *cfg) {
defer cancel()
err := tracing.Shutdown(ctx) //cfg context cancels before close
if err != nil {
- c.log.Error("failed shutdown tracing", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
}
},
})
diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go
index 93a364471..b4f43acac 100644
--- a/cmd/frostfs-node/tree.go
+++ b/cmd/frostfs-node/tree.go
@@ -6,6 +6,7 @@ import (
"time"
treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
@@ -37,7 +38,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
func initTreeService(c *cfg) {
treeConfig := treeconfig.Tree(c.appCfg)
if !treeConfig.Enabled() {
- c.log.Info("tree service is not enabled, skip initialization")
+ c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
return
}
@@ -68,7 +69,7 @@ func initTreeService(c *cfg) {
addNewEpochNotificationHandler(c, func(_ event.Event) {
err := c.treeService.SynchronizeAll()
if err != nil {
- c.log.Error("could not synchronize Tree Service", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
}
})
} else {
@@ -79,7 +80,7 @@ func initTreeService(c *cfg) {
for range tick.C {
err := c.treeService.SynchronizeAll()
if err != nil {
- c.log.Error("could not synchronize Tree Service", zap.Error(err))
+ c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
if errors.Is(err, tree.ErrShuttingDown) {
return
}
@@ -92,11 +93,11 @@ func initTreeService(c *cfg) {
ev := e.(containerEvent.DeleteSuccess)
// This is executed asynchronously, so we don't care about the operation taking some time.
- c.log.Debug("removing all trees for container", zap.Stringer("cid", ev.ID))
+ c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
err := c.treeService.DropTree(context.Background(), ev.ID, "")
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
- c.log.Error("container removal event received, but trees weren't removed",
+ c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
zap.String("error", err.Error()))
}
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
new file mode 100644
index 000000000..46ed8e867
--- /dev/null
+++ b/internal/logs/logs.go
@@ -0,0 +1,643 @@
+package logs
+
+const (
+ InnerringAmountCanNotBeRepresentedAsAnInt64 = "amount can not be represented as an int64" // Error in ../node/pkg/innerring/settlement.go
+ InnerringCantGetUsedSpaceEstimation = "can't get used space estimation" // Warn in ../node/pkg/innerring/settlement.go
+ InnerringSubnetCreationQueueFailure = "subnet creation queue failure" // Error in ../node/pkg/innerring/subnet.go
+ InnerringDiscardSubnetCreation = "discard subnet creation" // Info in ../node/pkg/innerring/subnet.go
+ InnerringApproveSubnetCreation = "approve subnet creation" // Error in ../node/pkg/innerring/subnet.go
+ InnerringSubnetRemovalHandlingFailure = "subnet removal handling failure" // Error in ../node/pkg/innerring/subnet.go
+ InnerringGettingNetmapCandidates = "getting netmap candidates" // Error in ../node/pkg/innerring/subnet.go
+ InnerringUnmarshallingRemovedSubnetID = "unmarshalling removed subnet ID" // Error in ../node/pkg/innerring/subnet.go
+ InnerringIteratingNodesSubnets = "iterating node's subnets" // Error in ../node/pkg/innerring/subnet.go
+ InnerringRemovingNodeFromNetmapCandidates = "removing node from netmap candidates" // Debug in ../node/pkg/innerring/subnet.go
+ InnerringRemovingNodeFromCandidates = "removing node from candidates" // Error in ../node/pkg/innerring/subnet.go
+ InnerringRemovingSubnetFromTheNode = "removing subnet from the node" // Debug in ../node/pkg/innerring/subnet.go
+ InnerringUpdatingSubnetInfo = "updating subnet info" // Error in ../node/pkg/innerring/subnet.go
+ InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations" // Debug in ../node/pkg/innerring/blocktimer.go
+ InnerringCantStopEpochEstimation = "can't stop epoch estimation" // Warn in ../node/pkg/innerring/blocktimer.go
+ InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain" // Error in ../node/pkg/innerring/notary.go
+ InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain" // Error in ../node/pkg/innerring/notary.go
+ InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/innerring/notary.go
+ InnerringCantGetInnerRingIndex = "can't get inner ring index" // Error in ../node/pkg/innerring/state.go
+ InnerringCantGetInnerRingSize = "can't get inner ring size" // Error in ../node/pkg/innerring/state.go
+ InnerringCantGetAlphabetIndex = "can't get alphabet index" // Error in ../node/pkg/innerring/state.go
+ InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range" // Info in ../node/pkg/innerring/state.go
+ InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list" // Info in ../node/pkg/innerring/state.go
+ InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract" // Warn in ../node/pkg/innerring/state.go
+ InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number" // Warn in ../node/pkg/innerring/initialization.go
+ InnerringNotarySupport = "notary support" // Info in ../node/pkg/innerring/initialization.go
+ InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled" // Debug in ../node/pkg/innerring/initialization.go
+ InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled" // Info in ../node/pkg/innerring/initialization.go
+ InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/pkg/innerring/initialization.go
+ InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global" // Info in ../node/pkg/innerring/initialization.go
+ InnerringCantVoteForPreparedValidators = "can't vote for prepared validators" // Warn in ../node/pkg/innerring/innerring.go
+ InnerringNewBlock = "new block" // Debug in ../node/pkg/innerring/innerring.go
+ InnerringCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/pkg/innerring/innerring.go
+ InnerringCloserError = "closer error" // Warn in ../node/pkg/innerring/innerring.go
+ InnerringReadConfigFromBlockchain = "read config from blockchain" // Debug in ../node/pkg/innerring/innerring.go
+ InnerringCantSetupRemoteConnection = "can't setup remote connection" // Warn in ../node/pkg/innerring/rpc.go
+ InnerringCantGetStorageGroupObject = "can't get storage group object" // Warn in ../node/pkg/innerring/rpc.go
+ NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications" // Debug in ../node/pkg/services/notificator/service.go
+ NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification" // Debug in ../node/pkg/services/notificator/service.go
+ PolicerCouldNotGetContainer = "could not get container" // Error in ../node/pkg/services/policer/check.go
+ PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container" // Error in ../node/pkg/services/policer/check.go
+ PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object" // Error in ../node/pkg/services/policer/check.go
+ PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected" // Info in ../node/pkg/services/policer/check.go
+ PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance" // Error in ../node/pkg/services/policer/check.go
+ PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK" // Debug in ../node/pkg/services/policer/check.go
+ PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected" // Debug in ../node/pkg/services/policer/check.go
+ PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy" // Debug in ../node/pkg/services/policer/check.go
+ PolicerRoutineStopped = "routine stopped" // Info in ../node/pkg/services/policer/process.go
+ PolicerFailureAtObjectSelectForReplication = "failure at object select for replication" // Warn in ../node/pkg/services/policer/process.go
+ PolicerPoolSubmission = "pool submission" // Warn in ../node/pkg/services/policer/process.go
+ PolicerTuneReplicationCapacity = "tune replication capacity" // Debug in ../node/pkg/services/policer/process.go
+ ReplicatorFinishWork = "finish work" // Debug in ../node/pkg/services/replicator/process.go
+ ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go
+ ReplicatorCouldNotReplicateObject = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go
+ ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" // Debug in ../node/pkg/services/replicator/process.go
+ SessionServingRequest = "serving request..." // Debug in ../node/pkg/services/session/executor.go
+ TreeRedirectingTreeServiceQuery = "redirecting tree service query" // Debug in ../node/pkg/services/tree/redirect.go
+ TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL" // Debug in ../node/pkg/services/tree/signature.go
+ TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
+ TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
+ TreeSynchronizeTree = "synchronize tree" // Debug in ../node/pkg/services/tree/sync.go
+ TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes" // Warn in ../node/pkg/services/tree/sync.go
+ TreeSyncingTrees = "syncing trees..." // Debug in ../node/pkg/services/tree/sync.go
+ TreeCouldNotFetchContainers = "could not fetch containers" // Error in ../node/pkg/services/tree/sync.go
+ TreeTreesHaveBeenSynchronized = "trees have been synchronized" // Debug in ../node/pkg/services/tree/sync.go
+ TreeSyncingContainerTrees = "syncing container trees..." // Debug in ../node/pkg/services/tree/sync.go
+ TreeCouldNotSyncTrees = "could not sync trees" // Error in ../node/pkg/services/tree/sync.go
+ TreeContainerTreesHaveBeenSynced = "container trees have been synced" // Debug in ../node/pkg/services/tree/sync.go
+ TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization" // Error in ../node/pkg/services/tree/sync.go
+ TreeRemovingRedundantTrees = "removing redundant trees..." // Debug in ../node/pkg/services/tree/sync.go
+ TreeCouldNotRemoveRedundantTree = "could not remove redundant tree" // Error in ../node/pkg/services/tree/sync.go
+ TreeCouldNotCalculateContainerNodes = "could not calculate container nodes" // Error in ../node/pkg/services/tree/sync.go
+ TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation" // Error in ../node/pkg/services/tree/replicator.go
+ TreeDoNotSendUpdateToTheNode = "do not send update to the node" // Debug in ../node/pkg/services/tree/replicator.go
+ TreeFailedToSentUpdateToTheNode = "failed to sent update to the node" // Warn in ../node/pkg/services/tree/replicator.go
+ TreeErrorDuringReplication = "error during replication" // Error in ../node/pkg/services/tree/replicator.go
+ PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage" // Error in ../node/pkg/services/session/storage/persistent/storage.go
+ PersistentCouldNotDeleteSToken = "could not delete token" // Error in ../node/pkg/services/session/storage/persistent/storage.go
+ PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens" // Error in ../node/pkg/services/session/storage/persistent/storage.go
+ CommonStartBuildingManagers = "start building managers" // Debug in ../node/pkg/services/reputation/common/managers.go
+ ControllerReportIsAlreadyStarted = "report is already started" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
+ ControllerStartingToReportLocalTrustValues = "starting to report local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
+ ControllerCouldNotInitializeIteratorOverLocalTrustValues = "could not initialize iterator over local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
+ ControllerCouldNotInitializeLocalTrustTarget = "could not initialize local trust target" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
+ ControllerIteratorOverLocalTrustFailed = "iterator over local trust failed" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
+ ControllerCouldNotFinishWritingLocalTrustValues = "could not finish writing local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
+ ControllerReportingSuccessfullyFinished = "reporting successfully finished" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
+ ControllerReportingSuccessfullyInterrupted = "reporting successfully interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
+ ControllerReportingIsNotStartedOrAlreadyInterrupted = "reporting is not started or already interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
+ RoutesBuildingNextStageForLocalTrustRoute = "building next stage for local trust route" // Debug in ../node/pkg/services/reputation/local/routes/calls.go
+ CalculatorFailedToGetAlphaParam = "failed to get alpha param" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorConsumersTrustIteratorsInitFailure = "consumers trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorWorkerPoolSubmitFailure = "worker pool submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorIterateDaughtersConsumersFailed = "iterate daughter's consumers failed" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorGetInitialTrustFailure = "get initial trust failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorDaughterTrustIteratorsInitFailure = "daughter trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorIterateOverDaughtersTrustsFailure = "iterate over daughter's trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorInitWriterFailure = "init writer failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorWriteFinalResultFailure = "write final result failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorWriteValueFailure = "write value failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorIterateDaughterTrustsFailure = "iterate daughter trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorCouldNotCloseWriter = "could not close writer" // Error in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorAllDaughtersTrustIteratorsInitFailure = "all daughters trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ CalculatorIterateOverAllDaughtersFailure = "iterate over all daughters failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
+ ControllerCouldNotGetEigenTrustIterationNumber = "could not get EigenTrust iteration number" // Error in ../node/pkg/services/reputation/eigentrust/controller/calls.go
+ ControllerIterationSubmitFailure = "iteration submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/controller/calls.go
+ RoutesBuildingNextStageForTrustRoute = "building next stage for trust route" // Debug in ../node/pkg/services/reputation/eigentrust/routes/calls.go
+ RouterCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/reputation/common/router/calls.go
+ RouterCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go
+ RouterCouldNotWriteTheValue = "could not write the value" // Debug in ../node/pkg/services/reputation/common/router/calls.go
+ RouterCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go
+ TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
+ TombstoneCouldNotParseTombstoneExpirationEpoch = "tombstone getter: could not parse tombstone expiration epoch" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
+ DeleteRequestIsNotRolledOverToTheContainer = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go
+ DeleteServingRequest = "serving request..." // Debug in ../node/pkg/services/object/delete/delete.go
+ DeleteOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/delete/delete.go
+ DeleteOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/delete/delete.go
+ DeleteCouldNotComposeSplitInfo = "could not compose split info" // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY" // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteAssemblingChain = "assembling chain..." // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteCouldNotGetPreviousSplitElement = "could not get previous split element" // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteCollectingChildren = "collecting children..." // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteCouldNotCollectObjectChildren = "could not collect object children" // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteSupplementBySplitID = "supplement by split ID" // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteCouldNotSearchForSplitChainMembers = "could not search for split chain members" // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteCouldNotMarshalTombstoneStructure = "could not marshal tombstone structure" // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteCouldNotSaveTheTombstone = "could not save the tombstone" // Debug in ../node/pkg/services/object/delete/exec.go
+ DeleteFormingTombstoneStructure = "forming tombstone structure..." // Debug in ../node/pkg/services/object/delete/local.go
+ DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..." // Debug in ../node/pkg/services/object/delete/local.go
+ DeleteCouldNotReadTombstoneLifetimeConfig = "could not read tombstone lifetime config" // Debug in ../node/pkg/services/object/delete/local.go
+ DeleteFormingSplitInfo = "forming split info..." // Debug in ../node/pkg/services/object/delete/local.go
+ DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..." // Debug in ../node/pkg/services/object/delete/local.go
+ DeleteMembersSuccessfullyCollected = "members successfully collected" // Debug in ../node/pkg/services/object/delete/local.go
+ GetProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/get/remote.go
+ GetRemoteCallFailed = "remote call failed" // Debug in ../node/pkg/services/object/get/remote.go
+ GetCanNotAssembleTheObject = "can not assemble the object" // Debug in ../node/pkg/services/object/get/assemble.go
+ GetTryingToAssembleTheObject = "trying to assemble the object..." // Debug in ../node/pkg/services/object/get/assemble.go
+ GetAssemblingSplittedObject = "assembling splitted object..." // Debug in ../node/pkg/services/object/get/assemble.go
+ GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" // Debug in ../node/pkg/services/object/get/assemble.go
+ GetFailedToAssembleSplittedObject = "failed to assemble splitted object" // Warn in ../node/pkg/services/object/get/assemble.go
+ GetCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/get/exec.go
+ GetCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/get/exec.go
+ GetCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/get/exec.go
+ GetCouldNotWriteHeader = "could not write header" // Debug in ../node/pkg/services/object/get/exec.go
+ GetCouldNotWritePayloadChunk = "could not write payload chunk" // Debug in ../node/pkg/services/object/get/exec.go
+ GetLocalGetFailed = "local get failed" // Debug in ../node/pkg/services/object/get/local.go
+ GetReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/get/container.go
+ GetTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/get/container.go
+ GetProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/get/container.go
+ GetNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/get/container.go
+ GetInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/get/container.go
+ GetCompletingTheOperation = "completing the operation" // Debug in ../node/pkg/services/object/get/container.go
+ GetServingRequest = "serving request..." // Debug in ../node/pkg/services/object/get/get.go
+ GetOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/get/get.go
+ GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed" // Debug in ../node/pkg/services/object/get/get.go
+ GetRequestedObjectIsVirtual = "requested object is virtual" // Debug in ../node/pkg/services/object/get/get.go
+ GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds" // Debug in ../node/pkg/services/object/get/get.go
+ GetOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/get/get.go
+ PutAdditionalContainerBroadcastFailure = "additional container broadcast failure" // Error in ../node/pkg/services/object/put/distributed.go
+ SearchReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/search/container.go
+ SearchTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/search/container.go
+ SearchProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/search/container.go
+ SearchNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/search/container.go
+ SearchInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/search/container.go
+ SearchProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/search/container.go
+ SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/search/container.go
+ SearchRemoteOperationFailed = "remote operation failed" // Debug in ../node/pkg/services/object/search/container.go
+ SearchCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/search/exec.go
+ SearchCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/search/exec.go
+ SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" // Debug in ../node/pkg/services/object/search/exec.go
+ SearchLocalOperationFailed = "local operation failed" // Debug in ../node/pkg/services/object/search/local.go
+ SearchServingRequest = "serving request..." // Debug in ../node/pkg/services/object/search/search.go
+ SearchOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/search/search.go
+ SearchOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/search/search.go
+ UtilObjectServiceError = "object service error" // Error in ../node/pkg/services/object/util/log.go
+ UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" // Error in ../node/pkg/services/object/util/log.go
+ V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
+ V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
+ NatsNatsConnectionWasLost = "nats: connection was lost" // Error in ../node/pkg/services/notificator/nats/service.go
+ NatsNatsReconnectedToTheServer = "nats: reconnected to the server" // Warn in ../node/pkg/services/notificator/nats/service.go
+ NatsNatsClosingConnectionAsTheContextIsDone = "nats: closing connection as the context is done" // Info in ../node/pkg/services/notificator/nats/service.go
+ ControllerStartingToAnnounceTheValuesOfTheMetrics = "starting to announce the values of the metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics = "could not initialize iterator over locally collected metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerCouldNotInitializeAnnouncementAccumulator = "could not initialize announcement accumulator" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerIteratorOverLocallyCollectedMetricsAborted = "iterator over locally collected metrics aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerCouldNotFinishWritingLocalAnnouncements = "could not finish writing local announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerTrustAnnouncementSuccessfullyFinished = "trust announcement successfully finished" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerAnnouncementIsAlreadyStarted = "announcement is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerAnnouncementSuccessfullyInterrupted = "announcement successfully interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerAnnouncementIsNotStartedOrAlreadyInterrupted = "announcement is not started or already interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements = "could not initialize iterator over locally accumulated announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerCouldNotInitializeResultTarget = "could not initialize result target" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerIteratorOverLocalAnnouncementsAborted = "iterator over local announcements aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ ControllerCouldNotFinishWritingLoadEstimations = "could not finish writing load estimations" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
+ RouteCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
+ RouteCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
+ RouteCouldNotPutTheValue = "could not put the value" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
+ RouteCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
+ AuditorCouldNotGetObjectHeaderFromCandidate = "could not get object header from candidate" // Debug in ../node/pkg/services/audit/auditor/pop.go
+ AuditorCouldNotBuildPlacementForObject = "could not build placement for object" // Debug in ../node/pkg/services/audit/auditor/pop.go
+ AuditorCantHeadObject = "can't head object" // Debug in ../node/pkg/services/audit/auditor/por.go
+ AuditorCantConcatenateTzHash = "can't concatenate tz hash" // Debug in ../node/pkg/services/audit/auditor/por.go
+ AuditorStorageGroupSizeCheckFailed = "storage group size check failed" // Debug in ../node/pkg/services/audit/auditor/por.go
+ AuditorStorageGroupTzHashCheckFailed = "storage group tz hash check failed" // Debug in ../node/pkg/services/audit/auditor/por.go
+ AuditorCantBuildPlacementForStorageGroupMember = "can't build placement for storage group member" // Info in ../node/pkg/services/audit/auditor/por.go
+ AuditorAuditContextIsDone = "audit context is done" // Debug in ../node/pkg/services/audit/auditor/context.go
+ AuditorWritingAuditReport = "writing audit report..." // Debug in ../node/pkg/services/audit/auditor/context.go
+ AuditorCouldNotWriteAuditReport = "could not write audit report" // Error in ../node/pkg/services/audit/auditor/context.go
+ AuditorSleepBeforeGetRangeHash = "sleep before get range hash" // Debug in ../node/pkg/services/audit/auditor/pdp.go
+ AuditorCouldNotGetPayloadRangeHash = "could not get payload range hash" // Debug in ../node/pkg/services/audit/auditor/pdp.go
+ TaskmanagerProcessRoutine = "process routine" // Info in ../node/pkg/services/audit/taskmanager/listen.go
+ TaskmanagerStopListenerByContext = "stop listener by context" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
+ TaskmanagerQueueChannelIsClosed = "queue channel is closed" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
+ TaskmanagerCouldNotGeneratePDPWorkerPool = "could not generate PDP worker pool" // Error in ../node/pkg/services/audit/taskmanager/listen.go
+ TaskmanagerCouldNotGeneratePoRWorkerPool = "could not generate PoR worker pool" // Error in ../node/pkg/services/audit/taskmanager/listen.go
+ TaskmanagerCouldNotSubmitAuditTask = "could not submit audit task" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
+ ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
+ ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
+ ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
+ ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node" // Warn in ../node/pkg/morph/client/multi.go
+ ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established" // Info in ../node/pkg/morph/client/multi.go
+ ClientSwitchingToTheNextRPCNode = "switching to the next RPC node" // Warn in ../node/pkg/morph/client/multi.go
+ ClientCouldNotEstablishConnectionToAnyRPCNode = "could not establish connection to any RPC node" // Error in ../node/pkg/morph/client/multi.go
+ ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node" // Warn in ../node/pkg/morph/client/multi.go
+ ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC" // Info in ../node/pkg/morph/client/multi.go
+ ClientCouldNotRestoreSideChainSubscriptionsUsingNode = "could not restore side chain subscriptions using node" // Warn in ../node/pkg/morph/client/multi.go
+ ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/morph/client/notary.go
+ ClientNotaryDepositInvoke = "notary deposit invoke" // Info in ../node/pkg/morph/client/notary.go
+ ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked" // Debug in ../node/pkg/morph/client/notary.go
+ ClientNotaryRequestInvoked = "notary request invoked" // Debug in ../node/pkg/morph/client/notary.go
+ ClientNeoClientInvoke = "neo client invoke" // Debug in ../node/pkg/morph/client/client.go
+ ClientNativeGasTransferInvoke = "native gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
+ ClientBatchGasTransferInvoke = "batch gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
+ ClientCantGetBlockchainHeight = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
+ ClientCantGetBlockchainHeight243 = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
+ EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" // Warn in ../node/pkg/morph/event/utils.go
+ EventCouldNotStartListenToEvents = "could not start listen to events" // Error in ../node/pkg/morph/event/listener.go
+ EventStopEventListenerByError = "stop event listener by error" // Error in ../node/pkg/morph/event/listener.go
+ EventStopEventListenerByContext = "stop event listener by context" // Info in ../node/pkg/morph/event/listener.go
+ EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" // Warn in ../node/pkg/morph/event/listener.go
+ EventNilNotificationEventWasCaught = "nil notification event was caught" // Warn in ../node/pkg/morph/event/listener.go
+ EventStopEventListenerByNotaryChannel = "stop event listener by notary channel" // Warn in ../node/pkg/morph/event/listener.go
+ EventNilNotaryEventWasCaught = "nil notary event was caught" // Warn in ../node/pkg/morph/event/listener.go
+ EventStopEventListenerByBlockChannel = "stop event listener by block channel" // Warn in ../node/pkg/morph/event/listener.go
+ EventNilBlockWasCaught = "nil block was caught" // Warn in ../node/pkg/morph/event/listener.go
+ EventListenerWorkerPoolDrained = "listener worker pool drained" // Warn in ../node/pkg/morph/event/listener.go
+ EventEventParserNotSet = "event parser not set" // Debug in ../node/pkg/morph/event/listener.go
+ EventCouldNotParseNotificationEvent = "could not parse notification event" // Warn in ../node/pkg/morph/event/listener.go
+ EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
+ EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event" // Warn in ../node/pkg/morph/event/listener.go
+ EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event" // Warn in ../node/pkg/morph/event/listener.go
+ EventNotaryParserNotSet = "notary parser not set" // Debug in ../node/pkg/morph/event/listener.go
+ EventCouldNotParseNotaryEvent = "could not parse notary event" // Warn in ../node/pkg/morph/event/listener.go
+ EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
+ EventIgnoreNilEventParser = "ignore nil event parser" // Info in ../node/pkg/morph/event/listener.go
+ EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser" // Warn in ../node/pkg/morph/event/listener.go
+ EventRegisteredNewEventParser = "registered new event parser" // Debug in ../node/pkg/morph/event/listener.go
+ EventIgnoreNilEventHandler = "ignore nil event handler" // Warn in ../node/pkg/morph/event/listener.go
+ EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
+ EventRegisteredNewEventHandler = "registered new event handler" // Debug in ../node/pkg/morph/event/listener.go
+ EventIgnoreNilNotaryEventParser = "ignore nil notary event parser" // Info in ../node/pkg/morph/event/listener.go
+ EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser" // Warn in ../node/pkg/morph/event/listener.go
+ EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler" // Warn in ../node/pkg/morph/event/listener.go
+ EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
+ EventIgnoreNilBlockHandler = "ignore nil block handler" // Warn in ../node/pkg/morph/event/listener.go
+ SubscriberUnsubscribeForNotification = "unsubscribe for notification" // Error in ../node/pkg/morph/subscriber/subscriber.go
+ SubscriberRemoteNotificationChannelHasBeenClosed = "remote notification channel has been closed" // Warn in ../node/pkg/morph/subscriber/subscriber.go
+ SubscriberCantCastNotifyEventValueToTheNotifyStruct = "can't cast notify event value to the notify struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
+ SubscriberNewNotificationEventFromSidechain = "new notification event from sidechain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
+ SubscriberCantCastBlockEventValueToBlock = "can't cast block event value to block" // Error in ../node/pkg/morph/subscriber/subscriber.go
+ SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct = "can't cast notify event value to the notary request struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
+ SubscriberUnsupportedNotificationFromTheChain = "unsupported notification from the chain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
+ BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+ BlobovniczaOpeningBoltDB = "opening BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+ BlobovniczaInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+ BlobovniczaAlreadyInitialized = "already initialized" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+ BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+ BlobovniczaClosingBoltDB = "closing BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
+ BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket" // Debug in ../node/pkg/local_object_storage/blobovnicza/delete.go
+ BlobstorOpening = "opening..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
+ BlobstorInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
+ BlobstorClosing = "closing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
+ BlobstorCouldntCloseStorage = "couldn't close storage" // Info in ../node/pkg/local_object_storage/blobstor/control.go
+ BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking" // Warn in ../node/pkg/local_object_storage/blobstor/exists.go
+ BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration" // Warn in ../node/pkg/local_object_storage/blobstor/iterate.go
+ EngineShardHasBeenRemoved = "shard has been removed" // Info in ../node/pkg/local_object_storage/engine/shards.go
+ EngineCouldNotCloseRemovedShard = "could not close removed shard" // Error in ../node/pkg/local_object_storage/engine/shards.go
+ EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
+ EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard" // Error in ../node/pkg/local_object_storage/engine/control.go
+ EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
+ EngineCouldNotCloseShard = "could not close shard" // Debug in ../node/pkg/local_object_storage/engine/control.go
+ EngineCouldNotReloadAShard = "could not reload a shard" // Error in ../node/pkg/local_object_storage/engine/control.go
+ EngineAddedNewShard = "added new shard" // Info in ../node/pkg/local_object_storage/engine/control.go
+ EngineCouldNotMarkObjectForShardRelocation = "could not mark object for shard relocation" // Warn in ../node/pkg/local_object_storage/engine/put.go
+ EngineCouldNotPutObjectToShard = "could not put object to shard" // Warn in ../node/pkg/local_object_storage/engine/put.go
+ EngineErrorDuringSearchingForObjectChildren = "error during searching for object children" // Warn in ../node/pkg/local_object_storage/engine/delete.go
+ EngineCouldNotInhumeObjectInShard = "could not inhume object in shard" // Debug in ../node/pkg/local_object_storage/engine/delete.go
+ EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies" // Info in ../node/pkg/local_object_storage/engine/remove_copies.go
+ EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine" // Debug in ../node/pkg/local_object_storage/engine/remove_copies.go
+ EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies" // Error in ../node/pkg/local_object_storage/engine/remove_copies.go
+ EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check" // Warn in ../node/pkg/local_object_storage/engine/inhume.go
+ EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
+ EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
+ EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" // Error in ../node/pkg/local_object_storage/engine/engine.go
+ EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" // Error in ../node/pkg/local_object_storage/engine/engine.go
+ EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
+ EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
+ EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request" // Debug in ../node/pkg/local_object_storage/engine/engine.go
+ EngineStartedShardsEvacuation = "started shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
+ EngineFinishedShardsEvacuation = "finished shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
+ EngineObjectIsMovedToAnotherShard = "object is moved to another shard" // Debug in ../node/pkg/local_object_storage/engine/evacuate.go
+ MetabaseMissingMatcher = "missing matcher" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+ MetabaseErrorInFKBTSelection = "error in FKBT selection" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+ MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+ MetabaseUnknownOperation = "unknown operation" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+ MetabaseCantIterateOverTheBucket = "can't iterate over the bucket" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+ MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets" // Debug in ../node/pkg/local_object_storage/metabase/select.go
+ MetabaseCreatedDirectoryForMetabase = "created directory for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
+ MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
+ MetabaseCheckingMetabaseVersion = "checking metabase version" // Debug in ../node/pkg/local_object_storage/metabase/control.go
+ ShardCantSelectAllObjects = "can't select all objects" // Debug in ../node/pkg/local_object_storage/shard/list.go
+ ShardSettingShardMode = "setting shard mode" // Info in ../node/pkg/local_object_storage/shard/mode.go
+ ShardShardModeSetSuccessfully = "shard mode set successfully" // Info in ../node/pkg/local_object_storage/shard/mode.go
+ ShardCouldNotMarkObjectForShardRelocationInMetabase = "could not mark object for shard relocation in metabase" // Debug in ../node/pkg/local_object_storage/shard/move.go
+ ShardCantDeleteObjectFromWriteCache = "can't delete object from write cache" // Warn in ../node/pkg/local_object_storage/shard/delete.go
+ ShardCantGetStorageIDFromMetabase = "can't get storage ID from metabase" // Debug in ../node/pkg/local_object_storage/shard/delete.go
+ ShardCantRemoveObjectFromBlobStor = "can't remove object from blobStor" // Debug in ../node/pkg/local_object_storage/shard/delete.go
+ ShardFetchingObjectWithoutMeta = "fetching object without meta" // Warn in ../node/pkg/local_object_storage/shard/get.go
+ ShardObjectIsMissingInWritecache = "object is missing in write-cache" // Debug in ../node/pkg/local_object_storage/shard/get.go
+ ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache" // Error in ../node/pkg/local_object_storage/shard/get.go
+ ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor" // Debug in ../node/pkg/local_object_storage/shard/put.go
+ ShardMetaObjectCounterRead = "meta: object counter read" // Warn in ../node/pkg/local_object_storage/shard/shard.go
+ ShardMetaCantReadContainerList = "meta: can't read container list" // Warn in ../node/pkg/local_object_storage/shard/shard.go
+ ShardMetaCantReadContainerSize = "meta: can't read container size" // Warn in ../node/pkg/local_object_storage/shard/shard.go
+ ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode" // Error in ../node/pkg/local_object_storage/shard/control.go
+ ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode" // Error in ../node/pkg/local_object_storage/shard/control.go
+ ShardCouldNotUnmarshalObject = "could not unmarshal object" // Warn in ../node/pkg/local_object_storage/shard/control.go
+ ShardCouldNotCloseShardComponent = "could not close shard component" // Error in ../node/pkg/local_object_storage/shard/control.go
+ ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode" // Error in ../node/pkg/local_object_storage/shard/control.go
+ ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode" // Error in ../node/pkg/local_object_storage/shard/control.go
+ ShardTryingToRestoreReadwriteMode = "trying to restore read-write mode" // Info in ../node/pkg/local_object_storage/shard/control.go
+ ShardStopEventListenerByClosedChannel = "stop event listener by closed channel" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardGCIsStopped = "GC is stopped" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+ ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..." // Info in ../node/pkg/local_object_storage/shard/gc.go
+ ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardCouldNotDeleteTheObjects = "could not delete the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardCouldNotInhumeTheObjects = "could not inhume the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardStartedExpiredTombstonesHandling = "started expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+ ShardIteratingTombstones = "iterating tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+ ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+ ShardIteratorOverGraveyardFailed = "iterator over graveyard failed" // Error in ../node/pkg/local_object_storage/shard/gc.go
+ ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+ ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
+ ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardFailureToUnlockObjects = "failure to unlock objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
+ ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" // Debug in ../node/pkg/local_object_storage/shard/inhume.go
+ WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache" // Debug in ../node/pkg/local_object_storage/writecache/flush.go
+ WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" // Info in ../node/pkg/local_object_storage/writecache/mode.go
+ WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree" // Info in ../node/pkg/local_object_storage/writecache/init.go
+ WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
+ WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database" // Info in ../node/pkg/local_object_storage/writecache/init.go
+ WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
+ WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database" // Error in ../node/pkg/local_object_storage/writecache/storage.go
+ WritecacheCantParseAddress = "can't parse address" // Error in ../node/pkg/local_object_storage/writecache/storage.go
+ WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" // Error in ../node/pkg/local_object_storage/writecache/storage.go
+ BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+ BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+ BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+ BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza" // Error in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+ BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+ BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+ BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+ BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+ BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+ BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+ BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+ BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+ BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+ BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+ BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+ BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+ BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+ BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+ BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+ BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+ BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+ AlphabetTick = "tick" // Info in ../node/pkg/innerring/processors/alphabet/handlers.go
+ AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained" // Warn in ../node/pkg/innerring/processors/alphabet/handlers.go
+ AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
+ AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
+ AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+ AlphabetStorageNodeEmissionIsOff = "storage node emission is off" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
+ AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+ AlphabetGasEmission = "gas emission" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
+ AlphabetCantParseNodePublicKey = "can't parse node public key" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+ AlphabetCantTransferGas = "can't transfer gas" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+ AlphabetCantTransferGasToWallet = "can't transfer gas to wallet" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
+ AlphabetAlphabetWorkerPool = "alphabet worker pool" // Debug in ../node/pkg/innerring/processors/alphabet/processor.go
+ AuditContainerListingFinished = "container listing finished" // Debug in ../node/pkg/innerring/processors/audit/scheduler.go
+ AuditNewRoundOfAudit = "new round of audit" // Info in ../node/pkg/innerring/processors/audit/handlers.go
+ AuditPreviousRoundOfAuditPrepareHasntFinishedYet = "previous round of audit prepare hasn't finished yet" // Warn in ../node/pkg/innerring/processors/audit/handlers.go
+ AuditSomeTasksFromPreviousEpochAreSkipped = "some tasks from previous epoch are skipped" // Info in ../node/pkg/innerring/processors/audit/process.go
+ AuditContainerSelectionFailure = "container selection failure" // Error in ../node/pkg/innerring/processors/audit/process.go
+ AuditSelectContainersForAudit = "select containers for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
+ AuditCantFetchNetworkMap = "can't fetch network map" // Error in ../node/pkg/innerring/processors/audit/process.go
+ AuditCantGetContainerInfoIgnore = "can't get container info, ignore" // Error in ../node/pkg/innerring/processors/audit/process.go
+ AuditCantBuildPlacementForContainerIgnore = "can't build placement for container, ignore" // Info in ../node/pkg/innerring/processors/audit/process.go
+ AuditSelectStorageGroupsForAudit = "select storage groups for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
+ AuditFilterExpiredStorageGroupsForAudit = "filter expired storage groups for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
+ AuditParseClientNodeInfo = "parse client node info" // Warn in ../node/pkg/innerring/processors/audit/process.go
+ AuditErrorInStorageGroupSearch = "error in storage group search" // Warn in ../node/pkg/innerring/processors/audit/process.go
+ AuditCouldNotGetStorageGroupObjectForAuditSkipping = "could not get storage group object for audit, skipping" // Error in ../node/pkg/innerring/processors/audit/process.go
+ BalanceNotification = "notification" // Info in ../node/pkg/innerring/processors/balance/handlers.go
+ BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
+ BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
+ BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
+ BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
+ ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
+ ContainerNotification = "notification" // Info in ../node/pkg/innerring/processors/container/handlers.go
+ ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
+ ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
+ ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
+ ContainerCouldNotApprovePutContainer = "could not approve put container" // Error in ../node/pkg/innerring/processors/container/process_container.go
+ ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" // Info in ../node/pkg/innerring/processors/container/process_container.go
+ ContainerDeleteContainerCheckFailed = "delete container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
+ ContainerCouldNotApproveDeleteContainer = "could not approve delete container" // Error in ../node/pkg/innerring/processors/container/process_container.go
+ ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL" // Info in ../node/pkg/innerring/processors/container/process_eacl.go
+ ContainerSetEACLCheckFailed = "set EACL check failed" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
+ ContainerCouldNotApproveSetEACL = "could not approve set EACL" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
+ FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind" // Info in ../node/pkg/innerring/processors/frostfs/process_bind.go
+ FrostFSInvalidManageKeyEvent = "invalid manage key event" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
+ FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
+ FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go
+ FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go
+ FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go
+ FrostFSNotification = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
+ FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
+ FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSDoubleMintEmissionDeclined = "double mint emission declined" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSCantCreateLockAccount = "can't create lock account" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
+ GovernanceNewEvent = "new event" // Info in ../node/pkg/innerring/processors/governance/handlers.go
+ GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained" // Warn in ../node/pkg/innerring/processors/governance/handlers.go
+ GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceFinishedAlphabetListUpdate = "finished alphabet list update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceUpdateOfTheInnerRingList = "update of the inner ring list" // Info in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+ GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract" // Error in ../node/pkg/innerring/processors/governance/process_update.go
+ NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go
+ NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
+ NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
+ NetmapNotification = "notification" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
+ NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go
+ NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+ NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+ NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+ NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+ NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
+ NetmapCantGetEpochDuration = "can't get epoch duration" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+ NetmapCantGetTransactionHeight = "can't get transaction height" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+ NetmapCantResetEpochTimer = "can't reset epoch timer" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+ NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+ NetmapCantStartContainerSizeEstimation = "can't start container size estimation" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
+ NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" // Info in ../node/pkg/innerring/processors/netmap/process_epoch.go
+ NetmapNextEpoch = "next epoch" // Debug in ../node/pkg/innerring/processors/netmap/process_epoch.go
+ NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" // Error in ../node/pkg/innerring/processors/netmap/process_epoch.go
+ NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapNonhaltNotaryTransaction = "non-halt notary transaction" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCantParseNetworkMapCandidate = "can't parse network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapApprovingNetworkMapCandidate = "approving network map candidate" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification = "non alphabet mode, ignore remove node from subnet notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCouldNotGetNetworkMapCandidates = "could not get network map candidates" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCouldNotUnmarshalSubnetId = "could not unmarshal subnet id" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapGotZeroSubnetInRemoveNodeNotification = "got zero subnet in remove node notification" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCouldNotIterateOverSubnetworksOfTheNode = "could not iterate over subnetworks of the node" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCouldNotInvokeNetmapUpdateState = "could not invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCouldNotInvokeNetmapAddPeer = "could not invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
+ ReputationNotification = "notification" // Info in ../node/pkg/innerring/processors/reputation/handlers.go
+ ReputationReputationWorkerPoolDrained = "reputation worker pool drained" // Warn in ../node/pkg/innerring/processors/reputation/handlers.go
+ ReputationNonAlphabetModeIgnoreReputationPutNotification = "non alphabet mode, ignore reputation put notification" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
+ ReputationIgnoreReputationValue = "ignore reputation value" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
+ ReputationCantSendApprovalTxForReputationValue = "can't send approval tx for reputation value" // Warn in ../node/pkg/innerring/processors/reputation/process_put.go
+ ReputationReputationWorkerPool = "reputation worker pool" // Debug in ../node/pkg/innerring/processors/reputation/processor.go
+ SettlementNonAlphabetModeIgnoreAuditPayments = "non alphabet mode, ignore audit payments" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementNewAuditSettlementEvent = "new audit settlement event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementIgnoreGenesisEpoch = "ignore genesis epoch" // Debug in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementCouldNotAddHandlerOfAuditEventToQueue = "could not add handler of AuditEvent to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementAuditEventHandlingSuccessfullyScheduled = "AuditEvent handling successfully scheduled" // Debug in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementNonAlphabetModeIgnoreIncomeCollectionEvent = "non alphabet mode, ignore income collection event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementStartBasicIncomeCollection = "start basic income collection" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementIncomeContextAlreadyExists = "income context already exists" // Error in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementCantCreateIncomeContext = "can't create income context" // Error in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementCouldNotAddHandlerOfBasicIncomeCollectionToQueue = "could not add handler of basic income collection to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementNonAlphabetModeIgnoreIncomeDistributionEvent = "non alphabet mode, ignore income distribution event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementStartBasicIncomeDistribution = "start basic income distribution" // Info in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementIncomeContextDistributionDoesNotExists = "income context distribution does not exists" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementCouldNotAddHandlerOfBasicIncomeDistributionToQueue = "could not add handler of basic income distribution to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
+ SettlementProcessAuditSettlements = "process audit settlements" // Info in ../node/pkg/innerring/processors/settlement/handlers.go
+ SettlementAuditProcessingFinished = "audit processing finished" // Info in ../node/pkg/innerring/processors/settlement/handlers.go
+ SettlementWorkerPoolForSettlementProcessorSuccessfullyInitialized = "worker pool for settlement processor successfully initialized" // Debug in ../node/pkg/innerring/processors/settlement/processor.go
+ AuditSettlementsAreIgnoredForZeroEpoch = "settlements are ignored for zero epoch" // Info in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCalculateAuditSettlements = "calculate audit settlements" // Info in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditGettingResultsForThePreviousEpoch = "getting results for the previous epoch" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCouldNotCollectAuditResults = "could not collect audit results" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditNoAuditResultsInPreviousEpoch = "no audit results in previous epoch" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCantFetchAuditFeeFromNetworkConfig = "can't fetch audit fee from network config" // Warn in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditProcessingAuditResults = "processing audit results" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditProcessingTransfers = "processing transfers" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditReadingInformationAboutTheContainer = "reading information about the container" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditBuildingPlacement = "building placement" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCollectingPassedNodes = "collecting passed nodes" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCalculatingSumOfTheSizesOfAllStorageGroups = "calculating sum of the sizes of all storage groups" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditFillingTransferTable = "filling transfer table" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditMissingContainerInAuditResult = "missing container in audit result" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCouldNotGetContainerInfo = "could not get container info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCouldNotGetContainerNodes = "could not get container nodes" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditEmptyListOfContainerNodes = "empty list of container nodes" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditNoneOfTheContainerNodesPassedTheAudit = "none of the container nodes passed the audit" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCouldNotGetSGInfo = "could not get storage group info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditZeroSumSGSize = "zero sum storage group size" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCouldNotResolvePublicKeyOfTheStorageNode = "could not resolve public key of the storage node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCalculatingStorageNodeSalaryForAudit = "calculating storage node salary for audit (GASe-12)" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ AuditCouldNotParsePublicKeyOfTheInnerRingNode = "could not parse public key of the inner ring node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
+ BasicCantGetBasicIncomeRate = "can't get basic income rate" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
+ BasicCantFetchContainerSizeEstimations = "can't fetch container size estimations" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
+ BasicCantFetchContainerInfo = "can't fetch container info" // Warn in ../node/pkg/innerring/processors/settlement/basic/collect.go
+ BasicCantFetchBalanceOfBankingAccount = "can't fetch balance of banking account" // Error in ../node/pkg/innerring/processors/settlement/basic/distribute.go
+ BasicCantTransformPublicKeyToOwnerID = "can't transform public key to owner id" // Warn in ../node/pkg/innerring/processors/settlement/basic/distribute.go
+ FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go
+ FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go
+ FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go
+ FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go
+ FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go
+ FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
+ FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
+ FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go
+ FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go
+ FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go
+ FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go
+ FrostFSNodeStartReportingReputationOnNewEpochEvent = "start reporting reputation on new epoch event" // Debug in ../node/cmd/frostfs-node/reputation.go
+ FrostFSNodeCouldNotFetchEpochDuration = "could not fetch epoch duration" // Debug in ../node/cmd/frostfs-node/reputation.go
+ FrostFSNodeCouldNotFetchIterationNumber = "could not fetch iteration number" // Debug in ../node/cmd/frostfs-node/reputation.go
+ FrostFSNodeCouldNotCreateFixedEpochTimer = "could not create fixed epoch timer" // Debug in ../node/cmd/frostfs-node/reputation.go
+ FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeTracingConfigationUpdated = "tracing configation updated" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go
+ FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go
+ FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
+ FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
+ FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go
+ FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go
+ FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go
+ FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go
+ FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go
+ FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go
+ FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go
+ FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go
+ FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go
+ FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go
+ FrostFSNodeWritingLocalReputationValues = "writing local reputation values" // Debug in ../node/cmd/frostfs-node/object.go
+ FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient = "could not get latest network map to overload the client" // Warn in ../node/cmd/frostfs-node/object.go
+ FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go
+ FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go
+ FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeClosingMorphComponents = "closing morph components..." // Info in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go
+ FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go
+ FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go
+ FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go
+ FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go
+ FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go
+ FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go
+ FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go
+ FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go
+ FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go
+ CommonApplicationStarted = "application started" // Info in ../node/cmd/frostfs-ir/main.go
+ CommonInitializingRemoteWriterProvider = "initializing remote writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
+ CommonRouteHasReachedDeadendProvider = "route has reached dead-end provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
+ CommonInitializingNoopWriterProvider = "initializing no-op writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
+ IntermediateWritingReceivedConsumersTrusts = "writing received consumer's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/consumers.go
+ IntermediateStartWritingGlobalTrustsToContract = "start writing global trusts to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
+ IntermediateFailedToSignGlobalTrust = "failed to sign global trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
+ IntermediateFailedToWriteGlobalTrustToContract = "failed to write global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
+ IntermediateSentGlobalTrustToContract = "sent global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
+ IntermediateWritingReceivedDaughtersTrusts = "writing received daughter's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/daughters.go
+ IntermediateAnnouncingTrust = "announcing trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/remote.go
+ LocalAnnouncingTrusts = "announcing trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/remote.go
+ LocalInitializingIteratorOverTrusts = "initializing iterator over trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/storage.go
+)
diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go
index eb74e44d4..94e262099 100644
--- a/pkg/innerring/blocktimer.go
+++ b/pkg/innerring/blocktimer.go
@@ -3,6 +3,7 @@ package innerring
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
@@ -98,7 +99,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
args.stopEstimationDDiv,
func() {
if !args.alphabetState.IsAlphabet() {
- args.l.Debug("non-alphabet mode, do not stop container estimations")
+ args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations)
return
}
@@ -112,7 +113,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
err := args.cnrWrapper.StopEstimation(prm)
if err != nil {
- args.l.Warn("can't stop epoch estimation",
+ args.l.Warn(logs.InnerringCantStopEpochEstimation,
zap.Uint64("epoch", epochN),
zap.String("error", err.Error()))
}
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 8db6328a2..2f5e89e39 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -6,6 +6,7 @@ import (
"fmt"
"net"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/balance"
@@ -129,7 +130,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
- s.log.Warn("can't get last processed main chain block number", zap.String("error", err.Error()))
+ s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
}
mainnetChain.from = fromMainChainBlock
@@ -177,7 +178,7 @@ func (s *Server) initNotaryConfig(cfg *viper.Viper) {
!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
)
- s.log.Info("notary support",
+ s.log.Info(logs.InnerringNotarySupport,
zap.Bool("sidechain_enabled", !s.sideNotaryConfig.disabled),
zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
)
@@ -275,7 +276,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
if s.withoutMainNet || cfg.GetBool("governance.disable") {
alphaSync = func(event.Event) {
- s.log.Debug("alphabet keys sync is disabled")
+ s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled)
}
} else {
// create governance processor
@@ -496,7 +497,7 @@ func (s *Server) initReputationProcessor(cfg *viper.Viper, sidechainFee fixedn.F
func (s *Server) initGRPCServer(cfg *viper.Viper) error {
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
if controlSvcEndpoint == "" {
- s.log.Info("no Control server endpoint specified, service is disabled")
+ s.log.Info(logs.InnerringNoControlServerEndpointSpecified)
return nil
}
@@ -692,7 +693,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
- s.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
+ s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
}
morphChain := &chainParams{
@@ -715,7 +716,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
return nil, err
}
if err := s.morphClient.SetGroupSignerScope(); err != nil {
- morphChain.log.Info("failed to set group signer scope, continue with Global", zap.Error(err))
+ morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
}
return morphChain, nil
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index a91d2fd0d..b6c5ae2ac 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"
@@ -168,7 +169,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
err = s.voteForSidechainValidator(prm)
if err != nil {
// we don't stop inner ring execution on this error
- s.log.Warn("can't vote for prepared validators",
+ s.log.Warn(logs.InnerringCantVoteForPreparedValidators,
zap.String("error", err.Error()))
}
@@ -210,13 +211,13 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
func (s *Server) registerMorphNewBlockEventHandler() {
s.morphListener.RegisterBlockHandler(func(b *block.Block) {
- s.log.Debug("new block",
+ s.log.Debug(logs.InnerringNewBlock,
zap.Uint32("index", b.Index),
)
err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
if err != nil {
- s.log.Warn("can't update persistent state",
+ s.log.Warn(logs.InnerringCantUpdatePersistentState,
zap.String("chain", "side"),
zap.Uint32("block_index", b.Index))
}
@@ -230,7 +231,7 @@ func (s *Server) registerMainnetNewBlockEventHandler() {
s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
if err != nil {
- s.log.Warn("can't update persistent state",
+ s.log.Warn(logs.InnerringCantUpdatePersistentState,
zap.String("chain", "main"),
zap.Uint32("block_index", b.Index))
}
@@ -302,7 +303,7 @@ func (s *Server) Stop() {
for _, c := range s.closers {
if err := c(); err != nil {
- s.log.Warn("closer error",
+ s.log.Warn(logs.InnerringCloserError,
zap.String("error", err.Error()),
)
}
@@ -547,7 +548,7 @@ func (s *Server) initConfigFromBlockchain() error {
return err
}
- s.log.Debug("read config from blockchain",
+ s.log.Debug(logs.InnerringReadConfigFromBlockchain,
zap.Bool("active", s.IsActive()),
zap.Bool("alphabet", s.IsAlphabet()),
zap.Uint64("epoch", epoch),
diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go
index 50353b574..30916cb99 100644
--- a/pkg/innerring/notary.go
+++ b/pkg/innerring/notary.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -52,14 +53,14 @@ func (s *Server) notaryHandler(_ event.Event) {
if !s.mainNotaryConfig.disabled {
_, err := s.depositMainNotary()
if err != nil {
- s.log.Error("can't make notary deposit in main chain", zap.Error(err))
+ s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
}
}
if !s.sideNotaryConfig.disabled {
_, err := s.depositSideNotary()
if err != nil {
- s.log.Error("can't make notary deposit in side chain", zap.Error(err))
+ s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
}
}
}
@@ -82,7 +83,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
// non-error deposit with an empty TX hash means
// that the deposit has already been made; no
// need to wait it.
- s.log.Info("notary deposit has already been made")
+ s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade)
return nil
}
diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go
index 9d61aa812..c0668a4f9 100644
--- a/pkg/innerring/processors/alphabet/handlers.go
+++ b/pkg/innerring/processors/alphabet/handlers.go
@@ -1,6 +1,7 @@
package alphabet
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"go.uber.org/zap"
@@ -8,14 +9,14 @@ import (
func (ap *Processor) HandleGasEmission(ev event.Event) {
_ = ev.(timers.NewAlphabetEmitTick)
- ap.log.Info("tick", zap.String("type", "alphabet gas emit"))
+ ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
// send event to the worker pool
err := ap.pool.Submit(func() { ap.processEmit() })
if err != nil {
// there system can be moved into controlled degradation stage
- ap.log.Warn("alphabet processor worker pool drained",
+ ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained,
zap.Int("capacity", ap.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go
index 90c484b88..b8d65dbc5 100644
--- a/pkg/innerring/processors/alphabet/process_emit.go
+++ b/pkg/innerring/processors/alphabet/process_emit.go
@@ -3,6 +3,7 @@ package alphabet
import (
"crypto/elliptic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
@@ -14,14 +15,14 @@ const emitMethod = "emit"
func (ap *Processor) processEmit() {
index := ap.irList.AlphabetIndex()
if index < 0 {
- ap.log.Info("non alphabet mode, ignore gas emission event")
+ ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
return
}
contract, ok := ap.alphabetContracts.GetByIndex(index)
if !ok {
- ap.log.Debug("node is out of alphabet range, ignore gas emission event",
+ ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
zap.Int("index", index))
return
@@ -30,20 +31,20 @@ func (ap *Processor) processEmit() {
// there is no signature collecting, so we don't need extra fee
err := ap.morphClient.Invoke(contract, 0, emitMethod)
if err != nil {
- ap.log.Warn("can't invoke alphabet emit method", zap.String("error", err.Error()))
+ ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
return
}
if ap.storageEmission == 0 {
- ap.log.Info("storage node emission is off")
+ ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff)
return
}
networkMap, err := ap.netmapClient.NetMap()
if err != nil {
- ap.log.Warn("can't get netmap snapshot to emit gas to storage nodes",
+ ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
zap.String("error", err.Error()))
return
@@ -53,7 +54,7 @@ func (ap *Processor) processEmit() {
nmLen := len(nmNodes)
extraLen := len(ap.parsedWallets)
- ap.log.Debug("gas emission",
+ ap.log.Debug(logs.AlphabetGasEmission,
zap.Int("network_map", nmLen),
zap.Int("extra_wallets", extraLen))
@@ -74,7 +75,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
- ap.log.Warn("can't parse node public key",
+ ap.log.Warn(logs.AlphabetCantParseNodePublicKey,
zap.String("error", err.Error()))
continue
@@ -82,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
if err != nil {
- ap.log.Warn("can't transfer gas",
+ ap.log.Warn(logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),
@@ -99,7 +100,7 @@ func (ap *Processor) transferGasToExtraNodes(extraLen int, gasPerNode fixedn.Fix
for i, addr := range ap.parsedWallets {
receiversLog[i] = addr.StringLE()
}
- ap.log.Warn("can't transfer gas to wallet",
+ ap.log.Warn(logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),
diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go
index 980158132..79b61f14f 100644
--- a/pkg/innerring/processors/alphabet/processor.go
+++ b/pkg/innerring/processors/alphabet/processor.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -67,7 +68,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/alphabet: global state is not set")
}
- p.Log.Debug("alphabet worker pool", zap.Int("size", p.PoolSize))
+ p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
diff --git a/pkg/innerring/processors/audit/handlers.go b/pkg/innerring/processors/audit/handlers.go
index 8b2354bb8..06c656fa2 100644
--- a/pkg/innerring/processors/audit/handlers.go
+++ b/pkg/innerring/processors/audit/handlers.go
@@ -1,6 +1,7 @@
package audit
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"go.uber.org/zap"
)
@@ -10,12 +11,12 @@ func (ap *Processor) handleNewAuditRound(ev event.Event) {
epoch := auditEvent.Epoch()
- ap.log.Info("new round of audit", zap.Uint64("epoch", epoch))
+ ap.log.Info(logs.AuditNewRoundOfAudit, zap.Uint64("epoch", epoch))
// send an event to the worker pool
err := ap.pool.Submit(func() { ap.processStartAudit(epoch) })
if err != nil {
- ap.log.Warn("previous round of audit prepare hasn't finished yet")
+ ap.log.Warn(logs.AuditPreviousRoundOfAuditPrepareHasntFinishedYet)
}
}
diff --git a/pkg/innerring/processors/audit/process.go b/pkg/innerring/processors/audit/process.go
index 656927816..000279f01 100644
--- a/pkg/innerring/processors/audit/process.go
+++ b/pkg/innerring/processors/audit/process.go
@@ -4,6 +4,7 @@ import (
"context"
"crypto/sha256"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
@@ -24,23 +25,23 @@ func (ap *Processor) processStartAudit(epoch uint64) {
skipped := ap.taskManager.Reset()
if skipped > 0 {
- ap.log.Info("some tasks from previous epoch are skipped",
+ ap.log.Info(logs.AuditSomeTasksFromPreviousEpochAreSkipped,
zap.Int("amount", skipped),
)
}
containers, err := ap.selectContainersToAudit(epoch)
if err != nil {
- log.Error("container selection failure", zap.String("error", err.Error()))
+ log.Error(logs.AuditContainerSelectionFailure, zap.String("error", err.Error()))
return
}
- log.Info("select containers for audit", zap.Int("amount", len(containers)))
+ log.Info(logs.AuditSelectContainersForAudit, zap.Int("amount", len(containers)))
nm, err := ap.netmapClient.GetNetMap(0)
if err != nil {
- ap.log.Error("can't fetch network map",
+ ap.log.Error(logs.AuditCantFetchNetworkMap,
zap.String("error", err.Error()))
return
@@ -64,7 +65,7 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{},
for i := range containers {
cnr, err := cntClient.Get(ap.containerClient, containers[i]) // get container structure
if err != nil {
- log.Error("can't get container info, ignore",
+ log.Error(logs.AuditCantGetContainerInfoIgnore,
zap.Stringer("cid", containers[i]),
zap.String("error", err.Error()))
@@ -76,7 +77,7 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{},
// find all container nodes for current epoch
nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), pivot)
if err != nil {
- log.Info("can't build placement for container, ignore",
+ log.Info(logs.AuditCantBuildPlacementForContainerIgnore,
zap.Stringer("cid", containers[i]),
zap.String("error", err.Error()))
@@ -92,13 +93,13 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{},
// search storage groups
storageGroupsIDs := ap.findStorageGroups(containers[i], n)
- log.Info("select storage groups for audit",
+ log.Info(logs.AuditSelectStorageGroupsForAudit,
zap.Stringer("cid", containers[i]),
zap.Int("amount", len(storageGroupsIDs)))
// filter expired storage groups
storageGroups := ap.filterExpiredSG(containers[i], storageGroupsIDs, nodes, *nm)
- log.Info("filter expired storage groups for audit",
+ log.Info(logs.AuditFilterExpiredStorageGroupsForAudit,
zap.Stringer("cid", containers[i]),
zap.Int("amount", len(storageGroups)))
@@ -146,7 +147,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []
err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(shuffled[i]))
if err != nil {
- log.Warn("parse client node info", zap.String("error", err.Error()))
+ log.Warn(logs.AuditParseClientNodeInfo, zap.String("error", err.Error()))
continue
}
@@ -162,7 +163,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []
cancel()
if err != nil {
- log.Warn("error in storage group search", zap.String("error", err.Error()))
+ log.Warn(logs.AuditErrorInStorageGroupSearch, zap.String("error", err.Error()))
continue
}
diff --git a/pkg/innerring/processors/audit/scheduler.go b/pkg/innerring/processors/audit/scheduler.go
index e1a521bad..fbc5fa927 100644
--- a/pkg/innerring/processors/audit/scheduler.go
+++ b/pkg/innerring/processors/audit/scheduler.go
@@ -6,6 +6,7 @@ import (
"sort"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
@@ -20,7 +21,7 @@ func (ap *Processor) selectContainersToAudit(epoch uint64) ([]cid.ID, error) {
// consider getting extra information about container complexity from
// audit contract there
- ap.log.Debug("container listing finished",
+ ap.log.Debug(logs.AuditContainerListingFinished,
zap.Int("total amount", len(containers)),
)
diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go
index 4c5a2ddc6..3360af916 100644
--- a/pkg/innerring/processors/balance/handlers.go
+++ b/pkg/innerring/processors/balance/handlers.go
@@ -3,6 +3,7 @@ package balance
import (
"encoding/hex"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
"go.uber.org/zap"
@@ -10,7 +11,7 @@ import (
func (bp *Processor) handleLock(ev event.Event) {
lock := ev.(balanceEvent.Lock)
- bp.log.Info("notification",
+ bp.log.Info(logs.BalanceNotification,
zap.String("type", "lock"),
zap.String("value", hex.EncodeToString(lock.ID())))
@@ -19,7 +20,7 @@ func (bp *Processor) handleLock(ev event.Event) {
err := bp.pool.Submit(func() { bp.processLock(&lock) })
if err != nil {
// there system can be moved into controlled degradation stage
- bp.log.Warn("balance worker pool drained",
+ bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained,
zap.Int("capacity", bp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go
index 754dda34a..3f86a3cb7 100644
--- a/pkg/innerring/processors/balance/process_assets.go
+++ b/pkg/innerring/processors/balance/process_assets.go
@@ -1,6 +1,7 @@
package balance
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
"go.uber.org/zap"
@@ -10,7 +11,7 @@ import (
// back to the withdraw issuer.
func (bp *Processor) processLock(lock *balanceEvent.Lock) {
if !bp.alphabetState.IsAlphabet() {
- bp.log.Info("non alphabet mode, ignore balance lock")
+ bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock)
return
}
@@ -24,6 +25,6 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) {
err := bp.frostfsClient.Cheque(prm)
if err != nil {
- bp.log.Error("can't send lock asset tx", zap.Error(err))
+ bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err))
}
}
diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go
index 2527b7ec3..370d06f44 100644
--- a/pkg/innerring/processors/balance/processor.go
+++ b/pkg/innerring/processors/balance/processor.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
@@ -60,7 +61,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/balance: balance precision converter is not set")
}
- p.Log.Debug("balance worker pool", zap.Int("size", p.PoolSize))
+ p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
index f9f8b5841..3d1946b4f 100644
--- a/pkg/innerring/processors/container/handlers.go
+++ b/pkg/innerring/processors/container/handlers.go
@@ -3,6 +3,7 @@ package container
import (
"crypto/sha256"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"github.com/mr-tron/base58"
@@ -13,7 +14,7 @@ func (cp *Processor) handlePut(ev event.Event) {
put := ev.(putEvent)
id := sha256.Sum256(put.Container())
- cp.log.Info("notification",
+ cp.log.Info(logs.ContainerNotification,
zap.String("type", "container put"),
zap.String("id", base58.Encode(id[:])))
@@ -22,14 +23,14 @@ func (cp *Processor) handlePut(ev event.Event) {
err := cp.pool.Submit(func() { cp.processContainerPut(put) })
if err != nil {
// there system can be moved into controlled degradation stage
- cp.log.Warn("container processor worker pool drained",
+ cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
func (cp *Processor) handleDelete(ev event.Event) {
del := ev.(containerEvent.Delete)
- cp.log.Info("notification",
+ cp.log.Info(logs.ContainerNotification,
zap.String("type", "container delete"),
zap.String("id", base58.Encode(del.ContainerID())))
@@ -38,7 +39,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
err := cp.pool.Submit(func() { cp.processContainerDelete(&del) })
if err != nil {
// there system can be moved into controlled degradation stage
- cp.log.Warn("container processor worker pool drained",
+ cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
@@ -46,7 +47,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
func (cp *Processor) handleSetEACL(ev event.Event) {
e := ev.(containerEvent.SetEACL)
- cp.log.Info("notification",
+ cp.log.Info(logs.ContainerNotification,
zap.String("type", "set EACL"),
)
@@ -57,7 +58,7 @@ func (cp *Processor) handleSetEACL(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
- cp.log.Warn("container processor worker pool drained",
+ cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
zap.Int("capacity", cp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go
index 8b244aa5d..5ebe58375 100644
--- a/pkg/innerring/processors/container/process_container.go
+++ b/pkg/innerring/processors/container/process_container.go
@@ -3,6 +3,7 @@ package container
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -35,7 +36,7 @@ type putContainerContext struct {
// and sending approve tx back to the morph.
func (cp *Processor) processContainerPut(put putEvent) {
if !cp.alphabetState.IsAlphabet() {
- cp.log.Info("non alphabet mode, ignore container put")
+ cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut)
return
}
@@ -45,7 +46,7 @@ func (cp *Processor) processContainerPut(put putEvent) {
err := cp.checkPutContainer(ctx)
if err != nil {
- cp.log.Error("put container check failed",
+ cp.log.Error(logs.ContainerPutContainerCheckFailed,
zap.String("error", err.Error()),
)
@@ -119,7 +120,7 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) {
err = cp.cnrClient.Put(prm)
}
if err != nil {
- cp.log.Error("could not approve put container",
+ cp.log.Error(logs.ContainerCouldNotApprovePutContainer,
zap.String("error", err.Error()),
)
}
@@ -129,13 +130,13 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) {
// and sending approve tx back to morph.
func (cp *Processor) processContainerDelete(e *containerEvent.Delete) {
if !cp.alphabetState.IsAlphabet() {
- cp.log.Info("non alphabet mode, ignore container delete")
+ cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete)
return
}
err := cp.checkDeleteContainer(e)
if err != nil {
- cp.log.Error("delete container check failed",
+ cp.log.Error(logs.ContainerDeleteContainerCheckFailed,
zap.String("error", err.Error()),
)
@@ -194,7 +195,7 @@ func (cp *Processor) approveDeleteContainer(e *containerEvent.Delete) {
err = cp.cnrClient.Delete(prm)
}
if err != nil {
- cp.log.Error("could not approve delete container",
+ cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer,
zap.String("error", err.Error()),
)
}
diff --git a/pkg/innerring/processors/container/process_eacl.go b/pkg/innerring/processors/container/process_eacl.go
index e8bbb5db6..fce75c678 100644
--- a/pkg/innerring/processors/container/process_eacl.go
+++ b/pkg/innerring/processors/container/process_eacl.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
@@ -13,13 +14,13 @@ import (
func (cp *Processor) processSetEACL(e container.SetEACL) {
if !cp.alphabetState.IsAlphabet() {
- cp.log.Info("non alphabet mode, ignore set EACL")
+ cp.log.Info(logs.ContainerNonAlphabetModeIgnoreSetEACL)
return
}
err := cp.checkSetEACL(e)
if err != nil {
- cp.log.Error("set EACL check failed",
+ cp.log.Error(logs.ContainerSetEACLCheckFailed,
zap.String("error", err.Error()),
)
@@ -91,7 +92,7 @@ func (cp *Processor) approveSetEACL(e container.SetEACL) {
err = cp.cnrClient.PutEACL(prm)
}
if err != nil {
- cp.log.Error("could not approve set EACL",
+ cp.log.Error(logs.ContainerCouldNotApproveSetEACL,
zap.String("error", err.Error()),
)
}
diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go
index ae0d28729..123ba77b8 100644
--- a/pkg/innerring/processors/container/processor.go
+++ b/pkg/innerring/processors/container/processor.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
@@ -88,7 +89,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/container: subnet client is not set")
}
- p.Log.Debug("container worker pool", zap.Int("size", p.PoolSize))
+ p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go
index bc0dbec7f..4eff15abe 100644
--- a/pkg/innerring/processors/frostfs/handlers.go
+++ b/pkg/innerring/processors/frostfs/handlers.go
@@ -3,6 +3,7 @@ package frostfs
import (
"encoding/hex"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
@@ -11,7 +12,7 @@ import (
func (np *Processor) handleDeposit(ev event.Event) {
deposit := ev.(frostfsEvent.Deposit)
- np.log.Info("notification",
+ np.log.Info(logs.FrostFSNotification,
zap.String("type", "deposit"),
zap.String("id", hex.EncodeToString(slice.CopyReverse(deposit.ID()))))
@@ -20,14 +21,14 @@ func (np *Processor) handleDeposit(ev event.Event) {
err := np.pool.Submit(func() { np.processDeposit(&deposit) })
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleWithdraw(ev event.Event) {
withdraw := ev.(frostfsEvent.Withdraw)
- np.log.Info("notification",
+ np.log.Info(logs.FrostFSNotification,
zap.String("type", "withdraw"),
zap.String("id", hex.EncodeToString(slice.CopyReverse(withdraw.ID()))))
@@ -36,14 +37,14 @@ func (np *Processor) handleWithdraw(ev event.Event) {
err := np.pool.Submit(func() { np.processWithdraw(&withdraw) })
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleCheque(ev event.Event) {
cheque := ev.(frostfsEvent.Cheque)
- np.log.Info("notification",
+ np.log.Info(logs.FrostFSNotification,
zap.String("type", "cheque"),
zap.String("id", hex.EncodeToString(cheque.ID())))
@@ -52,14 +53,14 @@ func (np *Processor) handleCheque(ev event.Event) {
err := np.pool.Submit(func() { np.processCheque(&cheque) })
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleConfig(ev event.Event) {
cfg := ev.(frostfsEvent.Config)
- np.log.Info("notification",
+ np.log.Info(logs.FrostFSNotification,
zap.String("type", "set config"),
zap.String("key", hex.EncodeToString(cfg.Key())),
zap.String("value", hex.EncodeToString(cfg.Value())))
@@ -69,14 +70,14 @@ func (np *Processor) handleConfig(ev event.Event) {
err := np.pool.Submit(func() { np.processConfig(&cfg) })
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleBind(ev event.Event) {
e := ev.(frostfsEvent.Bind)
- np.log.Info("notification",
+ np.log.Info(logs.FrostFSNotification,
zap.String("type", "bind"),
)
@@ -85,14 +86,14 @@ func (np *Processor) handleBind(ev event.Event) {
err := np.pool.Submit(func() { np.processBind(e) })
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleUnbind(ev event.Event) {
e := ev.(frostfsEvent.Unbind)
- np.log.Info("notification",
+ np.log.Info(logs.FrostFSNotification,
zap.String("type", "unbind"),
)
@@ -101,7 +102,7 @@ func (np *Processor) handleUnbind(ev event.Event) {
err := np.pool.Submit(func() { np.processBind(e) })
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("frostfs processor worker pool drained",
+ np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go
index b28efaa33..e066975f7 100644
--- a/pkg/innerring/processors/frostfs/process_assets.go
+++ b/pkg/innerring/processors/frostfs/process_assets.go
@@ -1,6 +1,7 @@
package frostfs
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"github.com/nspcc-dev/neo-go/pkg/util"
@@ -16,7 +17,7 @@ const (
// gas in the sidechain.
func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore deposit")
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit)
return
}
@@ -29,7 +30,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
// send transferX to a balance contract
err := np.balanceClient.Mint(prm)
if err != nil {
- np.log.Error("can't transfer assets to balance contract", zap.Error(err))
+ np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
}
curEpoch := np.epochState.EpochCounter()
@@ -43,7 +44,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val+np.mintEmitThreshold >= curEpoch {
- np.log.Warn("double mint emission declined",
+ np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
zap.String("receiver", receiver.String()),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))
@@ -55,12 +56,12 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
// before gas transfer check if the balance is greater than the threshold
balance, err := np.morphClient.GasBalance()
if err != nil {
- np.log.Error("can't get gas balance of the node", zap.Error(err))
+ np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
return
}
if balance < np.gasBalanceThreshold {
- np.log.Warn("gas balance threshold has been reached",
+ np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached,
zap.Int64("balance", balance),
zap.Int64("threshold", np.gasBalanceThreshold))
@@ -69,7 +70,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
- np.log.Error("can't transfer native gas to receiver",
+ np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver,
zap.String("error", err.Error()))
return
@@ -81,14 +82,14 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
// Process withdraw event by locking assets in the balance account.
func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore withdraw")
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw)
return
}
// create lock account
lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
if err != nil {
- np.log.Error("can't create lock account", zap.Error(err))
+ np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err))
return
}
@@ -104,7 +105,7 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
err = np.balanceClient.Lock(prm)
if err != nil {
- np.log.Error("can't lock assets for withdraw", zap.Error(err))
+ np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
}
}
@@ -112,7 +113,7 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
// the reserve account.
func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore cheque")
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque)
return
}
@@ -124,6 +125,6 @@ func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) {
err := np.balanceClient.Burn(prm)
if err != nil {
- np.log.Error("can't transfer assets to fed contract", zap.Error(err))
+ np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
}
}
diff --git a/pkg/innerring/processors/frostfs/process_bind.go b/pkg/innerring/processors/frostfs/process_bind.go
index 0abce5827..c5f8a930e 100644
--- a/pkg/innerring/processors/frostfs/process_bind.go
+++ b/pkg/innerring/processors/frostfs/process_bind.go
@@ -4,6 +4,7 @@ import (
"crypto/elliptic"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -20,7 +21,7 @@ type bindCommon interface {
func (np *Processor) processBind(e bindCommon) {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore bind")
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreBind)
return
}
@@ -32,7 +33,7 @@ func (np *Processor) processBind(e bindCommon) {
err := np.checkBindCommon(c)
if err != nil {
- np.log.Error("invalid manage key event",
+ np.log.Error(logs.FrostFSInvalidManageKeyEvent,
zap.Bool("bind", c.bind),
zap.String("error", err.Error()),
)
@@ -77,7 +78,7 @@ func (np *Processor) approveBindCommon(e *bindCommonContext) {
u160, err := util.Uint160DecodeBytesBE(scriptHash)
if err != nil {
- np.log.Error("could not decode script hash from bytes",
+ np.log.Error(logs.FrostFSCouldNotDecodeScriptHashFromBytes,
zap.String("error", err.Error()),
)
diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go
index ecc90332f..471edb9b7 100644
--- a/pkg/innerring/processors/frostfs/process_config.go
+++ b/pkg/innerring/processors/frostfs/process_config.go
@@ -1,6 +1,7 @@
package frostfs
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
"go.uber.org/zap"
@@ -10,7 +11,7 @@ import (
// the sidechain.
func (np *Processor) processConfig(config *frostfsEvent.Config) {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore config")
+ np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig)
return
}
@@ -23,6 +24,6 @@ func (np *Processor) processConfig(config *frostfsEvent.Config) {
err := np.netmapClient.SetConfig(prm)
if err != nil {
- np.log.Error("can't relay set config event", zap.Error(err))
+ np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
}
}
diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go
index e9504cdb4..4d5bdee78 100644
--- a/pkg/innerring/processors/frostfs/processor.go
+++ b/pkg/innerring/processors/frostfs/processor.go
@@ -5,6 +5,7 @@ import (
"fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
@@ -98,7 +99,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/frostfs: balance precision converter is not set")
}
- p.Log.Debug("frostfs worker pool", zap.Int("size", p.PoolSize))
+ p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go
index bfa88d3f0..727acc21a 100644
--- a/pkg/innerring/processors/governance/handlers.go
+++ b/pkg/innerring/processors/governance/handlers.go
@@ -1,6 +1,7 @@
package governance
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
"github.com/nspcc-dev/neo-go/pkg/core/native"
@@ -30,14 +31,14 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
return
}
- gp.log.Info("new event", zap.String("type", typ))
+ gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ))
// send event to the worker pool
err := gp.pool.Submit(func() { gp.processAlphabetSync(hash) })
if err != nil {
// there system can be moved into controlled degradation stage
- gp.log.Warn("governance worker pool drained",
+ gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained,
zap.Int("capacity", gp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go
index 3504e7a53..629d8741e 100644
--- a/pkg/innerring/processors/governance/process_update.go
+++ b/pkg/innerring/processors/governance/process_update.go
@@ -6,6 +6,7 @@ import (
"sort"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -20,37 +21,37 @@ const (
func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
if !gp.alphabetState.IsAlphabet() {
- gp.log.Info("non alphabet mode, ignore alphabet sync")
+ gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
return
}
mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
if err != nil {
- gp.log.Error("can't fetch alphabet list from main net",
+ gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet,
zap.String("error", err.Error()))
return
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
- gp.log.Error("can't fetch alphabet list from side chain",
+ gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain,
zap.String("error", err.Error()))
return
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
- gp.log.Error("can't merge alphabet lists from main net and side chain",
+ gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
zap.String("error", err.Error()))
return
}
if newAlphabet == nil {
- gp.log.Info("no governance update, alphabet list has not been changed")
+ gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
return
}
- gp.log.Info("alphabet list has been changed, starting update",
+ gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
zap.String("new_alphabet", prettyKeys(newAlphabet)),
)
@@ -63,7 +64,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
// 1. Vote to sidechain committee via alphabet contracts.
err = gp.voter.VoteForSidechainValidator(votePrm)
if err != nil {
- gp.log.Error("can't vote for side chain committee",
+ gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee,
zap.String("error", err.Error()))
}
@@ -76,7 +77,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
// 4. Update FrostFS contract in the mainnet.
gp.updateFrostFSContractInMainnet(newAlphabet)
- gp.log.Info("finished alphabet list update")
+ gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate)
}
func prettyKeys(keys keys.PublicKeys) string {
@@ -94,21 +95,21 @@ func prettyKeys(keys keys.PublicKeys) string {
func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
- gp.log.Error("can't fetch inner ring list from side chain",
+ gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain,
zap.String("error", err.Error()))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
- gp.log.Error("can't create new inner ring list with new alphabet keys",
+ gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
return
}
sort.Sort(newInnerRing)
- gp.log.Info("update of the inner ring list",
+ gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList,
zap.String("before", prettyKeys(innerRing)),
zap.String("after", prettyKeys(newInnerRing)),
)
@@ -130,7 +131,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl
}
if err != nil {
- gp.log.Error("can't update inner ring list with new alphabet keys",
+ gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
}
}
@@ -147,7 +148,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, tx
err := gp.morphClient.UpdateNotaryList(updPrm)
if err != nil {
- gp.log.Error("can't update list of notary nodes in side chain",
+ gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
zap.String("error", err.Error()))
}
}
@@ -167,7 +168,7 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys)
err := gp.frostfsClient.AlphabetUpdate(prm)
if err != nil {
- gp.log.Error("can't update list of alphabet nodes in frostfs contract",
+ gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
zap.String("error", err.Error()))
}
}
diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go
index 54e4ea3ab..76b27c891 100644
--- a/pkg/innerring/processors/netmap/handlers.go
+++ b/pkg/innerring/processors/netmap/handlers.go
@@ -3,6 +3,7 @@ package netmap
import (
"encoding/hex"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -12,21 +13,21 @@ import (
func (np *Processor) HandleNewEpochTick(ev event.Event) {
_ = ev.(timerEvent.NewEpochTick)
- np.log.Info("tick", zap.String("type", "epoch"))
+ np.log.Info(logs.NetmapTick, zap.String("type", "epoch"))
// send an event to the worker pool
err := np.pool.Submit(func() { np.processNewEpochTick() })
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleNewEpoch(ev event.Event) {
epochEvent := ev.(netmapEvent.NewEpoch)
- np.log.Info("notification",
+ np.log.Info(logs.NetmapNotification,
zap.String("type", "new epoch"),
zap.Uint64("value", epochEvent.EpochNumber()))
@@ -37,7 +38,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
@@ -45,7 +46,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
func (np *Processor) handleAddPeer(ev event.Event) {
newPeer := ev.(netmapEvent.AddPeer)
- np.log.Info("notification",
+ np.log.Info(logs.NetmapNotification,
zap.String("type", "add peer"),
)
@@ -56,14 +57,14 @@ func (np *Processor) handleAddPeer(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleUpdateState(ev event.Event) {
updPeer := ev.(netmapEvent.UpdatePeer)
- np.log.Info("notification",
+ np.log.Info(logs.NetmapNotification,
zap.String("type", "update peer state"),
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
@@ -74,21 +75,21 @@ func (np *Processor) handleUpdateState(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
func (np *Processor) handleCleanupTick(ev event.Event) {
if !np.netmapSnapshot.enabled {
- np.log.Debug("netmap clean up routine is disabled")
+ np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)
return
}
cleanup := ev.(netmapCleanupTick)
- np.log.Info("tick", zap.String("type", "netmap cleaner"))
+ np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner"))
// send event to the worker pool
err := np.pool.Submit(func() {
@@ -96,7 +97,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
@@ -104,7 +105,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
func (np *Processor) handleRemoveNode(ev event.Event) {
removeNode := ev.(subnetevents.RemoveNode)
- np.log.Info("notification",
+ np.log.Info(logs.NetmapNotification,
zap.String("type", "remove node from subnet"),
zap.String("subnetID", hex.EncodeToString(removeNode.SubnetworkID())),
zap.String("key", hex.EncodeToString(removeNode.Node())),
@@ -115,7 +116,7 @@ func (np *Processor) handleRemoveNode(ev event.Event) {
})
if err != nil {
// there system can be moved into controlled degradation stage
- np.log.Warn("netmap worker pool drained",
+ np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
zap.Int("capacity", np.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go
index e4425ef17..d50c69c78 100644
--- a/pkg/innerring/processors/netmap/process_cleanup.go
+++ b/pkg/innerring/processors/netmap/process_cleanup.go
@@ -2,6 +2,7 @@ package netmap
import (
v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
@@ -9,7 +10,7 @@ import (
func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore new netmap cleanup tick")
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)
return
}
@@ -17,13 +18,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
key, err := keys.NewPublicKeyFromString(s)
if err != nil {
- np.log.Warn("can't decode public key of netmap node",
+ np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode,
zap.String("key", s))
return nil
}
- np.log.Info("vote to remove node from netmap", zap.String("key", s))
+ np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))
// In notary environments we call UpdateStateIR method instead of UpdateState.
// It differs from UpdateState only by name, so we can do this in the same form.
@@ -48,13 +49,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
)
}
if err != nil {
- np.log.Error("can't invoke netmap.UpdateState", zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
}
return nil
})
if err != nil {
- np.log.Warn("can't iterate on netmap cleaner cache",
+ np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache,
zap.String("error", err.Error()))
}
}
diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go
index ffcddc497..ebf128f82 100644
--- a/pkg/innerring/processors/netmap/process_epoch.go
+++ b/pkg/innerring/processors/netmap/process_epoch.go
@@ -1,6 +1,7 @@
package netmap
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"
@@ -16,7 +17,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
epochDuration, err := np.netmapClient.EpochDuration()
if err != nil {
- np.log.Warn("can't get epoch duration",
+ np.log.Warn(logs.NetmapCantGetEpochDuration,
zap.String("error", err.Error()))
} else {
np.epochState.SetEpochDuration(epochDuration)
@@ -26,20 +27,20 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
h, err := np.netmapClient.Morph().TxHeight(ev.TxHash())
if err != nil {
- np.log.Warn("can't get transaction height",
+ np.log.Warn(logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
zap.String("error", err.Error()))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
- np.log.Warn("can't reset epoch timer",
+ np.log.Warn(logs.NetmapCantResetEpochTimer,
zap.String("error", err.Error()))
}
// get new netmap snapshot
networkMap, err := np.netmapClient.NetMap()
if err != nil {
- np.log.Warn("can't get netmap snapshot to perform cleanup",
+ np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
zap.String("error", err.Error()))
return
@@ -54,7 +55,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
err = np.containerWrp.StartEstimation(prm)
if err != nil {
- np.log.Warn("can't start container size estimation",
+ np.log.Warn(logs.NetmapCantStartContainerSizeEstimation,
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
}
@@ -71,15 +72,15 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
// Process new epoch tick by invoking new epoch method in network map contract.
func (np *Processor) processNewEpochTick() {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore new epoch tick")
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
return
}
nextEpoch := np.epochState.EpochCounter() + 1
- np.log.Debug("next epoch", zap.Uint64("value", nextEpoch))
+ np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))
err := np.netmapClient.NewEpoch(nextEpoch)
if err != nil {
- np.log.Error("can't invoke netmap.NewEpoch", zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
}
}
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index 3734bae01..ffaad3b4e 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/hex"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
subnetEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"
@@ -16,7 +17,7 @@ import (
// local epoch timer.
func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore new peer notification")
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
return
}
@@ -25,7 +26,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
tx := originalRequest.MainTransaction
ok, err := np.netmapClient.Morph().IsValidScript(tx.Script, tx.Signers)
if err != nil || !ok {
- np.log.Warn("non-halt notary transaction",
+ np.log.Warn(logs.NetmapNonhaltNotaryTransaction,
zap.String("method", "netmap.AddPeer"),
zap.String("hash", tx.Hash().StringLE()),
zap.Error(err))
@@ -37,14 +38,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
var nodeInfo netmap.NodeInfo
if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
// it will be nice to have tx id at event structure to log it
- np.log.Warn("can't parse network map candidate")
+ np.log.Warn(logs.NetmapCantParseNetworkMapCandidate)
return
}
// validate and update node info
err := np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
- np.log.Warn("could not verify and update information about network map candidate",
+ np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
zap.String("error", err.Error()),
)
@@ -62,7 +63,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
updated := np.netmapSnapshot.touch(keyString, np.epochState.EpochCounter(), nodeInfoBinary)
if updated {
- np.log.Info("approving network map candidate",
+ np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))
prm := netmapclient.AddPeerPrm{}
@@ -89,7 +90,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
}
if err != nil {
- np.log.Error("can't invoke netmap.AddPeer", zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
}
}
}
@@ -97,7 +98,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
// Process update peer notification by sending approval tx to the smart contract.
func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore update peer notification")
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
return
}
@@ -110,7 +111,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
if ev.Maintenance() {
err = np.nodeStateSettings.MaintenanceModeAllowed()
if err != nil {
- np.log.Info("prevent switching node to maintenance state",
+ np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState,
zap.Error(err),
)
@@ -135,19 +136,19 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
err = np.netmapClient.UpdatePeerState(prm)
}
if err != nil {
- np.log.Error("can't invoke netmap.UpdatePeer", zap.Error(err))
+ np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
}
}
func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
if !np.alphabetState.IsAlphabet() {
- np.log.Info("non alphabet mode, ignore remove node from subnet notification")
+ np.log.Info(logs.NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification)
return
}
candidates, err := np.netmapClient.GetCandidates()
if err != nil {
- np.log.Warn("could not get network map candidates",
+ np.log.Warn(logs.NetmapCouldNotGetNetworkMapCandidates,
zap.Error(err),
)
return
@@ -158,14 +159,14 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
err = subnetToRemoveFrom.Unmarshal(rawSubnet)
if err != nil {
- np.log.Warn("could not unmarshal subnet id",
+ np.log.Warn(logs.NetmapCouldNotUnmarshalSubnetId,
zap.Error(err),
)
return
}
if subnetid.IsZero(subnetToRemoveFrom) {
- np.log.Warn("got zero subnet in remove node notification")
+ np.log.Warn(logs.NetmapGotZeroSubnetInRemoveNodeNotification)
return
}
@@ -182,8 +183,8 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
return nil
})
if err != nil {
- np.log.Warn("could not iterate over subnetworks of the node", zap.Error(err))
- np.log.Info("vote to remove node from netmap", zap.String("key", hex.EncodeToString(ev.Node())))
+ np.log.Warn(logs.NetmapCouldNotIterateOverSubnetworksOfTheNode, zap.Error(err))
+ np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", hex.EncodeToString(ev.Node())))
prm := netmapclient.UpdatePeerPrm{}
prm.SetKey(ev.Node())
@@ -191,7 +192,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
err = np.netmapClient.UpdatePeerState(prm)
if err != nil {
- np.log.Error("could not invoke netmap.UpdateState", zap.Error(err))
+ np.log.Error(logs.NetmapCouldNotInvokeNetmapUpdateState, zap.Error(err))
return
}
} else {
@@ -201,7 +202,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
err = np.netmapClient.AddPeer(prm)
if err != nil {
- np.log.Error("could not invoke netmap.AddPeer", zap.Error(err))
+ np.log.Error(logs.NetmapCouldNotInvokeNetmapAddPeer, zap.Error(err))
return
}
}
diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go
index de145d48c..85a123ef3 100644
--- a/pkg/innerring/processors/netmap/processor.go
+++ b/pkg/innerring/processors/netmap/processor.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -142,7 +143,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/netmap: node state settings is not set")
}
- p.Log.Debug("netmap worker pool", zap.Int("size", p.PoolSize))
+ p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
diff --git a/pkg/innerring/processors/reputation/handlers.go b/pkg/innerring/processors/reputation/handlers.go
index 36c9579e5..30e3e9503 100644
--- a/pkg/innerring/processors/reputation/handlers.go
+++ b/pkg/innerring/processors/reputation/handlers.go
@@ -3,6 +3,7 @@ package reputation
import (
"encoding/hex"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
"go.uber.org/zap"
@@ -13,7 +14,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) {
peerID := put.PeerID()
// FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
- rp.log.Info("notification",
+ rp.log.Info(logs.ReputationNotification,
zap.String("type", "reputation put"),
zap.String("peer_id", hex.EncodeToString(peerID.PublicKey())))
@@ -22,7 +23,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) {
err := rp.pool.Submit(func() { rp.processPut(&put) })
if err != nil {
// there system can be moved into controlled degradation stage
- rp.log.Warn("reputation worker pool drained",
+ rp.log.Warn(logs.ReputationReputationWorkerPoolDrained,
zap.Int("capacity", rp.pool.Cap()))
}
}
diff --git a/pkg/innerring/processors/reputation/process_put.go b/pkg/innerring/processors/reputation/process_put.go
index 31e93763b..f8814dd06 100644
--- a/pkg/innerring/processors/reputation/process_put.go
+++ b/pkg/innerring/processors/reputation/process_put.go
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
@@ -16,7 +17,7 @@ var errWrongManager = errors.New("got manager that is incorrect for peer")
func (rp *Processor) processPut(e *reputationEvent.Put) {
if !rp.alphabetState.IsAlphabet() {
- rp.log.Info("non alphabet mode, ignore reputation put notification")
+ rp.log.Info(logs.ReputationNonAlphabetModeIgnoreReputationPutNotification)
return
}
@@ -27,7 +28,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) {
// check if epoch is valid
currentEpoch := rp.epochState.EpochCounter()
if epoch >= currentEpoch {
- rp.log.Info("ignore reputation value",
+ rp.log.Info(logs.ReputationIgnoreReputationValue,
zap.String("reason", "invalid epoch number"),
zap.Uint64("trust_epoch", epoch),
zap.Uint64("local_epoch", currentEpoch))
@@ -37,7 +38,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) {
// check signature
if !value.VerifySignature() {
- rp.log.Info("ignore reputation value",
+ rp.log.Info(logs.ReputationIgnoreReputationValue,
zap.String("reason", "invalid signature"),
)
@@ -46,7 +47,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) {
// check if manager is correct
if err := rp.checkManagers(epoch, value.Manager(), id); err != nil {
- rp.log.Info("ignore reputation value",
+ rp.log.Info(logs.ReputationIgnoreReputationValue,
zap.String("reason", "wrong manager"),
zap.String("error", err.Error()))
@@ -91,7 +92,7 @@ func (rp *Processor) approvePutReputation(e *reputationEvent.Put) {
}
if err != nil {
// FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
- rp.log.Warn("can't send approval tx for reputation value",
+ rp.log.Warn(logs.ReputationCantSendApprovalTxForReputationValue,
zap.String("peer_id", hex.EncodeToString(id.PublicKey())),
zap.String("error", err.Error()))
}
diff --git a/pkg/innerring/processors/reputation/processor.go b/pkg/innerring/processors/reputation/processor.go
index 990358257..a248fa75f 100644
--- a/pkg/innerring/processors/reputation/processor.go
+++ b/pkg/innerring/processors/reputation/processor.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
@@ -71,7 +72,7 @@ func New(p *Params) (*Processor, error) {
return nil, errors.New("ir/reputation: manager builder is not set")
}
- p.Log.Debug("reputation worker pool", zap.Int("size", p.PoolSize))
+ p.Log.Debug(logs.ReputationReputationWorkerPool, zap.Int("size", p.PoolSize))
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
if err != nil {
diff --git a/pkg/innerring/processors/settlement/audit/calculate.go b/pkg/innerring/processors/settlement/audit/calculate.go
index d819865d8..75b8c56a4 100644
--- a/pkg/innerring/processors/settlement/audit/calculate.go
+++ b/pkg/innerring/processors/settlement/audit/calculate.go
@@ -7,6 +7,7 @@ import (
"encoding/hex"
"math/big"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
@@ -58,32 +59,32 @@ func (c *Calculator) Calculate(p *CalculatePrm) {
)}
if p.Epoch == 0 {
- log.Info("settlements are ignored for zero epoch")
+ log.Info(logs.AuditSettlementsAreIgnoredForZeroEpoch)
return
}
- log.Info("calculate audit settlements")
+ log.Info(logs.AuditCalculateAuditSettlements)
- log.Debug("getting results for the previous epoch")
+ log.Debug(logs.AuditGettingResultsForThePreviousEpoch)
prevEpoch := p.Epoch - 1
auditResults, err := c.prm.ResultStorage.AuditResultsForEpoch(prevEpoch)
if err != nil {
- log.Error("could not collect audit results")
+ log.Error(logs.AuditCouldNotCollectAuditResults)
return
} else if len(auditResults) == 0 {
- log.Debug("no audit results in previous epoch")
+ log.Debug(logs.AuditNoAuditResultsInPreviousEpoch)
return
}
auditFee, err := c.prm.AuditFeeFetcher.AuditFee()
if err != nil {
- log.Warn("can't fetch audit fee from network config",
+ log.Warn(logs.AuditCantFetchAuditFeeFromNetworkConfig,
zap.String("error", err.Error()))
auditFee = 0
}
- log.Debug("processing audit results",
+ log.Debug(logs.AuditProcessingAuditResults,
zap.Int("number", len(auditResults)),
)
@@ -98,7 +99,7 @@ func (c *Calculator) Calculate(p *CalculatePrm) {
})
}
- log.Debug("processing transfers")
+ log.Debug(logs.AuditProcessingTransfers)
common.TransferAssets(c.prm.Exchanger, table, common.AuditSettlementDetails(prevEpoch))
}
@@ -109,35 +110,35 @@ func (c *Calculator) processResult(ctx *singleResultCtx) {
zap.Uint64("audit epoch", ctx.auditResult.Epoch()),
)}
- ctx.log.Debug("reading information about the container")
+ ctx.log.Debug(logs.AuditReadingInformationAboutTheContainer)
ok := c.readContainerInfo(ctx)
if !ok {
return
}
- ctx.log.Debug("building placement")
+ ctx.log.Debug(logs.AuditBuildingPlacement)
ok = c.buildPlacement(ctx)
if !ok {
return
}
- ctx.log.Debug("collecting passed nodes")
+ ctx.log.Debug(logs.AuditCollectingPassedNodes)
ok = c.collectPassNodes(ctx)
if !ok {
return
}
- ctx.log.Debug("calculating sum of the sizes of all storage groups")
+ ctx.log.Debug(logs.AuditCalculatingSumOfTheSizesOfAllStorageGroups)
ok = c.sumSGSizes(ctx)
if !ok {
return
}
- ctx.log.Debug("filling transfer table")
+ ctx.log.Debug(logs.AuditFillingTransferTable)
c.fillTransferTable(ctx)
}
@@ -145,7 +146,7 @@ func (c *Calculator) processResult(ctx *singleResultCtx) {
func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool {
cnr, ok := ctx.auditResult.Container()
if !ok {
- ctx.log.Error("missing container in audit result")
+ ctx.log.Error(logs.AuditMissingContainerInAuditResult)
return false
}
@@ -153,7 +154,7 @@ func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool {
ctx.cnrInfo, err = c.prm.ContainerStorage.ContainerInfo(cnr)
if err != nil {
- ctx.log.Error("could not get container info",
+ ctx.log.Error(logs.AuditCouldNotGetContainerInfo,
zap.String("error", err.Error()),
)
}
@@ -166,14 +167,14 @@ func (c *Calculator) buildPlacement(ctx *singleResultCtx) bool {
ctx.cnrNodes, err = c.prm.PlacementCalculator.ContainerNodes(ctx.auditEpoch(), ctx.containerID())
if err != nil {
- ctx.log.Error("could not get container nodes",
+ ctx.log.Error(logs.AuditCouldNotGetContainerNodes,
zap.String("error", err.Error()),
)
}
empty := len(ctx.cnrNodes) == 0
if empty {
- ctx.log.Debug("empty list of container nodes")
+ ctx.log.Debug(logs.AuditEmptyListOfContainerNodes)
}
return err == nil && !empty
@@ -206,7 +207,7 @@ func (c *Calculator) collectPassNodes(ctx *singleResultCtx) bool {
empty := len(ctx.passNodes) == 0
if empty {
- ctx.log.Debug("none of the container nodes passed the audit")
+ ctx.log.Debug(logs.AuditNoneOfTheContainerNodesPassedTheAudit)
}
return !empty
@@ -224,7 +225,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {
sgInfo, err := c.prm.SGStorage.SGInfo(addr)
if err != nil {
- ctx.log.Error("could not get SG info",
+ ctx.log.Error(logs.AuditCouldNotGetSGInfo,
zap.String("id", id.String()),
zap.String("error", err.Error()),
)
@@ -244,7 +245,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {
}
if sumPassSGSize == 0 {
- ctx.log.Debug("zero sum SG size")
+ ctx.log.Debug(logs.AuditZeroSumSGSize)
return false
}
@@ -260,7 +261,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
for k, info := range ctx.passNodes {
ownerID, err := c.prm.AccountStorage.ResolveKey(info)
if err != nil {
- ctx.log.Error("could not resolve public key of the storage node",
+ ctx.log.Error(logs.AuditCouldNotResolvePublicKeyOfTheStorageNode,
zap.String("error", err.Error()),
zap.String("key", k),
)
@@ -270,7 +271,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
price := info.Price()
- ctx.log.Debug("calculating storage node salary for audit (GASe-12)",
+ ctx.log.Debug(logs.AuditCalculatingStorageNodeSalaryForAudit,
zap.Stringer("sum SG size", ctx.sumSGSize),
zap.Stringer("price", price),
)
@@ -292,7 +293,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
// add txs to pay inner ring node for audit result
auditIR, err := ownerFromKey(ctx.auditResult.AuditorKey())
if err != nil {
- ctx.log.Error("could not parse public key of the inner ring node",
+ ctx.log.Error(logs.AuditCouldNotParsePublicKeyOfTheInnerRingNode,
zap.String("error", err.Error()),
zap.String("key", hex.EncodeToString(ctx.auditResult.AuditorKey())),
)
diff --git a/pkg/innerring/processors/settlement/basic/collect.go b/pkg/innerring/processors/settlement/basic/collect.go
index ee7354c4f..024769c06 100644
--- a/pkg/innerring/processors/settlement/basic/collect.go
+++ b/pkg/innerring/processors/settlement/basic/collect.go
@@ -3,6 +3,7 @@ package basic
import (
"math/big"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"go.uber.org/zap"
@@ -20,7 +21,7 @@ func (inc *IncomeSettlementContext) Collect() {
cachedRate, err := inc.rate.BasicRate()
if err != nil {
- inc.log.Error("can't get basic income rate",
+ inc.log.Error(logs.BasicCantGetBasicIncomeRate,
zap.String("error", err.Error()))
return
@@ -33,7 +34,7 @@ func (inc *IncomeSettlementContext) Collect() {
cnrEstimations, err := inc.estimations.Estimations(inc.epoch)
if err != nil {
- inc.log.Error("can't fetch container size estimations",
+ inc.log.Error(logs.BasicCantFetchContainerSizeEstimations,
zap.Uint64("epoch", inc.epoch),
zap.String("error", err.Error()))
@@ -45,7 +46,7 @@ func (inc *IncomeSettlementContext) Collect() {
for i := range cnrEstimations {
owner, err := inc.container.ContainerInfo(cnrEstimations[i].ContainerID)
if err != nil {
- inc.log.Warn("can't fetch container info",
+ inc.log.Warn(logs.BasicCantFetchContainerInfo,
zap.Uint64("epoch", inc.epoch),
zap.Stringer("container_id", cnrEstimations[i].ContainerID),
zap.String("error", err.Error()))
@@ -55,7 +56,7 @@ func (inc *IncomeSettlementContext) Collect() {
cnrNodes, err := inc.placement.ContainerNodes(inc.epoch, cnrEstimations[i].ContainerID)
if err != nil {
- inc.log.Debug("can't fetch container info",
+ inc.log.Debug(logs.BasicCantFetchContainerInfo,
zap.Uint64("epoch", inc.epoch),
zap.Stringer("container_id", cnrEstimations[i].ContainerID),
zap.String("error", err.Error()))
diff --git a/pkg/innerring/processors/settlement/basic/distribute.go b/pkg/innerring/processors/settlement/basic/distribute.go
index e085f1e22..44a8ccea3 100644
--- a/pkg/innerring/processors/settlement/basic/distribute.go
+++ b/pkg/innerring/processors/settlement/basic/distribute.go
@@ -4,6 +4,7 @@ import (
"encoding/hex"
"math/big"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
"go.uber.org/zap"
)
@@ -20,7 +21,7 @@ func (inc *IncomeSettlementContext) Distribute() {
bankBalance, err := inc.balances.Balance(inc.bankOwner)
if err != nil {
- inc.log.Error("can't fetch balance of banking account",
+ inc.log.Error(logs.BasicCantFetchBalanceOfBankingAccount,
zap.String("error", err.Error()))
return
@@ -31,7 +32,7 @@ func (inc *IncomeSettlementContext) Distribute() {
inc.distributeTable.Iterate(func(key []byte, n *big.Int) {
nodeOwner, err := inc.accounts.ResolveKey(nodeInfoWrapper(key))
if err != nil {
- inc.log.Warn("can't transform public key to owner id",
+ inc.log.Warn(logs.BasicCantTransformPublicKeyToOwnerID,
zap.String("public_key", hex.EncodeToString(key)),
zap.String("error", err.Error()))
diff --git a/pkg/innerring/processors/settlement/calls.go b/pkg/innerring/processors/settlement/calls.go
index 33191662b..2687ad206 100644
--- a/pkg/innerring/processors/settlement/calls.go
+++ b/pkg/innerring/processors/settlement/calls.go
@@ -1,6 +1,7 @@
package settlement
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
@@ -14,7 +15,7 @@ func (p *Processor) HandleAuditEvent(e event.Event) {
epoch := ev.Epoch()
if !p.state.IsAlphabet() {
- p.log.Info("non alphabet mode, ignore audit payments")
+ p.log.Info(logs.SettlementNonAlphabetModeIgnoreAuditPayments)
return
}
@@ -23,10 +24,10 @@ func (p *Processor) HandleAuditEvent(e event.Event) {
zap.Uint64("epoch", epoch),
)}
- log.Info("new audit settlement event")
+ log.Info(logs.SettlementNewAuditSettlementEvent)
if epoch == 0 {
- log.Debug("ignore genesis epoch")
+ log.Debug(logs.SettlementIgnoreGenesisEpoch)
return
}
@@ -38,14 +39,14 @@ func (p *Processor) HandleAuditEvent(e event.Event) {
err := p.pool.Submit(handler.handle)
if err != nil {
- log.Warn("could not add handler of AuditEvent to queue",
+ log.Warn(logs.SettlementCouldNotAddHandlerOfAuditEventToQueue,
zap.String("error", err.Error()),
)
return
}
- log.Debug("AuditEvent handling successfully scheduled")
+ log.Debug(logs.SettlementAuditEventHandlingSuccessfullyScheduled)
}
func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
@@ -53,19 +54,19 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
epoch := ev.Epoch()
if !p.state.IsAlphabet() {
- p.log.Info("non alphabet mode, ignore income collection event")
+ p.log.Info(logs.SettlementNonAlphabetModeIgnoreIncomeCollectionEvent)
return
}
- p.log.Info("start basic income collection",
+ p.log.Info(logs.SettlementStartBasicIncomeCollection,
zap.Uint64("epoch", epoch))
p.contextMu.Lock()
defer p.contextMu.Unlock()
if _, ok := p.incomeContexts[epoch]; ok {
- p.log.Error("income context already exists",
+ p.log.Error(logs.SettlementIncomeContextAlreadyExists,
zap.Uint64("epoch", epoch))
return
@@ -73,7 +74,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
incomeCtx, err := p.basicIncome.CreateContext(epoch)
if err != nil {
- p.log.Error("can't create income context",
+ p.log.Error(logs.SettlementCantCreateIncomeContext,
zap.String("error", err.Error()))
return
@@ -85,7 +86,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
incomeCtx.Collect()
})
if err != nil {
- p.log.Warn("could not add handler of basic income collection to queue",
+ p.log.Warn(logs.SettlementCouldNotAddHandlerOfBasicIncomeCollectionToQueue,
zap.String("error", err.Error()),
)
@@ -98,12 +99,12 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
epoch := ev.Epoch()
if !p.state.IsAlphabet() {
- p.log.Info("non alphabet mode, ignore income distribution event")
+ p.log.Info(logs.SettlementNonAlphabetModeIgnoreIncomeDistributionEvent)
return
}
- p.log.Info("start basic income distribution",
+ p.log.Info(logs.SettlementStartBasicIncomeDistribution,
zap.Uint64("epoch", epoch))
p.contextMu.Lock()
@@ -113,7 +114,7 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
delete(p.incomeContexts, epoch)
if !ok {
- p.log.Warn("income context distribution does not exists",
+ p.log.Warn(logs.SettlementIncomeContextDistributionDoesNotExists,
zap.Uint64("epoch", epoch))
return
@@ -123,7 +124,7 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
incomeCtx.Distribute()
})
if err != nil {
- p.log.Warn("could not add handler of basic income distribution to queue",
+ p.log.Warn(logs.SettlementCouldNotAddHandlerOfBasicIncomeDistributionToQueue,
zap.String("error", err.Error()),
)
diff --git a/pkg/innerring/processors/settlement/handlers.go b/pkg/innerring/processors/settlement/handlers.go
index f73b61983..e69d829eb 100644
--- a/pkg/innerring/processors/settlement/handlers.go
+++ b/pkg/innerring/processors/settlement/handlers.go
@@ -1,6 +1,9 @@
package settlement
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+)
type auditEventHandler struct {
log *logger.Logger
@@ -11,9 +14,9 @@ type auditEventHandler struct {
}
func (p *auditEventHandler) handle() {
- p.log.Info("process audit settlements")
+ p.log.Info(logs.SettlementProcessAuditSettlements)
p.proc.ProcessAuditSettlements(p.epoch)
- p.log.Info("audit processing finished")
+ p.log.Info(logs.SettlementAuditProcessingFinished)
}
diff --git a/pkg/innerring/processors/settlement/processor.go b/pkg/innerring/processors/settlement/processor.go
index e86666d5c..1870a0351 100644
--- a/pkg/innerring/processors/settlement/processor.go
+++ b/pkg/innerring/processors/settlement/processor.go
@@ -4,6 +4,7 @@ import (
"fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic"
nodeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -63,7 +64,7 @@ func New(prm Prm, opts ...Option) *Processor {
panic(fmt.Errorf("could not create worker pool: %w", err))
}
- o.log.Debug("worker pool for settlement processor successfully initialized",
+ o.log.Debug(logs.SettlementWorkerPoolForSettlementProcessorSuccessfullyInitialized,
zap.Int("capacity", o.poolSize),
)
diff --git a/pkg/innerring/rpc.go b/pkg/innerring/rpc.go
index 8e96deb7b..9a89c4fcb 100644
--- a/pkg/innerring/rpc.go
+++ b/pkg/innerring/rpc.go
@@ -6,6 +6,7 @@ import (
"fmt"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
storagegroup2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
@@ -94,7 +95,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne
cli, err := c.getWrappedClient(info)
if err != nil {
- c.log.Warn("can't setup remote connection",
+ c.log.Warn(logs.InnerringCantSetupRemoteConnection,
zap.String("error", err.Error()))
continue
@@ -109,7 +110,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne
cancel()
if err != nil {
- c.log.Warn("can't get storage group object",
+ c.log.Warn(logs.InnerringCantGetStorageGroupObject,
zap.String("error", err.Error()))
continue
diff --git a/pkg/innerring/settlement.go b/pkg/innerring/settlement.go
index 08e7a9f4d..90255f5c1 100644
--- a/pkg/innerring/settlement.go
+++ b/pkg/innerring/settlement.go
@@ -9,6 +9,7 @@ import (
"fmt"
"math/big"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/audit"
@@ -223,7 +224,7 @@ func (s settlementDeps) Transfer(sender, recipient user.ID, amount *big.Int, det
)
if !amount.IsInt64() {
- s.log.Error("amount can not be represented as an int64")
+ s.log.Error(logs.InnerringAmountCanNotBeRepresentedAsAnInt64)
return
}
@@ -262,7 +263,7 @@ func (b basicIncomeSettlementDeps) Estimations(epoch uint64) ([]*containerClient
for i := range estimationIDs {
estimation, err := b.cnrClient.GetUsedSpaceEstimations(estimationIDs[i])
if err != nil {
- b.log.Warn("can't get used space estimation",
+ b.log.Warn(logs.InnerringCantGetUsedSpaceEstimation,
zap.String("estimation_id", hex.EncodeToString(estimationIDs[i])),
zap.String("error", err.Error()))
diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go
index 903d9c876..e3bf7886e 100644
--- a/pkg/innerring/state.go
+++ b/pkg/innerring/state.go
@@ -4,6 +4,7 @@ import (
"fmt"
"sort"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
@@ -61,7 +62,7 @@ func (s *Server) IsAlphabet() bool {
func (s *Server) InnerRingIndex() int {
index, err := s.statusIndex.InnerRingIndex()
if err != nil {
- s.log.Error("can't get inner ring index", zap.String("error", err.Error()))
+ s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
return -1
}
@@ -73,7 +74,7 @@ func (s *Server) InnerRingIndex() int {
func (s *Server) InnerRingSize() int {
size, err := s.statusIndex.InnerRingSize()
if err != nil {
- s.log.Error("can't get inner ring size", zap.String("error", err.Error()))
+ s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
return 0
}
@@ -85,7 +86,7 @@ func (s *Server) InnerRingSize() int {
func (s *Server) AlphabetIndex() int {
index, err := s.statusIndex.AlphabetIndex()
if err != nil {
- s.log.Error("can't get alphabet index", zap.String("error", err.Error()))
+ s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
return -1
}
@@ -97,13 +98,13 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
index := s.InnerRingIndex()
if s.contracts.alphabet.indexOutOfRange(index) {
- s.log.Info("ignore validator vote: node not in alphabet range")
+ s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)
return nil
}
if len(validators) == 0 {
- s.log.Info("ignore validator vote: empty validators list")
+ s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)
return nil
}
@@ -128,7 +129,7 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro
s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
if err != nil {
- s.log.Warn("can't invoke vote method in alphabet contract",
+ s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
diff --git a/pkg/innerring/subnet.go b/pkg/innerring/subnet.go
index 5375029d4..03108aac2 100644
--- a/pkg/innerring/subnet.go
+++ b/pkg/innerring/subnet.go
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
irsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/subnet"
netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
@@ -173,7 +174,7 @@ func (s *Server) catchSubnetCreation(e event.Event) {
s.handleSubnetCreation(e)
})
if err != nil {
- s.log.Error("subnet creation queue failure",
+ s.log.Error(logs.InnerringSubnetCreationQueueFailure,
zap.String("error", err.Error()),
)
}
@@ -225,7 +226,7 @@ func (s *Server) handleSubnetCreation(e event.Event) {
ev: putEv,
})
if err != nil {
- s.log.Info("discard subnet creation",
+ s.log.Info(logs.InnerringDiscardSubnetCreation,
zap.String("reason", err.Error()),
)
@@ -251,7 +252,7 @@ func (s *Server) handleSubnetCreation(e event.Event) {
}
if err != nil {
- s.log.Error("approve subnet creation",
+ s.log.Error(logs.InnerringApproveSubnetCreation,
zap.Bool("notary", isNotary),
zap.String("error", err.Error()),
)
@@ -266,7 +267,7 @@ func (s *Server) catchSubnetRemoval(e event.Event) {
s.handleSubnetRemoval(e)
})
if err != nil {
- s.log.Error("subnet removal handling failure",
+ s.log.Error(logs.InnerringSubnetRemovalHandlingFailure,
zap.String("error", err.Error()),
)
}
@@ -280,7 +281,7 @@ func (s *Server) handleSubnetRemoval(e event.Event) {
candidates, err := s.netmapClient.GetCandidates()
if err != nil {
- s.log.Error("getting netmap candidates",
+ s.log.Error(logs.InnerringGettingNetmapCandidates,
zap.Error(err),
)
@@ -290,7 +291,7 @@ func (s *Server) handleSubnetRemoval(e event.Event) {
var removedID subnetid.ID
err = removedID.Unmarshal(delEv.ID())
if err != nil {
- s.log.Error("unmarshalling removed subnet ID",
+ s.log.Error(logs.InnerringUnmarshallingRemovedSubnetID,
zap.String("error", err.Error()),
)
@@ -318,8 +319,8 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
return nil
})
if err != nil {
- log.Error("iterating node's subnets", zap.Error(err))
- log.Debug("removing node from netmap candidates")
+ log.Error(logs.InnerringIteratingNodesSubnets, zap.Error(err))
+ log.Debug(logs.InnerringRemovingNodeFromNetmapCandidates)
var updateStatePrm netmapclient.UpdatePeerPrm
updateStatePrm.SetKey(c.PublicKey())
@@ -327,7 +328,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
err = s.netmapClient.UpdatePeerState(updateStatePrm)
if err != nil {
- log.Error("removing node from candidates",
+ log.Error(logs.InnerringRemovingNodeFromCandidates,
zap.Error(err),
)
}
@@ -338,7 +339,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
// remove subnet from node's information
// if it contains removed subnet
if removeSubnet {
- log.Debug("removing subnet from the node")
+ log.Debug(logs.InnerringRemovingSubnetFromTheNode)
var addPeerPrm netmapclient.AddPeerPrm
addPeerPrm.SetNodeInfo(c)
@@ -346,7 +347,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
err = s.netmapClient.AddPeer(addPeerPrm)
if err != nil {
- log.Error("updating subnet info",
+ log.Error(logs.InnerringUpdatingSubnetInfo,
zap.Error(err),
)
}
diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go
index 3912deac0..84274528a 100644
--- a/pkg/local_object_storage/blobovnicza/control.go
+++ b/pkg/local_object_storage/blobovnicza/control.go
@@ -5,6 +5,7 @@ import (
"os"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"go.etcd.io/bbolt"
"go.uber.org/zap"
@@ -14,7 +15,7 @@ import (
//
// If the database file does not exist, it will be created automatically.
func (b *Blobovnicza) Open() error {
- b.log.Debug("creating directory for BoltDB",
+ b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB,
zap.String("path", b.path),
zap.Bool("ro", b.boltOptions.ReadOnly),
)
@@ -28,7 +29,7 @@ func (b *Blobovnicza) Open() error {
}
}
- b.log.Debug("opening BoltDB",
+ b.log.Debug(logs.BlobovniczaOpeningBoltDB,
zap.String("path", b.path),
zap.Stringer("permissions", b.perm),
)
@@ -44,13 +45,13 @@ func (b *Blobovnicza) Open() error {
//
// Should not be called in read-only configuration.
func (b *Blobovnicza) Init() error {
- b.log.Debug("initializing...",
+ b.log.Debug(logs.BlobovniczaInitializing,
zap.Uint64("object size limit", b.objSizeLimit),
zap.Uint64("storage size limit", b.fullSizeLimit),
)
if size := b.filled.Load(); size != 0 {
- b.log.Debug("already initialized", zap.Uint64("size", size))
+ b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size))
return nil
}
@@ -59,7 +60,7 @@ func (b *Blobovnicza) Init() error {
// create size range bucket
rangeStr := stringifyBounds(lower, upper)
- b.log.Debug("creating bucket for size range",
+ b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange,
zap.String("range", rangeStr))
_, err := tx.CreateBucketIfNotExists(key)
@@ -86,7 +87,7 @@ func (b *Blobovnicza) Init() error {
// Close releases all internal database resources.
func (b *Blobovnicza) Close() error {
- b.log.Debug("closing BoltDB",
+ b.log.Debug(logs.BlobovniczaClosingBoltDB,
zap.String("path", b.path),
)
diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go
index 1f885bd8e..6ce6f349c 100644
--- a/pkg/local_object_storage/blobovnicza/delete.go
+++ b/pkg/local_object_storage/blobovnicza/delete.go
@@ -1,6 +1,7 @@
package blobovnicza
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -51,7 +52,7 @@ func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) {
err := buck.Delete(addrKey)
if err == nil {
- b.log.Debug("object was removed from bucket",
+ b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket,
zap.String("binary size", stringifyByteSize(sz)),
zap.String("range", stringifyBounds(lower, upper)),
)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
index c628c96be..af976f977 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
@@ -7,6 +7,7 @@ import (
"strconv"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
@@ -104,12 +105,12 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
// it from opened cache.
return
} else if err := value.Close(); err != nil {
- blz.log.Error("could not close Blobovnicza",
+ blz.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", p),
zap.String("error", err.Error()),
)
} else {
- blz.log.Debug("blobovnicza successfully closed on evict",
+ blz.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict,
zap.String("id", p),
)
}
@@ -141,11 +142,11 @@ func (b *Blobovniczas) getActivated(lvlPath string) (blobovniczaWithIndex, error
//
// if current active blobovnicza's index is not old, it remains unchanged.
func (b *Blobovniczas) updateActive(lvlPath string, old *uint64) error {
- b.log.Debug("updating active blobovnicza...", zap.String("path", lvlPath))
+ b.log.Debug(logs.BlobovniczatreeUpdatingActiveBlobovnicza, zap.String("path", lvlPath))
_, err := b.updateAndGet(lvlPath, old)
- b.log.Debug("active blobovnicza successfully updated", zap.String("path", lvlPath))
+ b.log.Debug(logs.BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated, zap.String("path", lvlPath))
return err
}
@@ -201,7 +202,7 @@ func (b *Blobovniczas) updateAndGet(lvlPath string, old *uint64) (blobovniczaWit
}
b.lruMtx.Unlock()
- b.log.Debug("blobovnicza successfully activated",
+ b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyActivated,
zap.String("path", activePath))
return active, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
index e7e890e50..0240c7a97 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
@@ -4,6 +4,7 @@ import (
"fmt"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"go.uber.org/zap"
)
@@ -18,10 +19,10 @@ func (b *Blobovniczas) Open(readOnly bool) error {
//
// Should be called exactly once.
func (b *Blobovniczas) Init() error {
- b.log.Debug("initializing Blobovnicza's")
+ b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas)
if b.readOnly {
- b.log.Debug("read-only mode, skip blobovniczas initialization...")
+ b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
return nil
}
@@ -36,7 +37,7 @@ func (b *Blobovniczas) Init() error {
return true, fmt.Errorf("could not initialize blobovnicza structure %s: %w", p, err)
}
- b.log.Debug("blobovnicza successfully initialized, closing...", zap.String("id", p))
+ b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
return false, nil
})
}
@@ -49,7 +50,7 @@ func (b *Blobovniczas) Close() error {
for p, v := range b.active {
if err := v.blz.Close(); err != nil {
- b.log.Debug("could not close active blobovnicza",
+ b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza,
zap.String("path", p),
zap.String("error", err.Error()),
)
@@ -59,7 +60,7 @@ func (b *Blobovniczas) Close() error {
for _, k := range b.opened.Keys() {
blz, _ := b.opened.Get(k)
if err := blz.Close(); err != nil {
- b.log.Debug("could not close active blobovnicza",
+ b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza,
zap.String("path", k),
zap.String("error", err.Error()),
)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
index 7e14d6d8d..202807653 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
@@ -3,6 +3,7 @@ package blobovniczatree
import (
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -44,7 +45,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
res, err = b.deleteObjectFromLevel(bPrm, p, !ok, prm)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not remove object from level",
+ b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
)
@@ -83,7 +84,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
if res, err := b.deleteObject(v, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not remove object from opened blobovnicza",
+ b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
@@ -102,7 +103,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
if res, err := b.deleteObject(active.blz, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not remove object from active blobovnicza",
+ b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
index 748843ee9..9d9fd4cba 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go
@@ -6,6 +6,7 @@ import (
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"go.opentelemetry.io/otel/attribute"
@@ -47,7 +48,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
_, err := b.getObjectFromLevel(ctx, gPrm, p, !ok)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not get object from level",
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()))
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
index bb84db086..0b8ccb64f 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
@@ -7,6 +7,7 @@ import (
"path/filepath"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -53,7 +54,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
res, err = b.getObjectFromLevel(ctx, bPrm, p, !ok)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not get object from level",
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
)
@@ -88,7 +89,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G
if res, err := b.getObject(ctx, v, prm); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not read object from opened blobovnicza",
+ b.log.Debug(logs.BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
@@ -108,7 +109,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G
if res, err := b.getObject(ctx, active.blz, prm); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not get object from active blobovnicza",
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
index b12cb32d4..d6dfe51bd 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
@@ -8,6 +8,7 @@ import (
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -54,7 +55,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if err != nil {
outOfBounds := isErrOutOfRange(err)
if !outOfBounds && !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not get object from level",
+ b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
)
@@ -98,7 +99,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang
return res, err
default:
if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not read payload range from opened blobovnicza",
+ b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
@@ -123,7 +124,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang
return res, err
default:
if !blobovnicza.IsErrNotFound(err) {
- b.log.Debug("could not read payload range from active blobovnicza",
+ b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza,
zap.String("path", blzPath),
zap.String("error", err.Error()),
)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
index db7ca1082..8b29119c6 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
@@ -4,6 +4,7 @@ import (
"errors"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"go.etcd.io/bbolt"
@@ -56,9 +57,9 @@ func (i *putIterator) iterate(path string) (bool, error) {
active, err := i.B.getActivated(path)
if err != nil {
if !isLogical(err) {
- i.B.reportError("could not get active blobovnicza", err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
- i.B.log.Debug("could not get active blobovnicza",
+ i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
zap.String("error", err.Error()))
}
@@ -71,15 +72,15 @@ func (i *putIterator) iterate(path string) (bool, error) {
// and `updateActive` takes care of not updating the active blobovnicza twice.
if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) {
if isFull {
- i.B.log.Debug("blobovnicza overflowed",
+ i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed,
zap.String("path", filepath.Join(path, u64ToHexString(active.ind))))
}
if err := i.B.updateActive(path, &active.ind); err != nil {
if !isLogical(err) {
- i.B.reportError("could not update active blobovnicza", err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza, err)
} else {
- i.B.log.Debug("could not update active blobovnicza",
+ i.B.log.Debug(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza,
zap.String("level", path),
zap.String("error", err.Error()))
}
@@ -92,9 +93,9 @@ func (i *putIterator) iterate(path string) (bool, error) {
i.AllFull = false
if !isLogical(err) {
- i.B.reportError("could not put object to active blobovnicza", err)
+ i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
} else {
- i.B.log.Debug("could not put object to active blobovnicza",
+ i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", filepath.Join(path, u64ToHexString(active.ind))),
zap.String("error", err.Error()))
}
diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go
index 6ceb9cefa..abe39575b 100644
--- a/pkg/local_object_storage/blobstor/control.go
+++ b/pkg/local_object_storage/blobstor/control.go
@@ -4,12 +4,13 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
// Open opens BlobStor.
func (b *BlobStor) Open(readOnly bool) error {
- b.log.Debug("opening...")
+ b.log.Debug(logs.BlobstorOpening)
for i := range b.storage {
err := b.storage[i].Storage.Open(readOnly)
@@ -29,7 +30,7 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
//
// Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure.
func (b *BlobStor) Init() error {
- b.log.Debug("initializing...")
+ b.log.Debug(logs.BlobstorInitializing)
if err := b.compression.Init(); err != nil {
return err
@@ -46,13 +47,13 @@ func (b *BlobStor) Init() error {
// Close releases all internal resources of BlobStor.
func (b *BlobStor) Close() error {
- b.log.Debug("closing...")
+ b.log.Debug(logs.BlobstorClosing)
var firstErr error
for i := range b.storage {
err := b.storage[i].Storage.Close()
if err != nil {
- b.log.Info("couldn't close storage", zap.String("error", err.Error()))
+ b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
if firstErr == nil {
firstErr = err
}
diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go
index 5882c33e0..3c76764a9 100644
--- a/pkg/local_object_storage/blobstor/exists.go
+++ b/pkg/local_object_storage/blobstor/exists.go
@@ -5,6 +5,7 @@ import (
"encoding/hex"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -57,7 +58,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
}
for _, err := range errors[:len(errors)-1] {
- b.log.Warn("error occurred during object existence checking",
+ b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
zap.Stringer("address", prm.Address),
zap.String("error", err.Error()))
}
diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go
index 0461dd803..2c37ee776 100644
--- a/pkg/local_object_storage/blobstor/iterate.go
+++ b/pkg/local_object_storage/blobstor/iterate.go
@@ -3,6 +3,7 @@ package blobstor
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@@ -38,7 +39,7 @@ func IterateBinaryObjects(blz *BlobStor, f func(addr oid.Address, data []byte, d
}
prm.IgnoreErrors = true
prm.ErrorHandler = func(addr oid.Address, err error) error {
- blz.log.Warn("error occurred during the iteration",
+ blz.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", addr),
zap.String("err", err.Error()))
return nil
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 3e176dc91..0c422ccc8 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -8,6 +8,7 @@ import (
"strings"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"go.uber.org/zap"
@@ -47,7 +48,7 @@ func (e *StorageEngine) open() error {
for res := range errCh {
if res.err != nil {
- e.log.Error("could not open shard, closing and skipping",
+ e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
@@ -56,7 +57,7 @@ func (e *StorageEngine) open() error {
err := sh.Close()
if err != nil {
- e.log.Error("could not close partially initialized shard",
+ e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
@@ -94,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
for res := range errCh {
if res.err != nil {
if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
- e.log.Error("could not initialize shard, closing and skipping",
+ e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping,
zap.String("id", res.id),
zap.Error(res.err))
@@ -103,7 +104,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
err := sh.Close()
if err != nil {
- e.log.Error("could not close partially initialized shard",
+ e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
zap.String("id", res.id),
zap.Error(res.err))
}
@@ -149,7 +150,7 @@ func (e *StorageEngine) close(releasePools bool) error {
for id, sh := range e.shards {
if err := sh.Close(); err != nil {
- e.log.Debug("could not close shard",
+ e.log.Debug(logs.EngineCouldNotCloseShard,
zap.String("id", id),
zap.String("error", err.Error()),
)
@@ -309,7 +310,7 @@ loop:
for _, p := range shardsToReload {
err := p.sh.Reload(p.opts...)
if err != nil {
- e.log.Error("could not reload a shard",
+ e.log.Error(logs.EngineCouldNotReloadAShard,
zap.Stringer("shard id", p.sh.ID()),
zap.Error(err))
}
@@ -338,7 +339,7 @@ loop:
return fmt.Errorf("could not add %s shard: %w", idStr, err)
}
- e.log.Info("added new shard", zap.String("id", idStr))
+ e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr))
}
return nil
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 2105c452f..1f3c142a5 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -136,7 +137,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
res, err := sh.Select(selectPrm)
if err != nil {
- e.log.Warn("error during searching for object children",
+ e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),
zap.String("error", err.Error()))
return false
@@ -147,7 +148,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
_, err = sh.Inhume(ctx, inhumePrm)
if err != nil {
- e.log.Debug("could not inhume object in shard",
+ e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
zap.String("err", err.Error()))
continue
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index e0161bfe3..20c8a946b 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -4,6 +4,7 @@ import (
"errors"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -87,24 +88,24 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32) {
sid := sh.ID()
err := sh.SetMode(mode.DegradedReadOnly)
if err != nil {
- e.log.Error("failed to move shard in degraded-read-only mode, moving to read-only",
+ e.log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
zap.Error(err))
err = sh.SetMode(mode.ReadOnly)
if err != nil {
- e.log.Error("failed to move shard in read-only mode",
+ e.log.Error(logs.EngineFailedToMoveShardInReadonlyMode,
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
zap.Error(err))
} else {
- e.log.Info("shard is moved in read-only mode due to error threshold",
+ e.log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold,
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount))
}
} else {
- e.log.Info("shard is moved in degraded mode due to error threshold",
+ e.log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold,
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount))
}
@@ -182,7 +183,7 @@ func (e *StorageEngine) reportShardErrorWithFlags(
default:
// For background workers we can have a lot of such errors,
// thus logging is done with DEBUG level.
- e.log.Debug("mode change is in progress, ignoring set-mode request",
+ e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
zap.Stringer("shard_id", sid),
zap.Uint32("error_count", errCount))
}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index f16413ea2..2ec2c2b35 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
@@ -79,7 +80,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (Eva
}
}
- e.log.Info("started shards evacuation", zap.Strings("shard_ids", shardIDs))
+ e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs))
var res EvacuateShardRes
@@ -89,7 +90,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (Eva
}
}
- e.log.Info("finished shards evacuation", zap.Strings("shard_ids", shardIDs))
+ e.log.Info(logs.EngineFinishedShardsEvacuation, zap.Strings("shard_ids", shardIDs))
return res, nil
}
@@ -206,7 +207,7 @@ func (e *StorageEngine) tryEvacuateObject(ctx context.Context, addr oid.Address,
putDone, exists := e.putToShard(ctx, shards[j].hashedShard, j, shards[j].pool, addr, object)
if putDone || exists {
if putDone {
- e.log.Debug("object is moved to another shard",
+ e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
zap.Stringer("from", sh.ID()),
zap.Stringer("to", shards[j].ID()),
zap.Stringer("addr", addr))
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index db9988338..696e78742 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -83,7 +84,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
if !prm.forceRemoval {
locked, err := e.IsLocked(prm.addrs[i])
if err != nil {
- e.log.Warn("removing an object without full locking check",
+ e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
zap.Error(err),
zap.Stringer("addr", prm.addrs[i]))
} else if locked {
@@ -222,7 +223,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l
select {
case <-ctx.Done():
- e.log.Info("interrupt processing the expired locks", zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
return true
default:
return false
@@ -236,7 +237,7 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A
select {
case <-ctx.Done():
- e.log.Info("interrupt processing the deleted locks", zap.Error(ctx.Err()))
+ e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
return true
default:
return false
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 5f9105efc..aea296cc4 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -118,7 +119,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
_, err = sh.ToMoveIt(toMoveItPrm)
if err != nil {
- e.log.Warn("could not mark object for shard relocation",
+ e.log.Warn(logs.EngineCouldNotMarkObjectForShardRelocation,
zap.Stringer("shard", sh.ID()),
zap.String("error", err.Error()),
)
@@ -135,7 +136,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
if err != nil {
if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
- e.log.Warn("could not put object to shard",
+ e.log.Warn(logs.EngineCouldNotPutObjectToShard,
zap.Stringer("shard_id", sh.ID()),
zap.String("error", err.Error()))
return
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
index c50c0844c..d365fc7b4 100644
--- a/pkg/local_object_storage/engine/remove_copies.go
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -42,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
prm.Concurrency = defaultRemoveDuplicatesConcurrency
}
- e.log.Info("starting removal of locally-redundant copies",
+ e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies,
zap.Int("concurrency", prm.Concurrency))
// The mutext must be taken for the whole duration to avoid target shard being removed
@@ -54,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
// This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0.
// However we could change weights in future and easily forget this function.
for _, sh := range e.shards {
- e.log.Debug("started duplicates removal routine", zap.String("shard_id", sh.ID().String()))
+ e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.String("shard_id", sh.ID().String()))
ch := make(chan oid.Address)
errG, ctx := errgroup.WithContext(ctx)
@@ -92,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
})
}
if err := errG.Wait(); err != nil {
- e.log.Error("finished removal of locally-redundant copies", zap.Error(err))
+ e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
return err
}
}
- e.log.Info("finished removal of locally-redundant copies")
+ e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies)
return nil
}
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 2b1146ff2..64546d9ef 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -3,6 +3,7 @@ package engine
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -168,7 +169,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
delete(e.shardPools, id)
}
- e.log.Info("shard has been removed",
+ e.log.Info(logs.EngineShardHasBeenRemoved,
zap.String("id", id))
}
e.mtx.Unlock()
@@ -176,7 +177,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
for _, sh := range ss {
err := sh.Close()
if err != nil {
- e.log.Error("could not close removed shard",
+ e.log.Error(logs.EngineCouldNotCloseRemovedShard,
zap.Stringer("id", sh.ID()),
zap.Error(err),
)
diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go
index 1a19c3e2a..4ae802aaa 100644
--- a/pkg/local_object_storage/metabase/control.go
+++ b/pkg/local_object_storage/metabase/control.go
@@ -5,6 +5,7 @@ import (
"fmt"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@@ -25,7 +26,7 @@ func (db *DB) Open(readOnly bool) error {
return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
}
- db.log.Debug("created directory for Metabase", zap.String("path", db.info.Path))
+ db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
if db.boltOptions == nil {
opts := *bbolt.DefaultOptions
@@ -46,9 +47,9 @@ func (db *DB) openBolt() error {
db.boltDB.MaxBatchDelay = db.boltBatchDelay
db.boltDB.MaxBatchSize = db.boltBatchSize
- db.log.Debug("opened boltDB instance for Metabase")
+ db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase)
- db.log.Debug("checking metabase version")
+ db.log.Debug(logs.MetabaseCheckingMetabaseVersion)
return db.boltDB.View(func(tx *bbolt.Tx) error {
// The safest way to check if the metabase is fresh is to check if it has no buckets.
// However, shard info can be present. So here we check that the number of buckets is
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 20985f47a..74c261d35 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -7,6 +7,7 @@ import (
"strings"
v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -267,7 +268,7 @@ func (db *DB) selectFromFKBT(
) { //
matchFunc, ok := db.matchers[f.Operation()]
if !ok {
- db.log.Debug("missing matcher", zap.Uint32("operation", uint32(f.Operation())))
+ db.log.Debug(logs.MetabaseMissingMatcher, zap.Uint32("operation", uint32(f.Operation())))
return
}
@@ -290,7 +291,7 @@ func (db *DB) selectFromFKBT(
})
})
if err != nil {
- db.log.Debug("error in FKBT selection", zap.String("error", err.Error()))
+ db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error()))
}
}
@@ -360,13 +361,13 @@ func (db *DB) selectFromList(
case object.MatchStringEqual:
lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value())))
if err != nil {
- db.log.Debug("can't decode list bucket leaf", zap.String("error", err.Error()))
+ db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()))
return
}
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug("unknown operation", zap.Uint32("operation", uint32(op)))
+ db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op)))
return
}
@@ -374,7 +375,7 @@ func (db *DB) selectFromList(
if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(key, val []byte) error {
l, err := decodeList(val)
if err != nil {
- db.log.Debug("can't decode list bucket leaf",
+ db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf,
zap.String("error", err.Error()),
)
@@ -385,7 +386,7 @@ func (db *DB) selectFromList(
return nil
}); err != nil {
- db.log.Debug("can't iterate over the bucket",
+ db.log.Debug(logs.MetabaseCantIterateOverTheBucket,
zap.String("error", err.Error()),
)
@@ -429,7 +430,7 @@ func (db *DB) selectObjectID(
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug("unknown operation",
+ db.log.Debug(logs.MetabaseUnknownOperation,
zap.Uint32("operation", uint32(f.Operation())),
)
@@ -451,7 +452,7 @@ func (db *DB) selectObjectID(
return nil
})
if err != nil {
- db.log.Debug("could not iterate over the buckets",
+ db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets,
zap.String("error", err.Error()),
)
}
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index d727d27a5..3d0f72922 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -15,7 +16,7 @@ import (
)
func (s *Shard) handleMetabaseFailure(stage string, err error) error {
- s.log.Error("metabase failure, switching mode",
+ s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.ReadOnly),
zap.Error(err))
@@ -25,7 +26,7 @@ func (s *Shard) handleMetabaseFailure(stage string, err error) error {
return nil
}
- s.log.Error("can't move shard to readonly, switch mode",
+ s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
zap.String("stage", stage),
zap.Stringer("mode", mode.DegradedReadOnly),
zap.Error(err))
@@ -167,7 +168,7 @@ func (s *Shard) refillMetabase() error {
err = blobstor.IterateBinaryObjects(s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error {
if err := obj.Unmarshal(data); err != nil {
- s.log.Warn("could not unmarshal object",
+ s.log.Warn(logs.ShardCouldNotUnmarshalObject,
zap.Stringer("address", addr),
zap.String("err", err.Error()))
return nil
@@ -274,7 +275,7 @@ func (s *Shard) Close() error {
for _, component := range components {
if err := component.Close(); err != nil {
lastErr = err
- s.log.Error("could not close shard component", zap.Error(err))
+ s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
}
}
@@ -302,7 +303,7 @@ func (s *Shard) Reload(opts ...Option) error {
ok, err := s.metaBase.Reload(c.metaOpts...)
if err != nil {
if errors.Is(err, meta.ErrDegradedMode) {
- s.log.Error("can't open metabase, move to a degraded mode", zap.Error(err))
+ s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
_ = s.setMode(mode.DegradedReadOnly)
}
return err
@@ -318,12 +319,12 @@ func (s *Shard) Reload(opts ...Option) error {
err = s.metaBase.Init()
}
if err != nil {
- s.log.Error("can't initialize metabase, move to a degraded-read-only mode", zap.Error(err))
+ s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
_ = s.setMode(mode.DegradedReadOnly)
return err
}
}
- s.log.Info("trying to restore read-write mode")
+ s.log.Info(logs.ShardTryingToRestoreReadwriteMode)
return s.setMode(mode.ReadWrite)
}
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index 6ae3bf7dd..ed05f9982 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -3,6 +3,7 @@ package shard
import (
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
@@ -49,7 +50,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
if s.hasWriteCache() {
err := s.writeCache.Delete(prm.addr[i])
if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) {
- s.log.Warn("can't delete object from write cache", zap.String("error", err.Error()))
+ s.log.Warn(logs.ShardCantDeleteObjectFromWriteCache, zap.String("error", err.Error()))
}
}
@@ -58,7 +59,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
res, err := s.metaBase.StorageID(sPrm)
if err != nil {
- s.log.Debug("can't get storage ID from metabase",
+ s.log.Debug(logs.ShardCantGetStorageIDFromMetabase,
zap.Stringer("object", prm.addr[i]),
zap.String("error", err.Error()))
@@ -100,7 +101,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
_, err = s.blobStor.Delete(delPrm)
if err != nil {
- s.log.Debug("can't remove object from blobStor",
+ s.log.Debug(logs.ShardCantRemoveObjectFromBlobStor,
zap.Stringer("object_address", prm.addr[i]),
zap.String("error", err.Error()))
}
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index 6f18e6c3a..5ea9ecedf 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -5,6 +5,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@@ -124,7 +125,7 @@ func (gc *gc) listenEvents(ctx context.Context) {
for {
event, ok := <-gc.eventChan
if !ok {
- gc.log.Warn("stop event listener by closed channel")
+ gc.log.Warn(logs.ShardStopEventListenerByClosedChannel)
return
}
@@ -149,7 +150,7 @@ func (gc *gc) listenEvents(ctx context.Context) {
v.prevGroup.Done()
})
if err != nil {
- gc.log.Warn("could not submit GC job to worker pool",
+ gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
zap.String("error", err.Error()),
)
@@ -174,7 +175,7 @@ func (gc *gc) tickRemover() {
close(gc.eventChan)
- gc.log.Debug("GC is stopped")
+ gc.log.Debug(logs.ShardGCIsStopped)
return
case <-timer.C:
gc.remover()
@@ -188,7 +189,7 @@ func (gc *gc) stop() {
gc.stopChannel <- struct{}{}
})
- gc.log.Info("waiting for GC workers to stop...")
+ gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
gc.wg.Wait()
}
@@ -220,7 +221,7 @@ func (s *Shard) removeGarbage() {
// (no more than s.rmBatchSize objects)
err := s.metaBase.IterateOverGarbage(iterPrm)
if err != nil {
- s.log.Warn("iterator over metabase graveyard failed",
+ s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
zap.String("error", err.Error()),
)
@@ -235,7 +236,7 @@ func (s *Shard) removeGarbage() {
// delete accumulated objects
_, err = s.delete(deletePrm)
if err != nil {
- s.log.Warn("could not delete the objects",
+ s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
zap.String("error", err.Error()),
)
@@ -295,7 +296,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
})
if err := errGroup.Wait(); err != nil {
- s.log.Warn("iterator over expired objects failed", zap.String("error", err.Error()))
+ s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
}
}
@@ -321,7 +322,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
// inhume the collected objects
res, err := s.metaBase.Inhume(inhumePrm)
if err != nil {
- s.log.Warn("could not inhume the objects",
+ s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
zap.String("error", err.Error()),
)
@@ -342,7 +343,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
epoch := e.(newEpoch).epoch
log := s.log.With(zap.Uint64("epoch", epoch))
- log.Debug("started expired tombstones handling")
+ log.Debug(logs.ShardStartedExpiredTombstonesHandling)
const tssDeleteBatch = 50
tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@@ -360,12 +361,12 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
})
for {
- log.Debug("iterating tombstones")
+ log.Debug(logs.ShardIteratingTombstones)
s.m.RLock()
if s.info.Mode.NoMetabase() {
- s.log.Debug("shard is in a degraded mode, skip collecting expired tombstones")
+ s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
s.m.RUnlock()
return
@@ -373,7 +374,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
err := s.metaBase.IterateOverGraveyard(iterPrm)
if err != nil {
- log.Error("iterator over graveyard failed", zap.Error(err))
+ log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
s.m.RUnlock()
return
@@ -392,7 +393,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
}
}
- log.Debug("handling expired tombstones batch", zap.Int("number", len(tssExp)))
+ log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
s.expiredTombstonesCallback(ctx, tssExp)
iterPrm.SetOffset(tss[tssLen-1].Address())
@@ -400,7 +401,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
tssExp = tssExp[:0]
}
- log.Debug("finished expired tombstones handling")
+ log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
}
func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
@@ -442,7 +443,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
})
if err := errGroup.Wait(); err != nil {
- s.log.Warn("iterator over expired locks failed", zap.String("error", err.Error()))
+ s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
}
}
@@ -503,7 +504,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
// inhume tombstones
res, err := s.metaBase.Inhume(pInhume)
if err != nil {
- s.log.Warn("could not mark tombstones as garbage",
+ s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
zap.String("error", err.Error()),
)
@@ -523,7 +524,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
// from graveyard
err = s.metaBase.DropGraves(tss)
if err != nil {
- s.log.Warn("could not drop expired grave records", zap.Error(err))
+ s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
}
}
@@ -535,7 +536,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
}
unlocked, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
- s.log.Warn("failure to unlock objects",
+ s.log.Warn(logs.ShardFailureToUnlockObjects,
zap.String("error", err.Error()),
)
@@ -548,7 +549,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
res, err := s.metaBase.Inhume(pInhume)
if err != nil {
- s.log.Warn("failure to mark lockers as garbage",
+ s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
zap.String("error", err.Error()),
)
@@ -570,7 +571,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
if err != nil {
- s.log.Warn("failure to get expired unlocked objects", zap.Error(err))
+ s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
return
}
@@ -589,7 +590,7 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
_, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
- s.log.Warn("failure to unlock objects",
+ s.log.Warn(logs.ShardFailureToUnlockObjects,
zap.String("error", err.Error()),
)
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 3406b9338..8a0296ac6 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -5,6 +5,7 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -126,7 +127,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
return nil, false, logicerr.Wrap(apistatus.ObjectNotFound{})
}
} else {
- s.log.Warn("fetching object without meta", zap.Stringer("addr", addr))
+ s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
}
if s.hasWriteCache() {
@@ -135,11 +136,11 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
return res, false, err
}
if IsErrNotFound(err) {
- s.log.Debug("object is missing in write-cache",
+ s.log.Debug(logs.ShardObjectIsMissingInWritecache,
zap.Stringer("addr", addr),
zap.Bool("skip_meta", skipMeta))
} else {
- s.log.Error("failed to fetch object from write-cache",
+ s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
zap.Error(err),
zap.Stringer("addr", addr),
zap.Bool("skip_meta", skipMeta))
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index 40a5bf22e..3457188be 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@@ -98,7 +99,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
return InhumeRes{}, ErrLockObjectRemoval
}
- s.log.Debug("could not mark object to delete in metabase",
+ s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
zap.String("error", err.Error()),
)
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 9efca8983..bab1090eb 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -3,6 +3,7 @@ package shard
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -86,7 +87,7 @@ func (s *Shard) List() (res SelectRes, err error) {
sRes, err := s.metaBase.Select(sPrm) // consider making List in metabase
if err != nil {
- s.log.Debug("can't select all objects",
+ s.log.Debug(logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
zap.String("error", err.Error()))
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index 17ed3f3c8..50c52accc 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -1,6 +1,7 @@
package shard
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"go.uber.org/zap"
@@ -25,7 +26,7 @@ func (s *Shard) SetMode(m mode.Mode) error {
}
func (s *Shard) setMode(m mode.Mode) error {
- s.log.Info("setting shard mode",
+ s.log.Info(logs.ShardSettingShardMode,
zap.Stringer("old_mode", s.info.Mode),
zap.Stringer("new_mode", m))
@@ -66,7 +67,7 @@ func (s *Shard) setMode(m mode.Mode) error {
s.metricsWriter.SetReadonly(s.info.Mode != mode.ReadWrite)
}
- s.log.Info("shard mode set successfully",
+ s.log.Info(logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
return nil
}
diff --git a/pkg/local_object_storage/shard/move.go b/pkg/local_object_storage/shard/move.go
index c6bf8409e..f3199ac07 100644
--- a/pkg/local_object_storage/shard/move.go
+++ b/pkg/local_object_storage/shard/move.go
@@ -1,6 +1,7 @@
package shard
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@@ -38,7 +39,7 @@ func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
_, err := s.metaBase.ToMoveIt(toMovePrm)
if err != nil {
- s.log.Debug("could not mark object for shard relocation in metabase",
+ s.log.Debug(logs.ShardCouldNotMarkObjectForShardRelocationInMetabase,
zap.String("error", err.Error()),
)
}
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index 48dbe1be2..a4cb2cb1f 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -3,6 +3,7 @@ package shard
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -58,7 +59,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
}
if err != nil || !tryCache {
if err != nil {
- s.log.Debug("can't put object to the write-cache, trying blobstor",
+ s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
zap.String("err", err.Error()))
}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 6d1fba141..44ec54645 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -5,6 +5,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -349,7 +350,7 @@ func (s *Shard) updateMetrics() {
if s.cfg.metricsWriter != nil && !s.GetMode().NoMetabase() {
cc, err := s.metaBase.ObjectCounters()
if err != nil {
- s.log.Warn("meta: object counter read",
+ s.log.Warn(logs.ShardMetaObjectCounterRead,
zap.Error(err),
)
@@ -361,7 +362,7 @@ func (s *Shard) updateMetrics() {
cnrList, err := s.metaBase.Containers()
if err != nil {
- s.log.Warn("meta: can't read container list", zap.Error(err))
+ s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
return
}
@@ -370,7 +371,7 @@ func (s *Shard) updateMetrics() {
for i := range cnrList {
size, err := s.metaBase.ContainerSize(cnrList[i])
if err != nil {
- s.log.Warn("meta: can't read container size",
+ s.log.Warn(logs.ShardMetaCantReadContainerSize,
zap.String("cid", cnrList[i].EncodeToString()),
zap.Error(err))
continue
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 0437367e7..3ca3aa905 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -5,6 +5,7 @@ import (
"errors"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -134,7 +135,7 @@ func (c *cache) flushDB() {
c.modeMtx.RUnlock()
- c.log.Debug("tried to flush items from write-cache",
+ c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
zap.Int("count", count),
zap.String("start", base58.Encode(lastKey)))
}
diff --git a/pkg/local_object_storage/writecache/init.go b/pkg/local_object_storage/writecache/init.go
index ffe7a0129..0ac8cea99 100644
--- a/pkg/local_object_storage/writecache/init.go
+++ b/pkg/local_object_storage/writecache/init.go
@@ -5,6 +5,7 @@ import (
"errors"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -54,7 +55,7 @@ func (c *cache) initFlushMarks() {
var errStopIter = errors.New("stop iteration")
func (c *cache) fsTreeFlushMarkUpdate() {
- c.log.Info("filling flush marks for objects in FSTree")
+ c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInFSTree)
var prm common.IteratePrm
prm.LazyHandler = func(addr oid.Address, _ func() ([]byte, error)) error {
@@ -86,11 +87,11 @@ func (c *cache) fsTreeFlushMarkUpdate() {
return nil
}
_, _ = c.fsTree.Iterate(prm)
- c.log.Info("finished updating FSTree flush marks")
+ c.log.Info(logs.WritecacheFinishedUpdatingFSTreeFlushMarks)
}
func (c *cache) dbFlushMarkUpdate() {
- c.log.Info("filling flush marks for objects in database")
+ c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInDatabase)
var m []string
var indices []int
@@ -158,7 +159,7 @@ func (c *cache) dbFlushMarkUpdate() {
lastKey = append([]byte(m[len(m)-1]), 0)
}
- c.log.Info("finished updating flush marks")
+ c.log.Info(logs.WritecacheFinishedUpdatingFlushMarks)
}
// flushStatus returns info about the object state in the main storage.
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index 997310d9e..939dc5b06 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -4,6 +4,7 @@ import (
"fmt"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
)
@@ -59,7 +60,7 @@ func (c *cache) setMode(m mode.Mode) error {
// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
// guarantees that there are no in-fly operations.
for len(c.flushCh) != 0 {
- c.log.Info("waiting for channels to flush")
+ c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
time.Sleep(time.Second)
}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 667d34cb9..ff7eb1d6a 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -5,6 +5,7 @@ import (
"fmt"
"os"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
@@ -124,7 +125,7 @@ func (c *cache) deleteFromDB(keys []string) []string {
)
}
if err != nil {
- c.log.Error("can't remove objects from the database", zap.Error(err))
+ c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err))
}
copy(keys, keys[errorIndex:])
@@ -141,13 +142,13 @@ func (c *cache) deleteFromDisk(keys []string) []string {
for i := range keys {
if err := addr.DecodeString(keys[i]); err != nil {
- c.log.Error("can't parse address", zap.String("address", keys[i]))
+ c.log.Error(logs.WritecacheCantParseAddress, zap.String("address", keys[i]))
continue
}
_, err := c.fsTree.Delete(common.DeletePrm{Address: addr})
if err != nil && !errors.As(err, new(apistatus.ObjectNotFound)) {
- c.log.Error("can't remove object from write-cache", zap.Error(err))
+ c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
// Save the key for the next iteration.
keys[copyIndex] = keys[i]
diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go
index 5e98211c4..1c33fa5e0 100644
--- a/pkg/morph/client/client.go
+++ b/pkg/morph/client/client.go
@@ -8,6 +8,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -186,7 +187,7 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string,
return fmt.Errorf("could not invoke %s: %w", method, err)
}
- c.logger.Debug("neo client invoke",
+ c.logger.Debug(logs.ClientNeoClientInvoke,
zap.String("method", method),
zap.Uint32("vub", vub),
zap.Stringer("tx_hash", txHash.Reverse()))
@@ -269,7 +270,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
return err
}
- c.logger.Debug("native gas transfer invoke",
+ c.logger.Debug(logs.ClientNativeGasTransferInvoke,
zap.String("to", receiver.StringLE()),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -303,7 +304,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
return err
}
- c.logger.Debug("batch gas transfer invoke",
+ c.logger.Debug(logs.ClientBatchGasTransferInvoke,
zap.Strings("to", receiversLog),
zap.Stringer("tx_hash", txHash.Reverse()),
zap.Uint32("vub", vub))
@@ -330,7 +331,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error("can't get blockchain height",
+ c.logger.Error(logs.ClientCantGetBlockchainHeight,
zap.String("error", err.Error()))
return nil
}
@@ -344,7 +345,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
newHeight, err = c.rpcActor.GetBlockCount()
if err != nil {
- c.logger.Error("can't get blockchain height",
+ c.logger.Error(logs.ClientCantGetBlockchainHeight243,
zap.String("error", err.Error()))
return nil
}
diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go
index 5d736839a..fab90b446 100644
--- a/pkg/morph/client/multi.go
+++ b/pkg/morph/client/multi.go
@@ -5,6 +5,7 @@ import (
"sort"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/neorpc"
@@ -44,7 +45,7 @@ func (c *Client) switchRPC(ctx context.Context) bool {
newEndpoint := c.endpoints.list[c.endpoints.curr].Address
cli, act, err := c.newCli(ctx, newEndpoint)
if err != nil {
- c.logger.Warn("could not establish connection to the switched RPC node",
+ c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode,
zap.String("endpoint", newEndpoint),
zap.Error(err),
)
@@ -54,7 +55,7 @@ func (c *Client) switchRPC(ctx context.Context) bool {
c.cache.invalidate()
- c.logger.Info("connection to the new RPC node has been established",
+ c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished,
zap.String("endpoint", newEndpoint))
subs, ok := c.restoreSubscriptions(ctx, cli, newEndpoint, false)
@@ -147,7 +148,7 @@ func (c *Client) routeEvent(ctx context.Context, e any) {
func (c *Client) reconnect(ctx context.Context) bool {
if closeErr := c.client.GetError(); closeErr != nil {
- c.logger.Warn("switching to the next RPC node",
+ c.logger.Warn(logs.ClientSwitchingToTheNextRPCNode,
zap.String("reason", closeErr.Error()),
)
} else {
@@ -158,7 +159,7 @@ func (c *Client) reconnect(ctx context.Context) bool {
}
if !c.switchRPC(ctx) {
- c.logger.Error("could not establish connection to any RPC node")
+ c.logger.Error(logs.ClientCouldNotEstablishConnectionToAnyRPCNode)
// could not connect to all endpoints =>
// switch client to inactive mode
@@ -210,7 +211,7 @@ mainLoop:
cli, act, err := c.newCli(ctx, tryE)
if err != nil {
- c.logger.Warn("could not create client to the higher priority node",
+ c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode,
zap.String("endpoint", tryE),
zap.Error(err),
)
@@ -237,13 +238,13 @@ mainLoop:
c.switchLock.Unlock()
- c.logger.Info("switched to the higher priority RPC",
+ c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC,
zap.String("endpoint", tryE))
return
}
- c.logger.Warn("could not restore side chain subscriptions using node",
+ c.logger.Warn(logs.ClientCouldNotRestoreSideChainSubscriptionsUsingNode,
zap.String("endpoint", tryE),
zap.Error(err),
)
diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go
index 7399c19cd..427554372 100644
--- a/pkg/morph/client/notary.go
+++ b/pkg/morph/client/notary.go
@@ -8,6 +8,7 @@ import (
"math/big"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -203,7 +204,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint2
// Transaction is already in mempool waiting to be processed.
// This is an expected situation if we restart the service.
- c.logger.Info("notary deposit has already been made",
+ c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
@@ -211,7 +212,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint2
return util.Uint256{}, nil
}
- c.logger.Info("notary deposit invoke",
+ c.logger.Info(logs.ClientNotaryDepositInvoke,
zap.Int64("amount", int64(amount)),
zap.Int64("expire_at", till),
zap.Uint32("vub", vub),
@@ -430,7 +431,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error {
return err
}
- c.logger.Debug("notary request with prepared main TX invoked",
+ c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked,
zap.Uint32("fallback_valid_for", c.notary.fallbackTime),
zap.Stringer("tx_hash", resp.Hash().Reverse()))
@@ -489,7 +490,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint
return err
}
- c.logger.Debug("notary request invoked",
+ c.logger.Debug(logs.ClientNotaryRequestInvoked,
zap.String("method", method),
zap.Uint32("valid_until_block", until),
zap.Uint32("fallback_valid_for", c.notary.fallbackTime),
diff --git a/pkg/morph/client/notifications.go b/pkg/morph/client/notifications.go
index 300bab825..69eafc659 100644
--- a/pkg/morph/client/notifications.go
+++ b/pkg/morph/client/notifications.go
@@ -3,6 +3,7 @@ package client
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"github.com/nspcc-dev/neo-go/pkg/core/block"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/neorpc"
@@ -260,7 +261,7 @@ func (c *Client) restoreSubscriptions(ctx context.Context, cli *rpcclient.WSClie
if si.subscribedToBlocks {
_, err = cli.ReceiveBlocks(nil, blockRcv)
if err != nil {
- c.logger.Error("could not restore block subscription after RPC switch",
+ c.logger.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch,
zap.String("endpoint", endpoint),
zap.Error(err),
)
@@ -274,7 +275,7 @@ func (c *Client) restoreSubscriptions(ctx context.Context, cli *rpcclient.WSClie
contract := contract // See https://github.com/nspcc-dev/neo-go/issues/2890
id, err = cli.ReceiveExecutionNotifications(&neorpc.NotificationFilter{Contract: &contract}, notificationRcv)
if err != nil {
- c.logger.Error("could not restore notification subscription after RPC switch",
+ c.logger.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch,
zap.String("endpoint", endpoint),
zap.Error(err),
)
@@ -291,7 +292,7 @@ func (c *Client) restoreSubscriptions(ctx context.Context, cli *rpcclient.WSClie
signer := signer // See https://github.com/nspcc-dev/neo-go/issues/2890
id, err = cli.ReceiveNotaryRequests(&neorpc.TxFilter{Signer: &signer}, notaryReqRcv)
if err != nil {
- c.logger.Error("could not restore notary notification subscription after RPC switch",
+ c.logger.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch,
zap.String("endpoint", endpoint),
zap.Error(err),
)
diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go
index 3de199328..405165702 100644
--- a/pkg/morph/event/listener.go
+++ b/pkg/morph/event/listener.go
@@ -6,6 +6,7 @@ import (
"fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -133,7 +134,7 @@ var (
func (l *listener) Listen(ctx context.Context) {
l.startOnce.Do(func() {
if err := l.listen(ctx, nil); err != nil {
- l.log.Error("could not start listen to events",
+ l.log.Error(logs.EventCouldNotStartListenToEvents,
zap.String("error", err.Error()),
)
}
@@ -149,7 +150,7 @@ func (l *listener) Listen(ctx context.Context) {
func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
l.startOnce.Do(func() {
if err := l.listen(ctx, intError); err != nil {
- l.log.Error("could not start listen to events",
+ l.log.Error(logs.EventCouldNotStartListenToEvents,
zap.String("error", err.Error()),
)
intError <- err
@@ -221,53 +222,53 @@ loop:
if intErr != nil {
intErr <- err
} else {
- l.log.Error("stop event listener by error", zap.Error(err))
+ l.log.Error(logs.EventStopEventListenerByError, zap.Error(err))
}
break loop
case <-ctx.Done():
- l.log.Info("stop event listener by context",
+ l.log.Info(logs.EventStopEventListenerByContext,
zap.String("reason", ctx.Err().Error()),
)
break loop
case notifyEvent, ok := <-chs.NotificationsCh:
if !ok {
- l.log.Warn("stop event listener by notification channel")
+ l.log.Warn(logs.EventStopEventListenerByNotificationChannel)
if intErr != nil {
intErr <- errors.New("event subscriber connection has been terminated")
}
break loop
} else if notifyEvent == nil {
- l.log.Warn("nil notification event was caught")
+ l.log.Warn(logs.EventNilNotificationEventWasCaught)
continue loop
}
l.handleNotifyEvent(notifyEvent)
case notaryEvent, ok := <-chs.NotaryRequestsCh:
if !ok {
- l.log.Warn("stop event listener by notary channel")
+ l.log.Warn(logs.EventStopEventListenerByNotaryChannel)
if intErr != nil {
intErr <- errors.New("notary event subscriber connection has been terminated")
}
break loop
} else if notaryEvent == nil {
- l.log.Warn("nil notary event was caught")
+ l.log.Warn(logs.EventNilNotaryEventWasCaught)
continue loop
}
l.handleNotaryEvent(notaryEvent)
case b, ok := <-chs.BlockCh:
if !ok {
- l.log.Warn("stop event listener by block channel")
+ l.log.Warn(logs.EventStopEventListenerByBlockChannel)
if intErr != nil {
intErr <- errors.New("new block notification channel is closed")
}
break loop
} else if b == nil {
- l.log.Warn("nil block was caught")
+ l.log.Warn(logs.EventNilBlockWasCaught)
continue loop
}
@@ -282,7 +283,7 @@ func (l *listener) handleBlockEvent(b *block.Block) {
l.blockHandlers[i](b)
}
}); err != nil {
- l.log.Warn("listener worker pool drained",
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
@@ -291,7 +292,7 @@ func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) {
if err := l.pool.Submit(func() {
l.parseAndHandleNotary(notaryEvent)
}); err != nil {
- l.log.Warn("listener worker pool drained",
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
@@ -300,7 +301,7 @@ func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEve
if err := l.pool.Submit(func() {
l.parseAndHandleNotification(notifyEvent)
}); err != nil {
- l.log.Warn("listener worker pool drained",
+ l.log.Warn(logs.EventListenerWorkerPoolDrained,
zap.Int("capacity", l.pool.Cap()))
}
}
@@ -327,7 +328,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
l.mtx.RUnlock()
if !ok {
- log.Debug("event parser not set")
+ log.Debug(logs.EventEventParserNotSet)
return
}
@@ -335,7 +336,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
// parse the notification event
event, err := parser(notifyEvent)
if err != nil {
- log.Warn("could not parse notification event",
+ log.Warn(logs.EventCouldNotParseNotificationEvent,
zap.String("error", err.Error()),
)
@@ -348,7 +349,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi
l.mtx.RUnlock()
if len(handlers) == 0 {
- log.Info("notification handlers for parsed notification event were not registered",
+ log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
@@ -367,11 +368,11 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
switch {
case errors.Is(err, ErrTXAlreadyHandled):
case errors.Is(err, ErrMainTXExpired):
- l.log.Warn("skip expired main TX notary event",
+ l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent,
zap.String("error", err.Error()),
)
default:
- l.log.Warn("could not prepare and validate notary event",
+ l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent,
zap.String("error", err.Error()),
)
}
@@ -395,7 +396,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
l.mtx.RUnlock()
if !ok {
- log.Debug("notary parser not set")
+ log.Debug(logs.EventNotaryParserNotSet)
return
}
@@ -403,7 +404,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
// parse the notary event
event, err := parser(notaryEvent)
if err != nil {
- log.Warn("could not parse notary event",
+ log.Warn(logs.EventCouldNotParseNotaryEvent,
zap.String("error", err.Error()),
)
@@ -416,7 +417,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) {
l.mtx.RUnlock()
if !ok {
- log.Info("notary handlers for parsed notification event were not registered",
+ log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered,
zap.Any("event", event),
)
@@ -438,7 +439,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
parser := pi.parser()
if parser == nil {
- log.Info("ignore nil event parser")
+ log.Info(logs.EventIgnoreNilEventParser)
return
}
@@ -447,7 +448,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
// check if the listener was started
if l.started {
- log.Warn("listener has been already started, ignore parser")
+ log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser)
return
}
@@ -456,7 +457,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) {
l.notificationParsers[pi.scriptHashWithType] = pi.parser()
}
- log.Debug("registered new event parser")
+ log.Debug(logs.EventRegisteredNewEventParser)
}
// RegisterNotificationHandler registers the handler for particular notification event of contract.
@@ -471,7 +472,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
handler := hi.Handler()
if handler == nil {
- log.Warn("ignore nil event handler")
+ log.Warn(logs.EventIgnoreNilEventHandler)
return
}
@@ -481,7 +482,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
l.mtx.RUnlock()
if !ok {
- log.Warn("ignore handler of event w/o parser")
+ log.Warn(logs.EventIgnoreHandlerOfEventWoParser)
return
}
@@ -493,7 +494,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) {
)
l.mtx.Unlock()
- log.Debug("registered new event handler")
+ log.Debug(logs.EventRegisteredNewEventHandler)
}
// EnableNotarySupport enables notary request listening. Passed hash is
@@ -534,7 +535,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
parser := pi.parser()
if parser == nil {
- log.Info("ignore nil notary event parser")
+ log.Info(logs.EventIgnoreNilNotaryEventParser)
return
}
@@ -543,7 +544,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
// check if the listener was started
if l.started {
- log.Warn("listener has been already started, ignore notary parser")
+ log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser)
return
}
@@ -552,7 +553,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) {
l.notaryParsers[pi.notaryRequestTypes] = pi.parser()
}
- log.Info("registered new event parser")
+ log.Info(logs.EventRegisteredNewEventParser)
}
// RegisterNotaryHandler registers the handler for particular notification notary request event.
@@ -572,7 +573,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
handler := hi.Handler()
if handler == nil {
- log.Warn("ignore nil notary event handler")
+ log.Warn(logs.EventIgnoreNilNotaryEventHandler)
return
}
@@ -582,7 +583,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
l.mtx.RUnlock()
if !ok {
- log.Warn("ignore handler of notary event w/o parser")
+ log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser)
return
}
@@ -591,7 +592,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) {
l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler()
l.mtx.Unlock()
- log.Info("registered new event handler")
+ log.Info(logs.EventRegisteredNewEventHandler)
}
// Stop closes subscription channel with remote neo node.
@@ -603,7 +604,7 @@ func (l *listener) Stop() {
func (l *listener) RegisterBlockHandler(handler BlockHandler) {
if handler == nil {
- l.log.Warn("ignore nil block handler")
+ l.log.Warn(logs.EventIgnoreNilBlockHandler)
return
}
diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go
index 355fd5b4d..2a7c6250d 100644
--- a/pkg/morph/event/utils.go
+++ b/pkg/morph/event/utils.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
util2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -89,7 +90,7 @@ func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handle
})
if err != nil {
- log.Warn("could not Submit handler to worker pool",
+ log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool,
zap.String("error", err.Error()),
)
}
diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go
index 17bed5b2d..a2e1c32eb 100644
--- a/pkg/morph/subscriber/subscriber.go
+++ b/pkg/morph/subscriber/subscriber.go
@@ -6,6 +6,7 @@ import (
"fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/nspcc-dev/neo-go/pkg/core/block"
@@ -99,7 +100,7 @@ func (s *subscriber) SubscribeForNotification(contracts ...util.Uint160) error {
func (s *subscriber) UnsubscribeForNotification() {
err := s.client.UnsubscribeAll()
if err != nil {
- s.log.Error("unsubscribe for notification",
+ s.log.Error(logs.SubscriberUnsubscribeForNotification,
zap.Error(err))
}
}
@@ -133,7 +134,7 @@ func (s *subscriber) routeNotifications(ctx context.Context) {
return
case notification, ok := <-notificationChan:
if !ok {
- s.log.Warn("remote notification channel has been closed")
+ s.log.Warn(logs.SubscriberRemoteNotificationChannelHasBeenClosed)
close(s.notifyChan)
close(s.blockChan)
close(s.notaryChan)
@@ -145,13 +146,13 @@ func (s *subscriber) routeNotifications(ctx context.Context) {
case neorpc.NotificationEventID:
notifyEvent, ok := notification.Value.(*state.ContainedNotificationEvent)
if !ok {
- s.log.Error("can't cast notify event value to the notify struct",
+ s.log.Error(logs.SubscriberCantCastNotifyEventValueToTheNotifyStruct,
zap.String("received type", fmt.Sprintf("%T", notification.Value)),
)
continue
}
- s.log.Debug("new notification event from sidechain",
+ s.log.Debug(logs.SubscriberNewNotificationEventFromSidechain,
zap.String("name", notifyEvent.Name),
)
@@ -159,7 +160,7 @@ func (s *subscriber) routeNotifications(ctx context.Context) {
case neorpc.BlockEventID:
b, ok := notification.Value.(*block.Block)
if !ok {
- s.log.Error("can't cast block event value to block",
+ s.log.Error(logs.SubscriberCantCastBlockEventValueToBlock,
zap.String("received type", fmt.Sprintf("%T", notification.Value)),
)
continue
@@ -169,7 +170,7 @@ func (s *subscriber) routeNotifications(ctx context.Context) {
case neorpc.NotaryRequestEventID:
notaryRequest, ok := notification.Value.(*result.NotaryRequestEvent)
if !ok {
- s.log.Error("can't cast notify event value to the notary request struct",
+ s.log.Error(logs.SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct,
zap.String("received type", fmt.Sprintf("%T", notification.Value)),
)
continue
@@ -177,7 +178,7 @@ func (s *subscriber) routeNotifications(ctx context.Context) {
s.notaryChan <- notaryRequest
default:
- s.log.Debug("unsupported notification from the chain",
+ s.log.Debug(logs.SubscriberUnsupportedNotificationFromTheChain,
zap.Uint8("type", uint8(notification.Type)),
)
}
diff --git a/pkg/services/audit/auditor/context.go b/pkg/services/audit/auditor/context.go
index bf720c330..194c5188a 100644
--- a/pkg/services/audit/auditor/context.go
+++ b/pkg/services/audit/auditor/context.go
@@ -5,6 +5,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@@ -197,7 +198,7 @@ func (c *Context) init() {
func (c *Context) expired(ctx context.Context) bool {
select {
case <-ctx.Done():
- c.log.Debug("audit context is done",
+ c.log.Debug(logs.AuditorAuditContextIsDone,
zap.String("error", ctx.Err().Error()),
)
@@ -212,10 +213,10 @@ func (c *Context) complete() {
}
func (c *Context) writeReport() {
- c.log.Debug("writing audit report...")
+ c.log.Debug(logs.AuditorWritingAuditReport)
if err := c.task.Reporter().WriteReport(c.report); err != nil {
- c.log.Error("could not write audit report")
+ c.log.Error(logs.AuditorCouldNotWriteAuditReport)
}
}
diff --git a/pkg/services/audit/auditor/pdp.go b/pkg/services/audit/auditor/pdp.go
index 8a184eb7e..d5ad0fea4 100644
--- a/pkg/services/audit/auditor/pdp.go
+++ b/pkg/services/audit/auditor/pdp.go
@@ -6,6 +6,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -130,7 +131,7 @@ func (c *Context) collectHashes(ctx context.Context, p *gamePair) {
sleepDur = time.Duration(rand.Uint64() % c.maxPDPSleep)
}
- c.log.Debug("sleep before get range hash",
+ c.log.Debug(logs.AuditorSleepBeforeGetRangeHash,
zap.Stringer("interval", sleepDur),
)
@@ -140,7 +141,7 @@ func (c *Context) collectHashes(ctx context.Context, p *gamePair) {
h, err := c.cnrCom.GetRangeHash(ctx, getRangeHashPrm)
if err != nil {
- c.log.Debug("could not get payload range hash",
+ c.log.Debug(logs.AuditorCouldNotGetPayloadRangeHash,
zap.Stringer("id", p.id),
zap.String("node", netmap.StringifyPublicKey(n)),
zap.String("error", err.Error()),
diff --git a/pkg/services/audit/auditor/pop.go b/pkg/services/audit/auditor/pop.go
index 32b837794..b64004bbc 100644
--- a/pkg/services/audit/auditor/pop.go
+++ b/pkg/services/audit/auditor/pop.go
@@ -3,6 +3,7 @@ package auditor
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/tzhash/tz"
@@ -61,7 +62,7 @@ func (c *Context) processObjectPlacement(ctx context.Context, id oid.ID, nodes [
// try to get object header from node
hdr, err := c.cnrCom.GetHeader(ctx, getHeaderPrm)
if err != nil {
- c.log.Debug("could not get object header from candidate",
+ c.log.Debug(logs.AuditorCouldNotGetObjectHeaderFromCandidate,
zap.Stringer("id", id),
zap.String("error", err.Error()),
)
@@ -133,7 +134,7 @@ func (c *Context) iterateSGMembersPlacementRand(f func(oid.ID, int, []netmap.Nod
// build placement vector for the current object
nn, err := c.buildPlacement(id)
if err != nil {
- c.log.Debug("could not build placement for object",
+ c.log.Debug(logs.AuditorCouldNotBuildPlacementForObject,
zap.Stringer("id", id),
zap.String("error", err.Error()),
)
diff --git a/pkg/services/audit/auditor/por.go b/pkg/services/audit/auditor/por.go
index aebc25c68..d579b3a7c 100644
--- a/pkg/services/audit/auditor/por.go
+++ b/pkg/services/audit/auditor/por.go
@@ -5,6 +5,7 @@ import (
"context"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@@ -71,7 +72,7 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor
hdr, err := c.cnrCom.GetHeader(ctx, getHeaderPrm)
if err != nil {
- c.log.Debug("can't head object",
+ c.log.Debug(logs.AuditorCantHeadObject,
zap.String("remote_node", netmap.StringifyPublicKey(flat[j])),
zap.Stringer("oid", members[i]),
)
@@ -92,7 +93,7 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor
cs.Value(),
})
if err != nil {
- c.log.Debug("can't concatenate tz hash",
+ c.log.Debug(logs.AuditorCantConcatenateTzHash,
zap.String("oid", members[i].String()),
zap.String("error", err.Error()))
@@ -122,13 +123,13 @@ func (c *Context) writeCheckReport(sizeCheck, tzCheck bool, sgID oid.ID, sg stor
c.report.PassedPoR(sgID)
} else {
if !sizeCheck {
- c.log.Debug("storage group size check failed",
+ c.log.Debug(logs.AuditorStorageGroupSizeCheckFailed,
zap.Uint64("expected", sg.ValidationDataSize()),
zap.Uint64("got", totalSize))
}
if !tzCheck {
- c.log.Debug("storage group tz hash check failed")
+ c.log.Debug(logs.AuditorStorageGroupTzHashCheckFailed)
}
c.report.FailedPoR(sgID)
@@ -138,7 +139,7 @@ func (c *Context) writeCheckReport(sizeCheck, tzCheck bool, sgID oid.ID, sg stor
func (c *Context) getShuffledNodes(member oid.ID, sgID oid.ID) ([]netmap.NodeInfo, bool) {
objectPlacement, err := c.buildPlacement(member)
if err != nil {
- c.log.Info("can't build placement for storage group member",
+ c.log.Info(logs.AuditorCantBuildPlacementForStorageGroupMember,
zap.Stringer("sg", sgID),
zap.String("member_id", member.String()),
)
diff --git a/pkg/services/audit/taskmanager/listen.go b/pkg/services/audit/taskmanager/listen.go
index a16052e13..bfc37c2a1 100644
--- a/pkg/services/audit/taskmanager/listen.go
+++ b/pkg/services/audit/taskmanager/listen.go
@@ -3,6 +3,7 @@ package audittask
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/auditor"
"go.uber.org/zap"
@@ -12,7 +13,7 @@ import (
//
// The listener is terminated by context.
func (m *Manager) Listen(ctx context.Context) {
- m.log.Info("process routine",
+ m.log.Info(logs.TaskmanagerProcessRoutine,
zap.Uint32("queue_capacity", m.queueCap),
)
@@ -21,7 +22,7 @@ func (m *Manager) Listen(ctx context.Context) {
for {
select {
case <-ctx.Done():
- m.log.Warn("stop listener by context",
+ m.log.Warn(logs.TaskmanagerStopListenerByContext,
zap.String("error", ctx.Err().Error()),
)
m.workerPool.Release()
@@ -29,7 +30,7 @@ func (m *Manager) Listen(ctx context.Context) {
return
case task, ok := <-m.ch:
if !ok {
- m.log.Warn("queue channel is closed")
+ m.log.Warn(logs.TaskmanagerQueueChannelIsClosed)
return
}
@@ -51,7 +52,7 @@ func (m *Manager) Listen(ctx context.Context) {
func (m *Manager) handleTask(ctx context.Context, task *audit.Task, onCompleted func()) {
pdpPool, err := m.pdpPoolGenerator()
if err != nil {
- m.log.Error("could not generate PDP worker pool",
+ m.log.Error(logs.TaskmanagerCouldNotGeneratePDPWorkerPool,
zap.String("error", err.Error()),
)
onCompleted()
@@ -60,7 +61,7 @@ func (m *Manager) handleTask(ctx context.Context, task *audit.Task, onCompleted
porPool, err := m.pdpPoolGenerator()
if err != nil {
- m.log.Error("could not generate PoR worker pool",
+ m.log.Error(logs.TaskmanagerCouldNotGeneratePoRWorkerPool,
zap.String("error", err.Error()),
)
onCompleted()
@@ -73,7 +74,7 @@ func (m *Manager) handleTask(ctx context.Context, task *audit.Task, onCompleted
if err := m.workerPool.Submit(func() { auditContext.Execute(ctx, onCompleted) }); err != nil {
// may be we should report it
- m.log.Warn("could not submit audit task")
+ m.log.Warn(logs.TaskmanagerCouldNotSubmitAuditTask)
onCompleted()
}
}
diff --git a/pkg/services/container/announcement/load/controller/calls.go b/pkg/services/container/announcement/load/controller/calls.go
index f5d5d1a3d..e1ed6e496 100644
--- a/pkg/services/container/announcement/load/controller/calls.go
+++ b/pkg/services/container/announcement/load/controller/calls.go
@@ -3,6 +3,7 @@ package loadcontroller
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"go.uber.org/zap"
@@ -52,7 +53,7 @@ func (c *Controller) Start(ctx context.Context, prm StartPrm) {
}
func (c *announcer) announce(ctx context.Context) {
- c.log.Debug("starting to announce the values of the metrics")
+ c.log.Debug(logs.ControllerStartingToAnnounceTheValuesOfTheMetrics)
var (
metricsIterator Iterator
@@ -62,7 +63,7 @@ func (c *announcer) announce(ctx context.Context) {
// initialize iterator over locally collected metrics
metricsIterator, err = c.ctrl.prm.LocalMetrics.InitIterator()
if err != nil {
- c.log.Debug("could not initialize iterator over locally collected metrics",
+ c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics,
zap.String("error", err.Error()),
)
@@ -72,7 +73,7 @@ func (c *announcer) announce(ctx context.Context) {
// initialize target of local announcements
targetWriter, err := c.ctrl.prm.LocalAnnouncementTarget.InitWriter(nil)
if err != nil {
- c.log.Debug("could not initialize announcement accumulator",
+ c.log.Debug(logs.ControllerCouldNotInitializeAnnouncementAccumulator,
zap.String("error", err.Error()),
)
@@ -90,7 +91,7 @@ func (c *announcer) announce(ctx context.Context) {
},
)
if err != nil {
- c.log.Debug("iterator over locally collected metrics aborted",
+ c.log.Debug(logs.ControllerIteratorOverLocallyCollectedMetricsAborted,
zap.String("error", err.Error()),
)
@@ -100,14 +101,14 @@ func (c *announcer) announce(ctx context.Context) {
// finish writing
err = targetWriter.Close(ctx)
if err != nil {
- c.log.Debug("could not finish writing local announcements",
+ c.log.Debug(logs.ControllerCouldNotFinishWritingLocalAnnouncements,
zap.String("error", err.Error()),
)
return
}
- c.log.Debug("trust announcement successfully finished")
+ c.log.Debug(logs.ControllerTrustAnnouncementSuccessfullyFinished)
}
func (c *Controller) acquireAnnouncement(ctx context.Context, prm StartPrm) (context.Context, *announcer) {
@@ -127,7 +128,7 @@ func (c *Controller) acquireAnnouncement(ctx context.Context, prm StartPrm) (con
)}
if started {
- log.Debug("announcement is already started")
+ log.Debug(logs.ControllerAnnouncementIsAlreadyStarted)
return ctx, nil
}
@@ -159,9 +160,9 @@ func (c *commonContext) freeAnnouncement() {
c.ctrl.announceMtx.Unlock()
if stopped {
- c.log.Debug("announcement successfully interrupted")
+ c.log.Debug(logs.ControllerAnnouncementSuccessfullyInterrupted)
} else {
- c.log.Debug("announcement is not started or already interrupted")
+ c.log.Debug(logs.ControllerAnnouncementIsNotStartedOrAlreadyInterrupted)
}
}
@@ -219,7 +220,7 @@ func (c *Controller) acquireReport(ctx context.Context, prm StopPrm) (context.Co
)}
if started {
- log.Debug("report is already started")
+ log.Debug(logs.ControllerReportIsAlreadyStarted)
return ctx, nil
}
@@ -251,9 +252,9 @@ func (c *commonContext) freeReport() {
c.ctrl.reportMtx.Unlock()
if stopped {
- c.log.Debug("announcement successfully interrupted")
+ c.log.Debug(logs.ControllerAnnouncementSuccessfullyInterrupted)
} else {
- c.log.Debug("announcement is not started or already interrupted")
+ c.log.Debug(logs.ControllerAnnouncementIsNotStartedOrAlreadyInterrupted)
}
}
@@ -266,7 +267,7 @@ func (c *reporter) report(ctx context.Context) {
// initialize iterator over locally accumulated announcements
localIterator, err = c.ctrl.prm.AnnouncementAccumulator.InitIterator()
if err != nil {
- c.log.Debug("could not initialize iterator over locally accumulated announcements",
+ c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements,
zap.String("error", err.Error()),
)
@@ -276,7 +277,7 @@ func (c *reporter) report(ctx context.Context) {
// initialize final destination of load estimations
resultWriter, err := c.ctrl.prm.ResultReceiver.InitWriter(nil)
if err != nil {
- c.log.Debug("could not initialize result target",
+ c.log.Debug(logs.ControllerCouldNotInitializeResultTarget,
zap.String("error", err.Error()),
)
@@ -289,7 +290,7 @@ func (c *reporter) report(ctx context.Context) {
resultWriter.Put,
)
if err != nil {
- c.log.Debug("iterator over local announcements aborted",
+ c.log.Debug(logs.ControllerIteratorOverLocalAnnouncementsAborted,
zap.String("error", err.Error()),
)
@@ -299,7 +300,7 @@ func (c *reporter) report(ctx context.Context) {
// finish writing
err = resultWriter.Close(ctx)
if err != nil {
- c.log.Debug("could not finish writing load estimations",
+ c.log.Debug(logs.ControllerCouldNotFinishWritingLoadEstimations,
zap.String("error", err.Error()),
)
}
diff --git a/pkg/services/container/announcement/load/route/calls.go b/pkg/services/container/announcement/load/route/calls.go
index 83c368f57..9a483aed0 100644
--- a/pkg/services/container/announcement/load/route/calls.go
+++ b/pkg/services/container/announcement/load/route/calls.go
@@ -5,6 +5,7 @@ import (
"encoding/hex"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
loadcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/announcement/load/controller"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"go.uber.org/zap"
@@ -97,7 +98,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error {
if !ok {
provider, err := w.router.remoteProvider.InitRemote(remoteInfo)
if err != nil {
- w.router.log.Debug("could not initialize writer provider",
+ w.router.log.Debug(logs.RouteCouldNotInitializeWriterProvider,
zap.String("error", err.Error()),
)
@@ -106,7 +107,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error {
remoteWriter, err = provider.InitWriter(w.route)
if err != nil {
- w.router.log.Debug("could not initialize writer",
+ w.router.log.Debug(logs.RouteCouldNotInitializeWriter,
zap.String("error", err.Error()),
)
@@ -118,7 +119,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error {
err := remoteWriter.Put(a)
if err != nil {
- w.router.log.Debug("could not put the value",
+ w.router.log.Debug(logs.RouteCouldNotPutTheValue,
zap.String("error", err.Error()),
)
}
@@ -133,7 +134,7 @@ func (w *loadWriter) Close(ctx context.Context) error {
for key, wRemote := range w.mServers {
err := wRemote.Close(ctx)
if err != nil {
- w.router.log.Debug("could not close remote server writer",
+ w.router.log.Debug(logs.RouteCouldNotCloseRemoteServerWriter,
zap.String("key", key),
zap.String("error", err.Error()),
)
diff --git a/pkg/services/notificator/nats/service.go b/pkg/services/notificator/nats/service.go
index 54eb373ec..6a7e80a53 100644
--- a/pkg/services/notificator/nats/service.go
+++ b/pkg/services/notificator/nats/service.go
@@ -6,6 +6,7 @@ import (
"fmt"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nats-io/nats.go"
@@ -98,10 +99,10 @@ func New(oo ...Option) *Writer {
w.opts.nOpts = append(w.opts.nOpts,
nats.NoCallbacksAfterClientClose(), // do not call callbacks when it was planned writer stop
nats.DisconnectErrHandler(func(conn *nats.Conn, err error) {
- w.log.Error("nats: connection was lost", zap.Error(err))
+ w.log.Error(logs.NatsNatsConnectionWasLost, zap.Error(err))
}),
nats.ReconnectHandler(func(conn *nats.Conn) {
- w.log.Warn("nats: reconnected to the server")
+ w.log.Warn(logs.NatsNatsReconnectedToTheServer)
}),
)
@@ -124,7 +125,7 @@ func (n *Writer) Connect(ctx context.Context, endpoint string) error {
go func() {
<-ctx.Done()
- n.opts.log.Info("nats: closing connection as the context is done")
+ n.opts.log.Info(logs.NatsNatsClosingConnectionAsTheContextIsDone)
nc.Close()
}()
diff --git a/pkg/services/notificator/service.go b/pkg/services/notificator/service.go
index 0a8a5d96d..bbf4e4823 100644
--- a/pkg/services/notificator/service.go
+++ b/pkg/services/notificator/service.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
@@ -74,10 +75,10 @@ func New(prm *Prm) *Notificator {
// and passes their addresses to the NotificationWriter.
func (n *Notificator) ProcessEpoch(ctx context.Context, epoch uint64) {
logger := n.l.With(zap.Uint64("epoch", epoch))
- logger.Debug("notificator: start processing object notifications")
+ logger.Debug(logs.NotificatorNotificatorStartProcessingObjectNotifications)
n.ns.Iterate(ctx, epoch, func(topic string, addr oid.Address) {
- n.l.Debug("notificator: processing object notification",
+ n.l.Debug(logs.NotificatorNotificatorProcessingObjectNotification,
zap.String("topic", topic),
zap.Stringer("address", addr),
)
diff --git a/pkg/services/object/acl/v2/classifier.go b/pkg/services/object/acl/v2/classifier.go
index 2bf5a3958..cdc5fb623 100644
--- a/pkg/services/object/acl/v2/classifier.go
+++ b/pkg/services/object/acl/v2/classifier.go
@@ -4,6 +4,7 @@ import (
"bytes"
"crypto/sha256"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@@ -48,7 +49,7 @@ func (c senderClassifier) classify(
isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
if err != nil {
// do not throw error, try best case matching
- c.log.Debug("can't check if request from inner ring",
+ c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing,
zap.String("error", err.Error()))
} else if isInnerRingNode {
return &classifyResult{
@@ -65,7 +66,7 @@ func (c senderClassifier) classify(
// error might happen if request has `RoleOther` key and placement
// is not possible for previous epoch, so
// do not throw error, try best case matching
- c.log.Debug("can't check if request from container node",
+ c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode,
zap.String("error", err.Error()))
} else if isContainerNode {
return &classifyResult{
diff --git a/pkg/services/object/delete/container.go b/pkg/services/object/delete/container.go
index a2f099d5b..3106d8efd 100644
--- a/pkg/services/object/delete/container.go
+++ b/pkg/services/object/delete/container.go
@@ -1,5 +1,7 @@
package deletesvc
+import "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+
func (exec *execCtx) executeOnContainer() {
- exec.log.Debug("request is not rolled over to the container")
+ exec.log.Debug(logs.DeleteRequestIsNotRolledOverToTheContainer)
}
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index a959b53cb..971f0a6f5 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -3,6 +3,7 @@ package deletesvc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"go.uber.org/zap"
)
@@ -34,7 +35,7 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) {
- exec.log.Debug("serving request...")
+ exec.log.Debug(logs.DeleteServingRequest)
// perform local operation
exec.executeLocal(ctx)
@@ -46,9 +47,9 @@ func (exec *execCtx) analyzeStatus(execCnr bool) {
// analyze local result
switch exec.status {
case statusOK:
- exec.log.Debug("operation finished successfully")
+ exec.log.Debug(logs.DeleteOperationFinishedSuccessfully)
default:
- exec.log.Debug("operation finished with error",
+ exec.log.Debug(logs.DeleteOperationFinishedWithError,
zap.String("error", exec.err.Error()),
)
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index 782cad71b..91bc6b3d7 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -5,6 +5,7 @@ import (
"strconv"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -83,7 +84,7 @@ func (exec *execCtx) formSplitInfo(ctx context.Context) bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not compose split info",
+ exec.log.Debug(logs.DeleteCouldNotComposeSplitInfo,
zap.String("error", err.Error()),
)
case err == nil:
@@ -96,7 +97,7 @@ func (exec *execCtx) formSplitInfo(ctx context.Context) bool {
func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) {
if exec.splitInfo == nil {
- exec.log.Debug("no split info, object is PHY")
+ exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY)
return true
}
@@ -119,7 +120,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) (ok bool) {
func (exec *execCtx) collectChain(ctx context.Context) bool {
var chain []oid.ID
- exec.log.Debug("assembling chain...")
+ exec.log.Debug(logs.DeleteAssemblingChain)
for prev, withPrev := exec.splitInfo.LastPart(); withPrev; {
chain = append(chain, prev)
@@ -131,7 +132,7 @@ func (exec *execCtx) collectChain(ctx context.Context) bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not get previous split element",
+ exec.log.Debug(logs.DeleteCouldNotGetPreviousSplitElement,
zap.Stringer("id", prev),
zap.String("error", err.Error()),
)
@@ -154,7 +155,7 @@ func (exec *execCtx) collectChain(ctx context.Context) bool {
}
func (exec *execCtx) collectChildren(ctx context.Context) bool {
- exec.log.Debug("collecting children...")
+ exec.log.Debug(logs.DeleteCollectingChildren)
children, err := exec.svc.header.children(ctx, exec)
@@ -163,7 +164,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not collect object children",
+ exec.log.Debug(logs.DeleteCouldNotCollectObjectChildren,
zap.String("error", err.Error()),
)
@@ -181,7 +182,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) bool {
}
func (exec *execCtx) supplementBySplitID(ctx context.Context) bool {
- exec.log.Debug("supplement by split ID")
+ exec.log.Debug(logs.DeleteSupplementBySplitID)
chain, err := exec.svc.searcher.splitMembers(ctx, exec)
@@ -190,7 +191,7 @@ func (exec *execCtx) supplementBySplitID(ctx context.Context) bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not search for split chain members",
+ exec.log.Debug(logs.DeleteCouldNotSearchForSplitChainMembers,
zap.String("error", err.Error()),
)
@@ -226,7 +227,7 @@ func (exec *execCtx) initTombstoneObject() bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not marshal tombstone structure",
+ exec.log.Debug(logs.DeleteCouldNotMarshalTombstoneStructure,
zap.String("error", err.Error()),
)
@@ -265,7 +266,7 @@ func (exec *execCtx) saveTombstone(ctx context.Context) bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not save the tombstone",
+ exec.log.Debug(logs.DeleteCouldNotSaveTheTombstone,
zap.String("error", err.Error()),
)
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 17eb0e4e1..34839b194 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -3,20 +3,21 @@ package deletesvc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
func (exec *execCtx) executeLocal(ctx context.Context) {
- exec.log.Debug("forming tombstone structure...")
+ exec.log.Debug(logs.DeleteFormingTombstoneStructure)
ok := exec.formTombstone(ctx)
if !ok {
return
}
- exec.log.Debug("tombstone structure successfully formed, saving...")
+ exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving)
exec.saveTombstone(ctx)
}
@@ -27,7 +28,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not read tombstone lifetime config",
+ exec.log.Debug(logs.DeleteCouldNotReadTombstoneLifetimeConfig,
zap.String("error", err.Error()),
)
@@ -40,14 +41,14 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) {
)
exec.addMembers([]oid.ID{exec.address().Object()})
- exec.log.Debug("forming split info...")
+ exec.log.Debug(logs.DeleteFormingSplitInfo)
ok = exec.formSplitInfo(ctx)
if !ok {
return
}
- exec.log.Debug("split info successfully formed, collecting members...")
+ exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers)
exec.tombstone.SetSplitID(exec.splitInfo.SplitID())
@@ -56,7 +57,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) (ok bool) {
return
}
- exec.log.Debug("members successfully collected")
+ exec.log.Debug(logs.DeleteMembersSuccessfullyCollected)
ok = exec.initTombstoneObject()
if !ok {
diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go
index db71df6a4..d2108b003 100644
--- a/pkg/services/object/get/assemble.go
+++ b/pkg/services/object/get/assemble.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -12,7 +13,7 @@ import (
func (exec *execCtx) assemble(ctx context.Context) {
if !exec.canAssemble() {
- exec.log.Debug("can not assemble the object")
+ exec.log.Debug(logs.GetCanNotAssembleTheObject)
return
}
@@ -34,16 +35,16 @@ func (exec *execCtx) assemble(ctx context.Context) {
// `execCtx` so it should be disabled there.
exec.disableForwarding()
- exec.log.Debug("trying to assemble the object...")
+ exec.log.Debug(logs.GetTryingToAssembleTheObject)
assembler := newAssembler(exec.address(), exec.splitInfo(), exec.ctxRange(), exec)
- exec.log.Debug("assembling splitted object...",
+ exec.log.Debug(logs.GetAssemblingSplittedObject,
zap.Stringer("address", exec.address()),
zap.Uint64("range_offset", exec.ctxRange().GetOffset()),
zap.Uint64("range_length", exec.ctxRange().GetLength()),
)
- defer exec.log.Debug("assembling splitted object completed",
+ defer exec.log.Debug(logs.GetAssemblingSplittedObjectCompleted,
zap.Stringer("address", exec.address()),
zap.Uint64("range_offset", exec.ctxRange().GetOffset()),
zap.Uint64("range_length", exec.ctxRange().GetLength()),
@@ -51,7 +52,7 @@ func (exec *execCtx) assemble(ctx context.Context) {
obj, err := assembler.Assemble(ctx, exec.prm.objWriter)
if err != nil {
- exec.log.Warn("failed to assemble splitted object",
+ exec.log.Warn(logs.GetFailedToAssembleSplittedObject,
zap.Error(err),
zap.Stringer("address", exec.address()),
zap.Uint64("range_offset", exec.ctxRange().GetOffset()),
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index cfb538d38..74d63966e 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -3,19 +3,20 @@ package getsvc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"go.uber.org/zap"
)
func (exec *execCtx) executeOnContainer(ctx context.Context) {
if exec.isLocal() {
- exec.log.Debug("return result directly")
+ exec.log.Debug(logs.GetReturnResultDirectly)
return
}
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug("trying to execute in container...",
+ exec.log.Debug(logs.GetTryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
@@ -43,7 +44,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
- exec.log.Debug("process epoch",
+ exec.log.Debug(logs.GetProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
@@ -60,7 +61,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug("no more nodes, abort placement iteration")
+ exec.log.Debug(logs.GetNoMoreNodesAbortPlacementIteration)
return false
}
@@ -68,7 +69,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
for i := range addrs {
select {
case <-ctx.Done():
- exec.log.Debug("interrupt placement iteration by context",
+ exec.log.Debug(logs.GetInterruptPlacementIterationByContext,
zap.String("error", ctx.Err().Error()),
)
@@ -84,7 +85,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
client.NodeInfoFromNetmapElement(&info, addrs[i])
if exec.processNode(ctx, info) {
- exec.log.Debug("completing the operation")
+ exec.log.Debug(logs.GetCompletingTheOperation)
return true
}
}
diff --git a/pkg/services/object/get/exec.go b/pkg/services/object/get/exec.go
index 2ba014574..7f090dd50 100644
--- a/pkg/services/object/get/exec.go
+++ b/pkg/services/object/get/exec.go
@@ -4,6 +4,7 @@ import (
"context"
"crypto/ecdsa"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
@@ -149,7 +150,7 @@ func (exec *execCtx) initEpoch() bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not get current epoch number",
+ exec.log.Debug(logs.GetCouldNotGetCurrentEpochNumber,
zap.String("error", err.Error()),
)
@@ -170,7 +171,7 @@ func (exec *execCtx) generateTraverser(addr oid.Address) (*placement.Traverser,
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not generate container traverser",
+ exec.log.Debug(logs.GetCouldNotGenerateContainerTraverser,
zap.String("error", err.Error()),
)
@@ -188,7 +189,7 @@ func (exec execCtx) remoteClient(info clientcore.NodeInfo) (getClient, bool) {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not construct remote node client")
+ exec.log.Debug(logs.GetCouldNotConstructRemoteNodeClient)
case err == nil:
return c, true
}
@@ -225,7 +226,7 @@ func (exec *execCtx) writeCollectedHeader(ctx context.Context) bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not write header",
+ exec.log.Debug(logs.GetCouldNotWriteHeader,
zap.String("error", err.Error()),
)
case err == nil:
@@ -248,7 +249,7 @@ func (exec *execCtx) writeObjectPayload(ctx context.Context, obj *objectSDK.Obje
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not write payload chunk",
+ exec.log.Debug(logs.GetCouldNotWritePayloadChunk,
zap.String("error", err.Error()),
)
case err == nil:
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 0f5983e99..bb0d669da 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -3,6 +3,7 @@ package getsvc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
@@ -83,7 +84,7 @@ func (s *Service) get(ctx context.Context, prm commonPrm, opts ...execOption) st
}
func (exec *execCtx) execute(ctx context.Context) {
- exec.log.Debug("serving request...")
+ exec.log.Debug(logs.GetServingRequest)
// perform local operation
exec.executeLocal(ctx)
@@ -95,16 +96,16 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
switch exec.status {
case statusOK:
- exec.log.Debug("operation finished successfully")
+ exec.log.Debug(logs.GetOperationFinishedSuccessfully)
case statusINHUMED:
- exec.log.Debug("requested object was marked as removed")
+ exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
case statusVIRTUAL:
- exec.log.Debug("requested object is virtual")
+ exec.log.Debug(logs.GetRequestedObjectIsVirtual)
exec.assemble(ctx)
case statusOutOfRange:
- exec.log.Debug("requested range is out of object bounds")
+ exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
default:
- exec.log.Debug("operation finished with error",
+ exec.log.Debug(logs.GetOperationFinishedWithError,
zap.String("error", exec.err.Error()),
)
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index 82ed911e4..8ac83d97a 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -5,6 +5,7 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
@@ -29,7 +30,7 @@ func (exec *execCtx) executeLocal(ctx context.Context) {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("local get failed",
+ exec.log.Debug(logs.GetLocalGetFailed,
zap.String("error", err.Error()),
)
case err == nil:
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 697e48ee2..f4f74083b 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -5,6 +5,7 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -15,7 +16,7 @@ func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool
ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
defer span.End()
- exec.log.Debug("processing node...")
+ exec.log.Debug(logs.GetProcessingNode)
client, ok := exec.remoteClient(info)
if !ok {
@@ -35,7 +36,7 @@ func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool
exec.status = statusUndefined
exec.err = errNotFound
- exec.log.Debug("remote call failed",
+ exec.log.Debug(logs.GetRemoteCallFailed,
zap.String("error", err.Error()),
)
case err == nil:
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index d8b59487e..86dc3c2ca 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -6,6 +6,7 @@ import (
"sync"
"sync/atomic"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
@@ -198,7 +199,7 @@ func (t *distributedTarget) iteratePlacement(ctx context.Context) (*transformer.
if t.traversal.submitPrimaryPlacementFinish() {
_, err = t.iteratePlacement(ctx)
if err != nil {
- t.log.Error("additional container broadcast failure", zap.Error(err))
+ t.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index b158bc23e..9df438e00 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -5,19 +5,20 @@ import (
"encoding/hex"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"go.uber.org/zap"
)
func (exec *execCtx) executeOnContainer(ctx context.Context) {
if exec.isLocal() {
- exec.log.Debug("return result directly")
+ exec.log.Debug(logs.SearchReturnResultDirectly)
return
}
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug("trying to execute in container...",
+ exec.log.Debug(logs.SearchTryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
@@ -48,7 +49,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
- exec.log.Debug("process epoch",
+ exec.log.Debug(logs.SearchProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
@@ -63,7 +64,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug("no more nodes, abort placement iteration")
+ exec.log.Debug(logs.SearchNoMoreNodesAbortPlacementIteration)
break
}
@@ -76,7 +77,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
defer wg.Done()
select {
case <-ctx.Done():
- exec.log.Debug("interrupt placement iteration by context",
+ exec.log.Debug(logs.SearchInterruptPlacementIterationByContext,
zap.String("error", ctx.Err().Error()))
return
default:
@@ -86,7 +87,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
client.NodeInfoFromNetmapElement(&info, addrs[i])
- exec.log.Debug("processing node...", zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+ exec.log.Debug(logs.SearchProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
c, err := exec.svc.clientConstructor.get(info)
if err != nil {
@@ -95,13 +96,13 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
exec.err = err
mtx.Unlock()
- exec.log.Debug("could not construct remote node client")
+ exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient)
return
}
ids, err := c.searchObjects(ctx, exec, info)
if err != nil {
- exec.log.Debug("remote operation failed",
+ exec.log.Debug(logs.SearchRemoteOperationFailed,
zap.String("error", err.Error()))
return
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index f815270d9..1733d7840 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -1,6 +1,7 @@
package searchsvc
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -80,7 +81,7 @@ func (exec *execCtx) initEpoch() bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not get current epoch number",
+ exec.log.Debug(logs.SearchCouldNotGetCurrentEpochNumber,
zap.String("error", err.Error()),
)
@@ -99,7 +100,7 @@ func (exec *execCtx) generateTraverser(cnr cid.ID) (*placement.Traverser, bool)
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not generate container traverser",
+ exec.log.Debug(logs.SearchCouldNotGenerateContainerTraverser,
zap.String("error", err.Error()),
)
@@ -118,7 +119,7 @@ func (exec *execCtx) writeIDList(ids []oid.ID) {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("could not write object identifiers",
+ exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers,
zap.String("error", err.Error()),
)
case err == nil:
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index 1e4776921..f768c8861 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -1,6 +1,7 @@
package searchsvc
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
@@ -11,7 +12,7 @@ func (exec *execCtx) executeLocal() {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug("local operation failed",
+ exec.log.Debug(logs.SearchLocalOperationFailed,
zap.String("error", err.Error()),
)
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 325b42a54..5bf0710ad 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -3,6 +3,7 @@ package searchsvc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
@@ -23,7 +24,7 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) {
- exec.log.Debug("serving request...")
+ exec.log.Debug(logs.SearchServingRequest)
// perform local operation
exec.executeLocal()
@@ -35,11 +36,11 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
switch exec.status {
default:
- exec.log.Debug("operation finished with error",
+ exec.log.Debug(logs.SearchOperationFinishedWithError,
zap.String("error", exec.err.Error()),
)
case statusOK:
- exec.log.Debug("operation finished successfully")
+ exec.log.Debug(logs.SearchOperationFinishedSuccessfully)
}
if execCnr {
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index beda45c0c..92beedaa7 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -1,6 +1,7 @@
package util
import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
@@ -8,7 +9,7 @@ import (
// LogServiceError writes error message of object service to provided logger.
func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) {
- l.Error("object service error",
+ l.Error(logs.UtilObjectServiceError,
zap.String("node", network.StringifyGroup(node)),
zap.String("request", req),
zap.String("error", err.Error()),
@@ -17,7 +18,7 @@ func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, er
// LogWorkerPoolError writes debug error message of object worker pool to provided logger.
func LogWorkerPoolError(l *logger.Logger, req string, err error) {
- l.Error("could not push task to worker pool",
+ l.Error(logs.UtilCouldNotPushTaskToWorkerPool,
zap.String("request", req),
zap.String("error", err.Error()),
)
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index 4097f22bf..46fcc9840 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -5,6 +5,7 @@ import (
"strconv"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -57,7 +58,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
ts, err := g.tsSource.Tombstone(ctx, a, epoch)
if err != nil {
log.Warn(
- "tombstone getter: could not get the tombstone the source",
+ logs.TombstoneCouldNotGetTheTombstoneTheSource,
zap.Error(err),
)
} else {
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index 9cdc4d813..e91b8871b 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
@@ -73,7 +74,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
cnr, err := p.cnrSrc.Get(idCnr)
if err != nil {
- p.log.Error("could not get container",
+ p.log.Error(logs.PolicerCouldNotGetContainer,
zap.Stringer("cid", idCnr),
zap.String("error", err.Error()),
)
@@ -84,7 +85,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
_, err := p.jobQueue.localStorage.Inhume(ctx, prm)
if err != nil {
- p.log.Error("could not inhume object with missing container",
+ p.log.Error(logs.PolicerCouldNotInhumeObjectWithMissingContainer,
zap.Stringer("cid", idCnr),
zap.Stringer("oid", idObj),
zap.String("error", err.Error()))
@@ -98,7 +99,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy)
if err != nil {
- p.log.Error("could not build placement vector for object",
+ p.log.Error(logs.PolicerCouldNotBuildPlacementVectorForObject,
zap.Stringer("cid", idCnr),
zap.String("error", err.Error()),
)
@@ -127,7 +128,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
}
if !c.needLocalCopy && c.removeLocalCopy {
- p.log.Info("redundant local object copy detected",
+ p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected,
zap.Stringer("object", addr),
)
@@ -199,7 +200,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
if isClientErrMaintenance(err) {
shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
} else if err != nil {
- p.log.Error("receive object header to check policy compliance",
+ p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
zap.Stringer("object", addr),
zap.String("error", err.Error()),
)
@@ -228,7 +229,7 @@ func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes *nodeCach
shortage--
uncheckedCopies++
- p.log.Debug("consider node under maintenance as OK",
+ p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK,
zap.String("node", netmap.StringifyPublicKey(node)),
)
return shortage, uncheckedCopies
@@ -237,7 +238,7 @@ func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes *nodeCach
func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
nodes []netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) {
if shortage > 0 {
- p.log.Debug("shortage of object copies detected",
+ p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected,
zap.Stringer("object", addr),
zap.Uint32("shortage", shortage),
)
@@ -251,7 +252,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address
} else if uncheckedCopies > 0 {
// If we have more copies than needed, but some of them are from the maintenance nodes,
// save the local copy.
- p.log.Debug("some of the copies are stored on nodes under maintenance, save local copy",
+ p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance,
zap.Int("count", uncheckedCopies))
} else if uncheckedCopies == 0 {
// Safe to remove: checked all copies, shortage == 0.
diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go
index 687216407..4a40f00ba 100644
--- a/pkg/services/policer/process.go
+++ b/pkg/services/policer/process.go
@@ -5,6 +5,7 @@ import (
"errors"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
"go.uber.org/zap"
@@ -12,7 +13,7 @@ import (
func (p *Policer) Run(ctx context.Context) {
defer func() {
- p.log.Info("routine stopped")
+ p.log.Info(logs.PolicerRoutineStopped)
}()
go p.poolCapacityWorker(ctx)
@@ -39,7 +40,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
time.Sleep(time.Second) // finished whole cycle, sleep a bit
continue
}
- p.log.Warn("failure at object select for replication", zap.Error(err))
+ p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err))
}
for i := range addrs {
@@ -68,7 +69,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
p.objsInWork.remove(addr.Address)
})
if err != nil {
- p.log.Warn("pool submission", zap.Error(err))
+ p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err))
}
}
}
@@ -91,7 +92,7 @@ func (p *Policer) poolCapacityWorker(ctx context.Context) {
if p.taskPool.Cap() != newCapacity {
p.taskPool.Tune(newCapacity)
- p.log.Debug("tune replication capacity",
+ p.log.Debug(logs.PolicerTuneReplicationCapacity,
zap.Float64("system_load", frostfsSysLoad),
zap.Int("new_capacity", newCapacity))
}
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 53df81b77..46e0c9468 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -3,6 +3,7 @@ package replicator
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -20,7 +21,7 @@ type TaskResult interface {
// Passes all the nodes that accepted the replication to the TaskResult.
func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult) {
defer func() {
- p.log.Debug("finish work",
+ p.log.Debug(logs.ReplicatorFinishWork,
zap.Uint32("amount of unfinished replicas", task.quantity),
)
}()
@@ -29,7 +30,7 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult)
var err error
task.obj, err = engine.Get(ctx, p.localStorage, task.addr)
if err != nil {
- p.log.Error("could not get object from local storage",
+ p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage,
zap.Stringer("object", task.addr),
zap.Error(err))
@@ -59,11 +60,11 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult)
cancel()
if err != nil {
- log.Error("could not replicate object",
+ log.Error(logs.ReplicatorCouldNotReplicateObject,
zap.String("error", err.Error()),
)
} else {
- log.Debug("object successfully replicated")
+ log.Debug(logs.ReplicatorObjectSuccessfullyReplicated)
task.quantity--
diff --git a/pkg/services/reputation/common/managers.go b/pkg/services/reputation/common/managers.go
index ef11b8122..84201809f 100644
--- a/pkg/services/reputation/common/managers.go
+++ b/pkg/services/reputation/common/managers.go
@@ -3,6 +3,7 @@ package common
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
apiNetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -76,7 +77,7 @@ func (x nodeServer) ExternalAddresses() []string {
// BuildManagers sorts nodes in NetMap with HRW algorithms and
// takes the next node after the current one as the only manager.
func (mb *managerBuilder) BuildManagers(epoch uint64, p apireputation.PeerID) ([]ServerInfo, error) {
- mb.log.Debug("start building managers",
+ mb.log.Debug(logs.CommonStartBuildingManagers,
zap.Uint64("epoch", epoch),
zap.Stringer("peer", p),
)
diff --git a/pkg/services/reputation/common/router/calls.go b/pkg/services/reputation/common/router/calls.go
index a177f6a2b..4ed293beb 100644
--- a/pkg/services/reputation/common/router/calls.go
+++ b/pkg/services/reputation/common/router/calls.go
@@ -5,6 +5,7 @@ import (
"encoding/hex"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"go.uber.org/zap"
@@ -92,7 +93,7 @@ func (w *trustWriter) Write(ctx context.Context, t reputation.Trust) error {
if !ok {
provider, err := w.router.remoteProvider.InitRemote(remoteInfo)
if err != nil {
- w.router.log.Debug("could not initialize writer provider",
+ w.router.log.Debug(logs.RouterCouldNotInitializeWriterProvider,
zap.String("error", err.Error()),
)
@@ -102,7 +103,7 @@ func (w *trustWriter) Write(ctx context.Context, t reputation.Trust) error {
// init writer with original context wrapped in routeContext
remoteWriter, err = provider.InitWriter(w.routeInfo.EpochProvider)
if err != nil {
- w.router.log.Debug("could not initialize writer",
+ w.router.log.Debug(logs.RouterCouldNotInitializeWriter,
zap.String("error", err.Error()),
)
@@ -114,7 +115,7 @@ func (w *trustWriter) Write(ctx context.Context, t reputation.Trust) error {
err := remoteWriter.Write(ctx, t)
if err != nil {
- w.router.log.Debug("could not write the value",
+ w.router.log.Debug(logs.RouterCouldNotWriteTheValue,
zap.String("error", err.Error()),
)
}
@@ -127,7 +128,7 @@ func (w *trustWriter) Close(ctx context.Context) error {
for key, wRemote := range w.mServers {
err := wRemote.Close(ctx)
if err != nil {
- w.router.log.Debug("could not close remote server writer",
+ w.router.log.Debug(logs.RouterCouldNotCloseRemoteServerWriter,
zap.String("key", key),
zap.String("error", err.Error()),
)
diff --git a/pkg/services/reputation/eigentrust/calculator/calls.go b/pkg/services/reputation/eigentrust/calculator/calls.go
index a8e5cf1da..5e2e900ae 100644
--- a/pkg/services/reputation/eigentrust/calculator/calls.go
+++ b/pkg/services/reputation/eigentrust/calculator/calls.go
@@ -3,6 +3,7 @@ package eigentrustcalc
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
@@ -27,7 +28,7 @@ func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
alpha, err := c.prm.AlphaProvider.EigenTrustAlpha()
if err != nil {
c.opts.log.Debug(
- "failed to get alpha param",
+ logs.CalculatorFailedToGetAlphaParam,
zap.Error(err),
)
return
@@ -56,7 +57,7 @@ func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
consumersIter, err := c.prm.DaughterTrustSource.InitConsumersIterator(epochIteration)
if err != nil {
- log.Debug("consumers trust iterator's init failure",
+ log.Debug(logs.CalculatorConsumersTrustIteratorsInitFailure,
zap.String("error", err.Error()),
)
@@ -76,7 +77,7 @@ func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
})
})
if err != nil {
- log.Debug("worker pool submit failure",
+ log.Debug(logs.CalculatorWorkerPoolSubmitFailure,
zap.String("error", err.Error()),
)
}
@@ -85,7 +86,7 @@ func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
return nil
})
if err != nil {
- log.Debug("iterate daughter's consumers failed",
+ log.Debug(logs.CalculatorIterateDaughtersConsumersFailed,
zap.String("error", err.Error()),
)
}
@@ -104,7 +105,7 @@ type iterDaughterPrm struct {
func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
initTrust, err := c.prm.InitialTrustSource.InitialTrust(p.id)
if err != nil {
- c.opts.log.Debug("get initial trust failure",
+ c.opts.log.Debug(logs.CalculatorGetInitialTrustFailure,
zap.Stringer("daughter", p.id),
zap.String("error", err.Error()),
)
@@ -114,7 +115,7 @@ func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
daughterIter, err := c.prm.DaughterTrustSource.InitDaughterIterator(p.ei, p.id)
if err != nil {
- c.opts.log.Debug("daughter trust iterator's init failure",
+ c.opts.log.Debug(logs.CalculatorDaughterTrustIteratorsInitFailure,
zap.String("error", err.Error()),
)
@@ -136,7 +137,7 @@ func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
return nil
})
if err != nil {
- c.opts.log.Debug("iterate over daughter's trusts failure",
+ c.opts.log.Debug(logs.CalculatorIterateOverDaughtersTrustsFailure,
zap.String("error", err.Error()),
)
@@ -165,7 +166,7 @@ func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
func (c *Calculator) processLastIteration(p iterDaughterPrm, intermediateTrust eigentrust.IterationTrust, sum reputation.TrustValue) {
finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ei)
if err != nil {
- c.opts.log.Debug("init writer failure",
+ c.opts.log.Debug(logs.CalculatorInitWriterFailure,
zap.String("error", err.Error()),
)
@@ -176,7 +177,7 @@ func (c *Calculator) processLastIteration(p iterDaughterPrm, intermediateTrust e
err = finalWriter.WriteIntermediateTrust(intermediateTrust)
if err != nil {
- c.opts.log.Debug("write final result failure",
+ c.opts.log.Debug(logs.CalculatorWriteFinalResultFailure,
zap.String("error", err.Error()),
)
@@ -187,7 +188,7 @@ func (c *Calculator) processLastIteration(p iterDaughterPrm, intermediateTrust e
func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDaughterPrm, daughterIter TrustIterator, sum reputation.TrustValue) {
intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ei)
if err != nil {
- c.opts.log.Debug("init writer failure",
+ c.opts.log.Debug(logs.CalculatorInitWriterFailure,
zap.String("error", err.Error()),
)
@@ -208,7 +209,7 @@ func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDau
err := intermediateWriter.Write(ctx, trust)
if err != nil {
- c.opts.log.Debug("write value failure",
+ c.opts.log.Debug(logs.CalculatorWriteValueFailure,
zap.String("error", err.Error()),
)
}
@@ -216,7 +217,7 @@ func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDau
return nil
})
if err != nil {
- c.opts.log.Debug("iterate daughter trusts failure",
+ c.opts.log.Debug(logs.CalculatorIterateDaughterTrustsFailure,
zap.String("error", err.Error()),
)
}
@@ -233,7 +234,7 @@ func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDau
func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochIterationInfo) {
daughterIter, err := c.prm.DaughterTrustSource.InitAllDaughtersIterator(epochInfo)
if err != nil {
- c.opts.log.Debug("all daughters trust iterator's init failure",
+ c.opts.log.Debug(logs.CalculatorAllDaughtersTrustIteratorsInitFailure,
zap.String("error", err.Error()),
)
@@ -242,7 +243,7 @@ func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochItera
intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(epochInfo)
if err != nil {
- c.opts.log.Debug("init writer failure",
+ c.opts.log.Debug(logs.CalculatorInitWriterFailure,
zap.String("error", err.Error()),
)
@@ -255,7 +256,7 @@ func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochItera
initTrust, err := c.prm.InitialTrustSource.InitialTrust(trusted)
if err != nil {
- c.opts.log.Debug("get initial trust failure",
+ c.opts.log.Debug(logs.CalculatorGetInitialTrustFailure,
zap.Stringer("peer", trusted),
zap.String("error", err.Error()),
)
@@ -269,7 +270,7 @@ func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochItera
err = intermediateWriter.Write(ctx, trust)
if err != nil {
- c.opts.log.Debug("write value failure",
+ c.opts.log.Debug(logs.CalculatorWriteValueFailure,
zap.String("error", err.Error()),
)
@@ -280,14 +281,14 @@ func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochItera
})
})
if err != nil {
- c.opts.log.Debug("iterate over all daughters failure",
+ c.opts.log.Debug(logs.CalculatorIterateOverAllDaughtersFailure,
zap.String("error", err.Error()),
)
}
err = intermediateWriter.Close(ctx)
if err != nil {
- c.opts.log.Debug("could not close writer",
+ c.opts.log.Debug(logs.CalculatorCouldNotCloseWriter,
zap.String("error", err.Error()),
)
}
diff --git a/pkg/services/reputation/eigentrust/controller/calls.go b/pkg/services/reputation/eigentrust/controller/calls.go
index 1753a430b..886daf9be 100644
--- a/pkg/services/reputation/eigentrust/controller/calls.go
+++ b/pkg/services/reputation/eigentrust/controller/calls.go
@@ -3,6 +3,7 @@ package eigentrustctrl
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
"go.uber.org/zap"
)
@@ -37,7 +38,7 @@ func (c *Controller) Continue(ctx context.Context, prm ContinuePrm) {
iterations, err := c.prm.IterationsProvider.EigenTrustIterations()
if err != nil {
- c.opts.log.Error("could not get EigenTrust iteration number",
+ c.opts.log.Error(logs.ControllerCouldNotGetEigenTrustIterationNumber,
zap.Error(err),
)
} else {
@@ -54,7 +55,7 @@ func (c *Controller) Continue(ctx context.Context, prm ContinuePrm) {
iterCtx.Increment()
})
if err != nil {
- c.opts.log.Debug("iteration submit failure",
+ c.opts.log.Debug(logs.ControllerIterationSubmitFailure,
zap.String("error", err.Error()),
)
}
diff --git a/pkg/services/reputation/eigentrust/routes/calls.go b/pkg/services/reputation/eigentrust/routes/calls.go
index c4d9688a9..ccb2fe8ea 100644
--- a/pkg/services/reputation/eigentrust/routes/calls.go
+++ b/pkg/services/reputation/eigentrust/routes/calls.go
@@ -3,6 +3,7 @@ package routes
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"go.uber.org/zap"
@@ -14,7 +15,7 @@ import (
func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) {
passedLen := len(passed)
- b.log.Debug("building next stage for trust route",
+ b.log.Debug(logs.RoutesBuildingNextStageForTrustRoute,
zap.Uint64("epoch", epoch),
zap.Int("passed_length", passedLen),
)
diff --git a/pkg/services/reputation/local/controller/calls.go b/pkg/services/reputation/local/controller/calls.go
index 80fa772d6..1cad09313 100644
--- a/pkg/services/reputation/local/controller/calls.go
+++ b/pkg/services/reputation/local/controller/calls.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -77,7 +78,7 @@ func (c *Controller) acquireReporter(ctx context.Context, epoch uint64) (context
)}
if started {
- log.Debug("report is already started")
+ log.Debug(logs.ControllerReportIsAlreadyStarted)
return ctx, nil
}
@@ -92,12 +93,12 @@ func (c *Controller) acquireReporter(ctx context.Context, epoch uint64) (context
}
func (c *reporter) report(ctx context.Context) {
- c.log.Debug("starting to report local trust values")
+ c.log.Debug(logs.ControllerStartingToReportLocalTrustValues)
// initialize iterator over locally collected values
iterator, err := c.ctrl.prm.LocalTrustSource.InitIterator(c.ep)
if err != nil {
- c.log.Debug("could not initialize iterator over local trust values",
+ c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocalTrustValues,
zap.String("error", err.Error()),
)
@@ -107,7 +108,7 @@ func (c *reporter) report(ctx context.Context) {
// initialize target of local trust values
targetWriter, err := c.ctrl.prm.LocalTrustTarget.InitWriter(c.ep)
if err != nil {
- c.log.Debug("could not initialize local trust target",
+ c.log.Debug(logs.ControllerCouldNotInitializeLocalTrustTarget,
zap.String("error", err.Error()),
)
@@ -126,7 +127,7 @@ func (c *reporter) report(ctx context.Context) {
},
)
if err != nil && !errors.Is(err, context.Canceled) {
- c.log.Debug("iterator over local trust failed",
+ c.log.Debug(logs.ControllerIteratorOverLocalTrustFailed,
zap.String("error", err.Error()),
)
@@ -136,14 +137,14 @@ func (c *reporter) report(ctx context.Context) {
// finish writing
err = targetWriter.Close(ctx)
if err != nil {
- c.log.Debug("could not finish writing local trust values",
+ c.log.Debug(logs.ControllerCouldNotFinishWritingLocalTrustValues,
zap.String("error", err.Error()),
)
return
}
- c.log.Debug("reporting successfully finished")
+ c.log.Debug(logs.ControllerReportingSuccessfullyFinished)
}
func (c *Controller) freeReport(epoch uint64, log *logger.Logger) {
@@ -165,9 +166,9 @@ func (c *Controller) freeReport(epoch uint64, log *logger.Logger) {
c.mtx.Unlock()
if stopped {
- log.Debug("reporting successfully interrupted")
+ log.Debug(logs.ControllerReportingSuccessfullyInterrupted)
} else {
- log.Debug("reporting is not started or already interrupted")
+ log.Debug(logs.ControllerReportingIsNotStartedOrAlreadyInterrupted)
}
}
diff --git a/pkg/services/reputation/local/routes/calls.go b/pkg/services/reputation/local/routes/calls.go
index f0eae16fe..2f99f0e10 100644
--- a/pkg/services/reputation/local/routes/calls.go
+++ b/pkg/services/reputation/local/routes/calls.go
@@ -3,6 +3,7 @@ package routes
import (
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
"go.uber.org/zap"
@@ -14,7 +15,7 @@ import (
func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) {
passedLen := len(passed)
- b.log.Debug("building next stage for local trust route",
+ b.log.Debug(logs.RoutesBuildingNextStageForLocalTrustRoute,
zap.Uint64("epoch", epoch),
zap.Int("passed_length", passedLen),
)
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index 237a13962..0be6497be 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -5,6 +5,7 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -28,7 +29,7 @@ func NewExecutionService(exec ServiceExecutor, l *logger.Logger) Server {
}
func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- s.log.Debug("serving request...",
+ s.log.Debug(logs.SessionServingRequest,
zap.String("component", "SessionService"),
zap.String("request", "Create"),
)
diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go
index ded33d1ec..25f067d62 100644
--- a/pkg/services/session/storage/persistent/storage.go
+++ b/pkg/services/session/storage/persistent/storage.go
@@ -6,6 +6,7 @@ import (
"encoding/hex"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -108,7 +109,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok
return nil
})
if err != nil {
- s.l.Error("could not get session from persistent storage",
+ s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage,
zap.Error(err),
zap.Stringer("ownerID", ownerID),
zap.String("tokenID", hex.EncodeToString(tokenID)),
@@ -133,7 +134,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
if epochFromToken(v) <= epoch {
err = c.Delete()
if err != nil {
- s.l.Error("could not delete %s token",
+ s.l.Error(logs.PersistentCouldNotDeleteSToken,
zap.String("token_id", hex.EncodeToString(k)),
)
}
@@ -144,7 +145,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) {
})
})
if err != nil {
- s.l.Error("could not clean up expired tokens",
+ s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens,
zap.Uint64("epoch", epoch),
)
}
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index 1671d2511..9594514f1 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -5,6 +5,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.uber.org/zap"
)
@@ -29,7 +30,7 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
return false
}
- s.log.Debug("redirecting tree service query", zap.String("endpoint", endpoint))
+ s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint))
called = true
stop = f(c)
return true
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index bb20310b2..98ed3df39 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -8,6 +8,7 @@ import (
"fmt"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -45,7 +46,7 @@ func (s *Service) localReplicationWorker() {
case op := <-s.replicateLocalCh:
err := s.forest.TreeApply(op.cid, op.treeID, &op.Move, false)
if err != nil {
- s.log.Error("failed to apply replicated operation",
+ s.log.Error(logs.TreeFailedToApplyReplicatedOperation,
zap.String("err", err.Error()))
}
}
@@ -79,10 +80,10 @@ func (s *Service) replicationWorker(ctx context.Context) {
if lastErr != nil {
if errors.Is(lastErr, errRecentlyFailed) {
- s.log.Debug("do not send update to the node",
+ s.log.Debug(logs.TreeDoNotSendUpdateToTheNode,
zap.String("last_error", lastErr.Error()))
} else {
- s.log.Warn("failed to sent update to the node",
+ s.log.Warn(logs.TreeFailedToSentUpdateToTheNode,
zap.String("last_error", lastErr.Error()),
zap.String("address", lastAddr),
zap.String("key", hex.EncodeToString(task.n.PublicKey())))
@@ -112,7 +113,7 @@ func (s *Service) replicateLoop(ctx context.Context) {
case op := <-s.replicateCh:
err := s.replicate(op)
if err != nil {
- s.log.Error("error during replication",
+ s.log.Error(logs.TreeErrorDuringReplication,
zap.String("err", err.Error()),
zap.Stringer("cid", op.cid),
zap.String("treeID", op.treeID))
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 7a5a95c4c..e861541f4 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -8,6 +8,7 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -76,7 +77,7 @@ func (s *Service) verifyClient(req message, cid cidSDK.ID, rawBearer []byte, op
var tableFromBearer bool
if len(rawBearer) != 0 {
if !basicACL.AllowedBearerRules(op) {
- s.log.Debug("bearer presented but not allowed by ACL",
+ s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL,
zap.String("cid", cid.EncodeToString()),
zap.String("op", op.String()),
)
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 47299d1c9..91f43900f 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -11,6 +11,7 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
@@ -86,7 +87,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
for _, tid := range treesToSync {
h, err := s.forest.TreeLastSyncHeight(cid, tid)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
- s.log.Warn("could not get last synchronized height for a tree",
+ s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
continue
@@ -94,7 +95,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
if h < newHeight {
if err := s.forest.TreeUpdateLastSyncHeight(cid, tid, newHeight); err != nil {
- s.log.Warn("could not update last synchronized height for a tree",
+ s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
}
@@ -126,7 +127,7 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string
func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
treeID string, nodes []netmapSDK.NodeInfo) uint64 {
- s.log.Debug("synchronize tree",
+ s.log.Debug(logs.TreeSynchronizeTree,
zap.Stringer("cid", cid),
zap.String("tree", treeID),
zap.Uint64("from", from))
@@ -184,7 +185,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
}
if err := errGroup.Wait(); err != nil {
- s.log.Warn("failed to run tree synchronization over all nodes", zap.Error(err))
+ s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err))
}
newHeight := uint64(math.MaxUint64)
@@ -283,11 +284,11 @@ func (s *Service) syncLoop(ctx context.Context) {
case <-ctx.Done():
return
case <-s.syncChan:
- s.log.Debug("syncing trees...")
+ s.log.Debug(logs.TreeSyncingTrees)
cnrs, err := s.cfg.cnrSource.List()
if err != nil {
- s.log.Error("could not fetch containers", zap.Error(err))
+ s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err))
continue
}
@@ -297,7 +298,7 @@ func (s *Service) syncLoop(ctx context.Context) {
s.removeContainers(ctx, newMap)
- s.log.Debug("trees have been synchronized")
+ s.log.Debug(logs.TreeTreesHaveBeenSynchronized)
}
}
}
@@ -310,19 +311,19 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
cnr := cnr
err := s.syncPool.Submit(func() {
defer wg.Done()
- s.log.Debug("syncing container trees...", zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
err := s.synchronizeAllTrees(ctx, cnr)
if err != nil {
- s.log.Error("could not sync trees", zap.Stringer("cid", cnr), zap.Error(err))
+ s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err))
return
}
- s.log.Debug("container trees have been synced", zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr))
})
if err != nil {
wg.Done()
- s.log.Error("could not query trees for synchronization",
+ s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization,
zap.Stringer("cid", cnr),
zap.Error(err))
if errors.Is(err, ants.ErrPoolClosed) {
@@ -349,11 +350,11 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID
}
for _, cnr := range removed {
- s.log.Debug("removing redundant trees...", zap.Stringer("cid", cnr))
+ s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr))
err := s.DropTree(ctx, cnr, "")
if err != nil {
- s.log.Error("could not remove redundant tree",
+ s.log.Error(logs.TreeCouldNotRemoveRedundantTree,
zap.Stringer("cid", cnr),
zap.Error(err))
}
@@ -367,7 +368,7 @@ func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID
for _, cnr := range cnrs {
_, pos, err := s.getContainerNodes(cnr)
if err != nil {
- s.log.Error("could not calculate container nodes",
+ s.log.Error(logs.TreeCouldNotCalculateContainerNodes,
zap.Stringer("cid", cnr),
zap.Error(err))
continue
From cffcc7745e99bb788f12f18e9f8d286239ac7eb9 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 13 Apr 2023 15:51:36 +0300
Subject: [PATCH 0102/1943] [#240] logs: Factor out common service log messages
Signed-off-by: Evgenii Stratonikov
---
internal/logs/logs.go | 43 +++++++------------
pkg/innerring/processors/balance/handlers.go | 2 +-
.../processors/container/handlers.go | 6 +--
pkg/innerring/processors/frostfs/handlers.go | 12 +++---
pkg/innerring/processors/netmap/handlers.go | 8 ++--
.../processors/reputation/handlers.go | 2 +-
pkg/services/object/delete/delete.go | 6 +--
pkg/services/object/get/container.go | 8 ++--
pkg/services/object/get/exec.go | 2 +-
pkg/services/object/get/get.go | 6 +--
pkg/services/object/get/remote.go | 2 +-
pkg/services/object/search/container.go | 10 ++---
pkg/services/object/search/exec.go | 2 +-
pkg/services/object/search/search.go | 6 +--
pkg/services/session/executor.go | 2 +-
15 files changed, 53 insertions(+), 64 deletions(-)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 46ed8e867..83acedfb4 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -1,5 +1,21 @@
package logs
+// Common service logs.
+const (
+ ServingRequest = "serving request..."
+ OperationFinishedSuccessfully = "operation finished successfully"
+ OperationFinishedWithError = "operation finished with error"
+
+ TryingToExecuteInContainer = "trying to execute in container..."
+ CouldNotGetCurrentEpochNumber = "could not get current epoch number"
+ ProcessEpoch = "process epoch"
+ ProcessingNode = "processing node..."
+ NoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration"
+ InterruptPlacementIterationByContext = "interrupt placement iteration by context"
+
+ Notification = "notification"
+)
+
const (
InnerringAmountCanNotBeRepresentedAsAnInt64 = "amount can not be represented as an int64" // Error in ../node/pkg/innerring/settlement.go
InnerringCantGetUsedSpaceEstimation = "can't get used space estimation" // Warn in ../node/pkg/innerring/settlement.go
@@ -56,7 +72,6 @@ const (
ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go
ReplicatorCouldNotReplicateObject = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go
ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" // Debug in ../node/pkg/services/replicator/process.go
- SessionServingRequest = "serving request..." // Debug in ../node/pkg/services/session/executor.go
TreeRedirectingTreeServiceQuery = "redirecting tree service query" // Debug in ../node/pkg/services/tree/redirect.go
TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL" // Debug in ../node/pkg/services/tree/signature.go
TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
@@ -115,9 +130,6 @@ const (
TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
TombstoneCouldNotParseTombstoneExpirationEpoch = "tombstone getter: could not parse tombstone expiration epoch" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
DeleteRequestIsNotRolledOverToTheContainer = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go
- DeleteServingRequest = "serving request..." // Debug in ../node/pkg/services/object/delete/delete.go
- DeleteOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/delete/delete.go
- DeleteOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/delete/delete.go
DeleteCouldNotComposeSplitInfo = "could not compose split info" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY" // Debug in ../node/pkg/services/object/delete/exec.go
DeleteAssemblingChain = "assembling chain..." // Debug in ../node/pkg/services/object/delete/exec.go
@@ -134,47 +146,29 @@ const (
DeleteFormingSplitInfo = "forming split info..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..." // Debug in ../node/pkg/services/object/delete/local.go
DeleteMembersSuccessfullyCollected = "members successfully collected" // Debug in ../node/pkg/services/object/delete/local.go
- GetProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/get/remote.go
GetRemoteCallFailed = "remote call failed" // Debug in ../node/pkg/services/object/get/remote.go
GetCanNotAssembleTheObject = "can not assemble the object" // Debug in ../node/pkg/services/object/get/assemble.go
GetTryingToAssembleTheObject = "trying to assemble the object..." // Debug in ../node/pkg/services/object/get/assemble.go
GetAssemblingSplittedObject = "assembling splitted object..." // Debug in ../node/pkg/services/object/get/assemble.go
GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" // Debug in ../node/pkg/services/object/get/assemble.go
GetFailedToAssembleSplittedObject = "failed to assemble splitted object" // Warn in ../node/pkg/services/object/get/assemble.go
- GetCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotWriteHeader = "could not write header" // Debug in ../node/pkg/services/object/get/exec.go
GetCouldNotWritePayloadChunk = "could not write payload chunk" // Debug in ../node/pkg/services/object/get/exec.go
GetLocalGetFailed = "local get failed" // Debug in ../node/pkg/services/object/get/local.go
GetReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/get/container.go
- GetTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/get/container.go
- GetProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/get/container.go
- GetNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/get/container.go
- GetInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/get/container.go
GetCompletingTheOperation = "completing the operation" // Debug in ../node/pkg/services/object/get/container.go
- GetServingRequest = "serving request..." // Debug in ../node/pkg/services/object/get/get.go
- GetOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedObjectIsVirtual = "requested object is virtual" // Debug in ../node/pkg/services/object/get/get.go
GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds" // Debug in ../node/pkg/services/object/get/get.go
- GetOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/get/get.go
PutAdditionalContainerBroadcastFailure = "additional container broadcast failure" // Error in ../node/pkg/services/object/put/distributed.go
SearchReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/search/container.go
- SearchTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/search/container.go
- SearchProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/search/container.go
- SearchNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/search/container.go
- SearchInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/search/container.go
- SearchProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/search/container.go
SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/search/container.go
SearchRemoteOperationFailed = "remote operation failed" // Debug in ../node/pkg/services/object/search/container.go
- SearchCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/search/exec.go
SearchCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/search/exec.go
SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" // Debug in ../node/pkg/services/object/search/exec.go
SearchLocalOperationFailed = "local operation failed" // Debug in ../node/pkg/services/object/search/local.go
- SearchServingRequest = "serving request..." // Debug in ../node/pkg/services/object/search/search.go
- SearchOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/search/search.go
- SearchOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/search/search.go
UtilObjectServiceError = "object service error" // Error in ../node/pkg/services/object/util/log.go
UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" // Error in ../node/pkg/services/object/util/log.go
V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
@@ -419,13 +413,11 @@ const (
AuditParseClientNodeInfo = "parse client node info" // Warn in ../node/pkg/innerring/processors/audit/process.go
AuditErrorInStorageGroupSearch = "error in storage group search" // Warn in ../node/pkg/innerring/processors/audit/process.go
AuditCouldNotGetStorageGroupObjectForAuditSkipping = "could not get storage group object for audit, skipping" // Error in ../node/pkg/innerring/processors/audit/process.go
- BalanceNotification = "notification" // Info in ../node/pkg/innerring/processors/balance/handlers.go
BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
- ContainerNotification = "notification" // Info in ../node/pkg/innerring/processors/container/handlers.go
ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
@@ -442,7 +434,6 @@ const (
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go
FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go
- FrostFSNotification = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
@@ -474,7 +465,6 @@ const (
NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go
NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
- NetmapNotification = "notification" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go
NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
@@ -505,7 +495,6 @@ const (
NetmapCouldNotIterateOverSubnetworksOfTheNode = "could not iterate over subnetworks of the node" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotInvokeNetmapUpdateState = "could not invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotInvokeNetmapAddPeer = "could not invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
- ReputationNotification = "notification" // Info in ../node/pkg/innerring/processors/reputation/handlers.go
ReputationReputationWorkerPoolDrained = "reputation worker pool drained" // Warn in ../node/pkg/innerring/processors/reputation/handlers.go
ReputationNonAlphabetModeIgnoreReputationPutNotification = "non alphabet mode, ignore reputation put notification" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
ReputationIgnoreReputationValue = "ignore reputation value" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go
index 3360af916..e325da1f9 100644
--- a/pkg/innerring/processors/balance/handlers.go
+++ b/pkg/innerring/processors/balance/handlers.go
@@ -11,7 +11,7 @@ import (
func (bp *Processor) handleLock(ev event.Event) {
lock := ev.(balanceEvent.Lock)
- bp.log.Info(logs.BalanceNotification,
+ bp.log.Info(logs.Notification,
zap.String("type", "lock"),
zap.String("value", hex.EncodeToString(lock.ID())))
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
index 3d1946b4f..8d260808b 100644
--- a/pkg/innerring/processors/container/handlers.go
+++ b/pkg/innerring/processors/container/handlers.go
@@ -14,7 +14,7 @@ func (cp *Processor) handlePut(ev event.Event) {
put := ev.(putEvent)
id := sha256.Sum256(put.Container())
- cp.log.Info(logs.ContainerNotification,
+ cp.log.Info(logs.Notification,
zap.String("type", "container put"),
zap.String("id", base58.Encode(id[:])))
@@ -30,7 +30,7 @@ func (cp *Processor) handlePut(ev event.Event) {
func (cp *Processor) handleDelete(ev event.Event) {
del := ev.(containerEvent.Delete)
- cp.log.Info(logs.ContainerNotification,
+ cp.log.Info(logs.Notification,
zap.String("type", "container delete"),
zap.String("id", base58.Encode(del.ContainerID())))
@@ -47,7 +47,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
func (cp *Processor) handleSetEACL(ev event.Event) {
e := ev.(containerEvent.SetEACL)
- cp.log.Info(logs.ContainerNotification,
+ cp.log.Info(logs.Notification,
zap.String("type", "set EACL"),
)
diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go
index 4eff15abe..4822cac2c 100644
--- a/pkg/innerring/processors/frostfs/handlers.go
+++ b/pkg/innerring/processors/frostfs/handlers.go
@@ -12,7 +12,7 @@ import (
func (np *Processor) handleDeposit(ev event.Event) {
deposit := ev.(frostfsEvent.Deposit)
- np.log.Info(logs.FrostFSNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "deposit"),
zap.String("id", hex.EncodeToString(slice.CopyReverse(deposit.ID()))))
@@ -28,7 +28,7 @@ func (np *Processor) handleDeposit(ev event.Event) {
func (np *Processor) handleWithdraw(ev event.Event) {
withdraw := ev.(frostfsEvent.Withdraw)
- np.log.Info(logs.FrostFSNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "withdraw"),
zap.String("id", hex.EncodeToString(slice.CopyReverse(withdraw.ID()))))
@@ -44,7 +44,7 @@ func (np *Processor) handleWithdraw(ev event.Event) {
func (np *Processor) handleCheque(ev event.Event) {
cheque := ev.(frostfsEvent.Cheque)
- np.log.Info(logs.FrostFSNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "cheque"),
zap.String("id", hex.EncodeToString(cheque.ID())))
@@ -60,7 +60,7 @@ func (np *Processor) handleCheque(ev event.Event) {
func (np *Processor) handleConfig(ev event.Event) {
cfg := ev.(frostfsEvent.Config)
- np.log.Info(logs.FrostFSNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "set config"),
zap.String("key", hex.EncodeToString(cfg.Key())),
zap.String("value", hex.EncodeToString(cfg.Value())))
@@ -77,7 +77,7 @@ func (np *Processor) handleConfig(ev event.Event) {
func (np *Processor) handleBind(ev event.Event) {
e := ev.(frostfsEvent.Bind)
- np.log.Info(logs.FrostFSNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "bind"),
)
@@ -93,7 +93,7 @@ func (np *Processor) handleBind(ev event.Event) {
func (np *Processor) handleUnbind(ev event.Event) {
e := ev.(frostfsEvent.Unbind)
- np.log.Info(logs.FrostFSNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "unbind"),
)
diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go
index 76b27c891..60d279940 100644
--- a/pkg/innerring/processors/netmap/handlers.go
+++ b/pkg/innerring/processors/netmap/handlers.go
@@ -27,7 +27,7 @@ func (np *Processor) HandleNewEpochTick(ev event.Event) {
func (np *Processor) handleNewEpoch(ev event.Event) {
epochEvent := ev.(netmapEvent.NewEpoch)
- np.log.Info(logs.NetmapNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "new epoch"),
zap.Uint64("value", epochEvent.EpochNumber()))
@@ -46,7 +46,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
func (np *Processor) handleAddPeer(ev event.Event) {
newPeer := ev.(netmapEvent.AddPeer)
- np.log.Info(logs.NetmapNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "add peer"),
)
@@ -64,7 +64,7 @@ func (np *Processor) handleAddPeer(ev event.Event) {
func (np *Processor) handleUpdateState(ev event.Event) {
updPeer := ev.(netmapEvent.UpdatePeer)
- np.log.Info(logs.NetmapNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "update peer state"),
zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))
@@ -105,7 +105,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
func (np *Processor) handleRemoveNode(ev event.Event) {
removeNode := ev.(subnetevents.RemoveNode)
- np.log.Info(logs.NetmapNotification,
+ np.log.Info(logs.Notification,
zap.String("type", "remove node from subnet"),
zap.String("subnetID", hex.EncodeToString(removeNode.SubnetworkID())),
zap.String("key", hex.EncodeToString(removeNode.Node())),
diff --git a/pkg/innerring/processors/reputation/handlers.go b/pkg/innerring/processors/reputation/handlers.go
index 30e3e9503..9b8e7f66a 100644
--- a/pkg/innerring/processors/reputation/handlers.go
+++ b/pkg/innerring/processors/reputation/handlers.go
@@ -14,7 +14,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) {
peerID := put.PeerID()
// FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
- rp.log.Info(logs.ReputationNotification,
+ rp.log.Info(logs.Notification,
zap.String("type", "reputation put"),
zap.String("peer_id", hex.EncodeToString(peerID.PublicKey())))
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index 971f0a6f5..ebc191538 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -35,7 +35,7 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) {
- exec.log.Debug(logs.DeleteServingRequest)
+ exec.log.Debug(logs.ServingRequest)
// perform local operation
exec.executeLocal(ctx)
@@ -47,9 +47,9 @@ func (exec *execCtx) analyzeStatus(execCnr bool) {
// analyze local result
switch exec.status {
case statusOK:
- exec.log.Debug(logs.DeleteOperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
default:
- exec.log.Debug(logs.DeleteOperationFinishedWithError,
+ exec.log.Debug(logs.OperationFinishedWithError,
zap.String("error", exec.err.Error()),
)
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index 74d63966e..17628e577 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -16,7 +16,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug(logs.GetTryingToExecuteInContainer,
+ exec.log.Debug(logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
@@ -44,7 +44,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
- exec.log.Debug(logs.GetProcessEpoch,
+ exec.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
@@ -61,7 +61,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug(logs.GetNoMoreNodesAbortPlacementIteration)
+ exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
return false
}
@@ -69,7 +69,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
for i := range addrs {
select {
case <-ctx.Done():
- exec.log.Debug(logs.GetInterruptPlacementIterationByContext,
+ exec.log.Debug(logs.InterruptPlacementIterationByContext,
zap.String("error", ctx.Err().Error()),
)
diff --git a/pkg/services/object/get/exec.go b/pkg/services/object/get/exec.go
index 7f090dd50..1bd5aa7f8 100644
--- a/pkg/services/object/get/exec.go
+++ b/pkg/services/object/get/exec.go
@@ -150,7 +150,7 @@ func (exec *execCtx) initEpoch() bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug(logs.GetCouldNotGetCurrentEpochNumber,
+ exec.log.Debug(logs.CouldNotGetCurrentEpochNumber,
zap.String("error", err.Error()),
)
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index bb0d669da..6295de9a9 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -84,7 +84,7 @@ func (s *Service) get(ctx context.Context, prm commonPrm, opts ...execOption) st
}
func (exec *execCtx) execute(ctx context.Context) {
- exec.log.Debug(logs.GetServingRequest)
+ exec.log.Debug(logs.ServingRequest)
// perform local operation
exec.executeLocal(ctx)
@@ -96,7 +96,7 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
switch exec.status {
case statusOK:
- exec.log.Debug(logs.GetOperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
case statusINHUMED:
exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
case statusVIRTUAL:
@@ -105,7 +105,7 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
case statusOutOfRange:
exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
default:
- exec.log.Debug(logs.GetOperationFinishedWithError,
+ exec.log.Debug(logs.OperationFinishedWithError,
zap.String("error", exec.err.Error()),
)
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index f4f74083b..ac8ec5105 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -16,7 +16,7 @@ func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool
ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
defer span.End()
- exec.log.Debug(logs.GetProcessingNode)
+ exec.log.Debug(logs.ProcessingNode)
client, ok := exec.remoteClient(info)
if !ok {
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index 9df438e00..2b6101a98 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -18,7 +18,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
lookupDepth := exec.netmapLookupDepth()
- exec.log.Debug(logs.SearchTryingToExecuteInContainer,
+ exec.log.Debug(logs.TryingToExecuteInContainer,
zap.Uint64("netmap lookup depth", lookupDepth),
)
@@ -49,7 +49,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
}
func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
- exec.log.Debug(logs.SearchProcessEpoch,
+ exec.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", exec.curProcEpoch),
)
@@ -64,7 +64,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
for {
addrs := traverser.Next()
if len(addrs) == 0 {
- exec.log.Debug(logs.SearchNoMoreNodesAbortPlacementIteration)
+ exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
break
}
@@ -77,7 +77,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
defer wg.Done()
select {
case <-ctx.Done():
- exec.log.Debug(logs.SearchInterruptPlacementIterationByContext,
+ exec.log.Debug(logs.InterruptPlacementIterationByContext,
zap.String("error", ctx.Err().Error()))
return
default:
@@ -87,7 +87,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
client.NodeInfoFromNetmapElement(&info, addrs[i])
- exec.log.Debug(logs.SearchProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+ exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
c, err := exec.svc.clientConstructor.get(info)
if err != nil {
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index 1733d7840..475a31b98 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -81,7 +81,7 @@ func (exec *execCtx) initEpoch() bool {
exec.status = statusUndefined
exec.err = err
- exec.log.Debug(logs.SearchCouldNotGetCurrentEpochNumber,
+ exec.log.Debug(logs.CouldNotGetCurrentEpochNumber,
zap.String("error", err.Error()),
)
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 5bf0710ad..b192e1d04 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -24,7 +24,7 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
}
func (exec *execCtx) execute(ctx context.Context) {
- exec.log.Debug(logs.SearchServingRequest)
+ exec.log.Debug(logs.ServingRequest)
// perform local operation
exec.executeLocal()
@@ -36,11 +36,11 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
// analyze local result
switch exec.status {
default:
- exec.log.Debug(logs.SearchOperationFinishedWithError,
+ exec.log.Debug(logs.OperationFinishedWithError,
zap.String("error", exec.err.Error()),
)
case statusOK:
- exec.log.Debug(logs.SearchOperationFinishedSuccessfully)
+ exec.log.Debug(logs.OperationFinishedSuccessfully)
}
if execCnr {
diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go
index 0be6497be..5ad1d6518 100644
--- a/pkg/services/session/executor.go
+++ b/pkg/services/session/executor.go
@@ -29,7 +29,7 @@ func NewExecutionService(exec ServiceExecutor, l *logger.Logger) Server {
}
func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
- s.log.Debug(logs.SessionServingRequest,
+ s.log.Debug(logs.ServingRequest,
zap.String("component", "SessionService"),
zap.String("request", "Create"),
)
From 4496999e525eee83cc9472f9bf573a6f7ad96b58 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Tue, 11 Apr 2023 15:04:05 +0300
Subject: [PATCH 0103/1943] [#100] Fix CHANGELOG
Signed-off-by: Anton Nikiforov
---
CHANGELOG.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 270d0265e..01a36c66d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -86,7 +86,7 @@ You need to change configuration environment variables to `FROSTFS_*` if you use
New config field `object.delete.tombstone_lifetime` allows to set tombstone lifetime
more appropriate for a specific deployment.
-Use `__SYSTEM__` prefix for system attributes instead of `__NEOFS__`
+Use `__SYSTEM__` prefix for system attributes instead of `__NEOFS__`
(existed objects with old attributes will be treated as before, but for new objects new attributes will be used).
## Older versions
From 0c6aeaaf18a708d051b72bb4a75c452611d1617d Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Thu, 13 Apr 2023 17:42:59 +0300
Subject: [PATCH 0104/1943] [#100] adm: Take net settings into account during
netmap contract update
Signed-off-by: Anton Nikiforov
---
CHANGELOG.md | 2 +
.../internal/modules/morph/config.go | 32 +++-----
.../modules/morph/initialize_deploy.go | 77 +++++++++++++++----
.../internal/modules/morph/netmap_util.go | 47 +++++++++++
4 files changed, 118 insertions(+), 40 deletions(-)
create mode 100644 cmd/frostfs-adm/internal/modules/morph/netmap_util.go
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 01a36c66d..9d284f6fc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,8 @@ Changelog for FrostFS Node
### Added
### Changed
### Fixed
+- Take network settings into account during netmap contract update (#100)
+
### Removed
### Updated
### Updating from v0.36.0
diff --git a/cmd/frostfs-adm/internal/modules/morph/config.go b/cmd/frostfs-adm/internal/modules/morph/config.go
index 8a888ab2c..11e1bd4f6 100644
--- a/cmd/frostfs-adm/internal/modules/morph/config.go
+++ b/cmd/frostfs-adm/internal/modules/morph/config.go
@@ -15,7 +15,6 @@ import (
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@@ -48,23 +47,12 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
buf := bytes.NewBuffer(nil)
tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
- for _, param := range arr {
- tuple, ok := param.Value().([]stackitem.Item)
- if !ok || len(tuple) != 2 {
- return errors.New("invalid ListConfig response from netmap contract")
- }
-
- k, err := tuple[0].TryBytes()
- if err != nil {
- return errors.New("invalid config key from netmap contract")
- }
-
- v, err := tuple[1].TryBytes()
- if err != nil {
- return invalidConfigValueErr(k)
- }
-
- switch string(k) {
+ m, err := parseConfigFromNetmapContract(arr)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
case netmapAuditFeeKey, netmapBasicIncomeRateKey,
netmapContainerFeeKey, netmapContainerAliasFeeKey,
netmapEigenTrustIterationsKey,
@@ -77,12 +65,10 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
case netmapEigenTrustAlphaKey:
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (str)\n", k, v)))
case netmapHomomorphicHashDisabledKey, netmapMaintenanceAllowedKey:
- vBool, err := tuple[1].TryBool()
- if err != nil {
+ if len(v) == 0 || len(v) > 1 {
return invalidConfigValueErr(k)
}
-
- _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, vBool)))
+ _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
default:
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
}
@@ -187,6 +173,6 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
return
}
-func invalidConfigValueErr(key []byte) error {
+func invalidConfigValueErr(key string) error {
return fmt.Errorf("invalid %s config value from netmap contract", key)
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
index ae80c2ffd..3446cd0b8 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
@@ -23,6 +23,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/management"
+ "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
@@ -30,8 +31,8 @@ import (
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/vm/emit"
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
- "github.com/spf13/viper"
)
const (
@@ -85,6 +86,21 @@ var (
nnsContract,
alphabetContract,
}, contractList...)
+
+ netmapConfigKeys = []string{
+ netmapEpochKey,
+ netmapMaxObjectSizeKey,
+ netmapAuditFeeKey,
+ netmapContainerFeeKey,
+ netmapContainerAliasFeeKey,
+ netmapEigenTrustIterationsKey,
+ netmapEigenTrustAlphaKey,
+ netmapBasicIncomeRateKey,
+ netmapInnerRingCandidateFeeKey,
+ netmapWithdrawFeeKey,
+ netmapHomomorphicHashDisabledKey,
+ netmapMaintenanceAllowedKey,
+ }
)
type contractState struct {
@@ -239,7 +255,7 @@ func (c *initializeContext) deployOrUpdateContracts(w *io2.BufBinWriter, nnsHash
invokeHash = ctrHash
}
- params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam))
+ params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam, updateMethodName))
res, err := c.CommitteeAct.MakeCall(invokeHash, method, params...)
if err != nil {
if method != updateMethodName || !strings.Contains(err.Error(), common.ErrAlreadyUpdated) {
@@ -362,7 +378,7 @@ func (c *initializeContext) deployContracts() error {
return fmt.Errorf("can't sign manifest group: %v", err)
}
- params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam))
+ params := getContractDeployParameters(cs, c.getContractDeployData(ctrName, keysParam, deployMethodName))
res, err := c.CommitteeAct.MakeCall(management.Hash, deployMethodName, params...)
if err != nil {
return fmt.Errorf("can't deploy %s contract: %w", ctrName, err)
@@ -529,7 +545,7 @@ func getContractDeployParameters(cs *contractState, deployData []any) []any {
return []any{cs.RawNEF, cs.RawManifest, deployData}
}
-func (c *initializeContext) getContractDeployData(ctrName string, keysParam []any) []any {
+func (c *initializeContext) getContractDeployData(ctrName string, keysParam []any, method string) []any {
items := make([]any, 1, 6)
items[0] = false // notaryDisabled is false
@@ -566,20 +582,31 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
c.Contracts[netmapContract].Hash,
c.Contracts[containerContract].Hash)
case netmapContract:
- configParam := []any{
- netmapEpochKey, viper.GetInt64(epochDurationInitFlag),
- netmapMaxObjectSizeKey, viper.GetInt64(maxObjectSizeInitFlag),
- netmapAuditFeeKey, viper.GetInt64(auditFeeInitFlag),
- netmapContainerFeeKey, viper.GetInt64(containerFeeInitFlag),
- netmapContainerAliasFeeKey, viper.GetInt64(containerAliasFeeInitFlag),
- netmapEigenTrustIterationsKey, int64(defaultEigenTrustIterations),
- netmapEigenTrustAlphaKey, defaultEigenTrustAlpha,
- netmapBasicIncomeRateKey, viper.GetInt64(incomeRateInitFlag),
- netmapInnerRingCandidateFeeKey, viper.GetInt64(candidateFeeInitFlag),
- netmapWithdrawFeeKey, viper.GetInt64(withdrawFeeInitFlag),
- netmapHomomorphicHashDisabledKey, viper.GetBool(homomorphicHashDisabledInitFlag),
- netmapMaintenanceAllowedKey, viper.GetBool(maintenanceModeAllowedInitFlag),
+ md := getDefaultNetmapContractConfigMap()
+ if method == updateMethodName {
+ arr, err := c.getNetConfigFromNetmapContract()
+ if err != nil {
+ panic(err)
+ }
+ m, err := parseConfigFromNetmapContract(arr)
+ if err != nil {
+ panic(err)
+ }
+ for k, v := range m {
+ for _, key := range netmapConfigKeys {
+ if k == key {
+ md[k] = v
+ break
+ }
+ }
+ }
}
+
+ var configParam []any
+ for k, v := range md {
+ configParam = append(configParam, k, v)
+ }
+
items = append(items,
c.Contracts[balanceContract].Hash,
c.Contracts[containerContract].Hash,
@@ -595,6 +622,22 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
return items
}
+func (c *initializeContext) getNetConfigFromNetmapContract() ([]stackitem.Item, error) {
+ cs, err := c.Client.GetContractStateByID(1)
+ if err != nil {
+ return nil, fmt.Errorf("NNS is not yet deployed: %w", err)
+ }
+ nmHash, err := nnsResolveHash(c.ReadOnlyInvoker, cs.Hash, netmapContract+".frostfs")
+ if err != nil {
+ return nil, fmt.Errorf("can't get netmap contract hash: %w", err)
+ }
+ arr, err := unwrap.Array(c.ReadOnlyInvoker.Call(nmHash, "listConfig"))
+ if err != nil {
+ return nil, fmt.Errorf("can't fetch list of network config keys from the netmap contract")
+ }
+ return arr, err
+}
+
func (c *initializeContext) getAlphabetDeployItems(i, n int) []any {
items := make([]any, 6)
items[0] = false
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap_util.go b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
new file mode 100644
index 000000000..2ba48b542
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
@@ -0,0 +1,47 @@
+package morph
+
+import (
+ "errors"
+
+ "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
+ "github.com/spf13/viper"
+)
+
+func getDefaultNetmapContractConfigMap() map[string]any {
+ m := make(map[string]any)
+ m[netmapEpochKey] = viper.GetInt64(epochDurationInitFlag)
+ m[netmapMaxObjectSizeKey] = viper.GetInt64(maxObjectSizeInitFlag)
+ m[netmapAuditFeeKey] = viper.GetInt64(auditFeeInitFlag)
+ m[netmapContainerFeeKey] = viper.GetInt64(containerFeeInitFlag)
+ m[netmapContainerAliasFeeKey] = viper.GetInt64(containerAliasFeeInitFlag)
+ m[netmapEigenTrustIterationsKey] = int64(defaultEigenTrustIterations)
+ m[netmapEigenTrustAlphaKey] = defaultEigenTrustAlpha
+ m[netmapBasicIncomeRateKey] = viper.GetInt64(incomeRateInitFlag)
+ m[netmapInnerRingCandidateFeeKey] = viper.GetInt64(candidateFeeInitFlag)
+ m[netmapWithdrawFeeKey] = viper.GetInt64(withdrawFeeInitFlag)
+ m[netmapHomomorphicHashDisabledKey] = viper.GetBool(homomorphicHashDisabledInitFlag)
+ m[netmapMaintenanceAllowedKey] = viper.GetBool(maintenanceModeAllowedInitFlag)
+ return m
+}
+
+func parseConfigFromNetmapContract(arr []stackitem.Item) (map[string][]byte, error) {
+ m := make(map[string][]byte, len(arr))
+ for _, param := range arr {
+ tuple, ok := param.Value().([]stackitem.Item)
+ if !ok || len(tuple) != 2 {
+ return nil, errors.New("invalid ListConfig response from netmap contract")
+ }
+
+ k, err := tuple[0].TryBytes()
+ if err != nil {
+ return nil, errors.New("invalid config key from netmap contract")
+ }
+
+ v, err := tuple[1].TryBytes()
+ if err != nil {
+ return nil, invalidConfigValueErr(string(k))
+ }
+ m[string(k)] = v
+ }
+ return m, nil
+}
From 299b6a69389f6bb154ca9c6d442388234038929b Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Tue, 11 Apr 2023 14:59:24 +0300
Subject: [PATCH 0105/1943] [#100] adm: Use netmap constants from pkg
Signed-off-by: Anton Nikiforov
---
.../internal/modules/morph/config.go | 29 ++++----
.../modules/morph/initialize_deploy.go | 38 ++++------
.../internal/modules/morph/netmap_util.go | 25 ++++---
pkg/morph/client/netmap/config.go | 74 +++++++++----------
4 files changed, 78 insertions(+), 88 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/config.go b/cmd/frostfs-adm/internal/modules/morph/config.go
index 11e1bd4f6..3a60e7197 100644
--- a/cmd/frostfs-adm/internal/modules/morph/config.go
+++ b/cmd/frostfs-adm/internal/modules/morph/config.go
@@ -10,6 +10,7 @@ import (
"strings"
"text/tabwriter"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
@@ -53,18 +54,18 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
}
for k, v := range m {
switch k {
- case netmapAuditFeeKey, netmapBasicIncomeRateKey,
- netmapContainerFeeKey, netmapContainerAliasFeeKey,
- netmapEigenTrustIterationsKey,
- netmapEpochKey, netmapInnerRingCandidateFeeKey,
- netmapMaxObjectSizeKey, netmapWithdrawFeeKey:
+ case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig,
+ netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
+ netmap.EtIterationsConfig,
+ netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
+ netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
nbuf := make([]byte, 8)
copy(nbuf[:], v)
n := binary.LittleEndian.Uint64(nbuf)
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
- case netmapEigenTrustAlphaKey:
+ case netmap.EtAlphaConfig:
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (str)\n", k, v)))
- case netmapHomomorphicHashDisabledKey, netmapMaintenanceAllowedKey:
+ case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
return invalidConfigValueErr(k)
}
@@ -136,16 +137,16 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
valRaw := v
switch key {
- case netmapAuditFeeKey, netmapBasicIncomeRateKey,
- netmapContainerFeeKey, netmapContainerAliasFeeKey,
- netmapEigenTrustIterationsKey,
- netmapEpochKey, netmapInnerRingCandidateFeeKey,
- netmapMaxObjectSizeKey, netmapWithdrawFeeKey:
+ case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig,
+ netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
+ netmap.EtIterationsConfig,
+ netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
+ netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
val, err = strconv.ParseInt(valRaw, 10, 64)
if err != nil {
err = fmt.Errorf("could not parse %s's value '%s' as int: %w", key, valRaw, err)
}
- case netmapEigenTrustAlphaKey:
+ case netmap.EtAlphaConfig:
// just check that it could
// be parsed correctly
_, err = strconv.ParseFloat(v, 64)
@@ -154,7 +155,7 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
}
val = valRaw
- case netmapHomomorphicHashDisabledKey, netmapMaintenanceAllowedKey:
+ case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
val, err = strconv.ParseBool(valRaw)
if err != nil {
err = fmt.Errorf("could not parse %s's value '%s' as bool: %w", key, valRaw, err)
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
index 3446cd0b8..9e473463b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
@@ -16,6 +16,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/core/state"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
@@ -51,19 +52,6 @@ const (
)
const (
- netmapEpochKey = "EpochDuration"
- netmapMaxObjectSizeKey = "MaxObjectSize"
- netmapAuditFeeKey = "AuditFee"
- netmapContainerFeeKey = "ContainerFee"
- netmapContainerAliasFeeKey = "ContainerAliasFee"
- netmapEigenTrustIterationsKey = "EigenTrustIterations"
- netmapEigenTrustAlphaKey = "EigenTrustAlpha"
- netmapBasicIncomeRateKey = "BasicIncomeRate"
- netmapInnerRingCandidateFeeKey = "InnerRingCandidateFee"
- netmapWithdrawFeeKey = "WithdrawFee"
- netmapHomomorphicHashDisabledKey = "HomomorphicHashingDisabled"
- netmapMaintenanceAllowedKey = "MaintenanceModeAllowed"
-
defaultEigenTrustIterations = 4
defaultEigenTrustAlpha = "0.1"
)
@@ -88,18 +76,18 @@ var (
}, contractList...)
netmapConfigKeys = []string{
- netmapEpochKey,
- netmapMaxObjectSizeKey,
- netmapAuditFeeKey,
- netmapContainerFeeKey,
- netmapContainerAliasFeeKey,
- netmapEigenTrustIterationsKey,
- netmapEigenTrustAlphaKey,
- netmapBasicIncomeRateKey,
- netmapInnerRingCandidateFeeKey,
- netmapWithdrawFeeKey,
- netmapHomomorphicHashDisabledKey,
- netmapMaintenanceAllowedKey,
+ netmap.EpochDurationConfig,
+ netmap.MaxObjectSizeConfig,
+ netmap.AuditFeeConfig,
+ netmap.ContainerFeeConfig,
+ netmap.ContainerAliasFeeConfig,
+ netmap.EtIterationsConfig,
+ netmap.EtAlphaConfig,
+ netmap.BasicIncomeRateConfig,
+ netmap.IrCandidateFeeConfig,
+ netmap.WithdrawFeeConfig,
+ netmap.HomomorphicHashingDisabledKey,
+ netmap.MaintenanceModeAllowedConfig,
}
)
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap_util.go b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
index 2ba48b542..4b6de5bd2 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
@@ -3,24 +3,25 @@ package morph
import (
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
"github.com/spf13/viper"
)
func getDefaultNetmapContractConfigMap() map[string]any {
m := make(map[string]any)
- m[netmapEpochKey] = viper.GetInt64(epochDurationInitFlag)
- m[netmapMaxObjectSizeKey] = viper.GetInt64(maxObjectSizeInitFlag)
- m[netmapAuditFeeKey] = viper.GetInt64(auditFeeInitFlag)
- m[netmapContainerFeeKey] = viper.GetInt64(containerFeeInitFlag)
- m[netmapContainerAliasFeeKey] = viper.GetInt64(containerAliasFeeInitFlag)
- m[netmapEigenTrustIterationsKey] = int64(defaultEigenTrustIterations)
- m[netmapEigenTrustAlphaKey] = defaultEigenTrustAlpha
- m[netmapBasicIncomeRateKey] = viper.GetInt64(incomeRateInitFlag)
- m[netmapInnerRingCandidateFeeKey] = viper.GetInt64(candidateFeeInitFlag)
- m[netmapWithdrawFeeKey] = viper.GetInt64(withdrawFeeInitFlag)
- m[netmapHomomorphicHashDisabledKey] = viper.GetBool(homomorphicHashDisabledInitFlag)
- m[netmapMaintenanceAllowedKey] = viper.GetBool(maintenanceModeAllowedInitFlag)
+ m[netmap.EpochDurationConfig] = viper.GetInt64(epochDurationInitFlag)
+ m[netmap.MaxObjectSizeConfig] = viper.GetInt64(maxObjectSizeInitFlag)
+ m[netmap.AuditFeeConfig] = viper.GetInt64(auditFeeInitFlag)
+ m[netmap.ContainerFeeConfig] = viper.GetInt64(containerFeeInitFlag)
+ m[netmap.ContainerAliasFeeConfig] = viper.GetInt64(containerAliasFeeInitFlag)
+ m[netmap.EtIterationsConfig] = int64(defaultEigenTrustIterations)
+ m[netmap.EtAlphaConfig] = defaultEigenTrustAlpha
+ m[netmap.BasicIncomeRateConfig] = viper.GetInt64(incomeRateInitFlag)
+ m[netmap.IrCandidateFeeConfig] = viper.GetInt64(candidateFeeInitFlag)
+ m[netmap.WithdrawFeeConfig] = viper.GetInt64(withdrawFeeInitFlag)
+ m[netmap.HomomorphicHashingDisabledKey] = viper.GetBool(homomorphicHashDisabledInitFlag)
+ m[netmap.MaintenanceModeAllowedConfig] = viper.GetBool(maintenanceModeAllowedInitFlag)
return m
}
diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go
index 6b721cdfb..3011bd541 100644
--- a/pkg/morph/client/netmap/config.go
+++ b/pkg/morph/client/netmap/config.go
@@ -11,24 +11,24 @@ import (
)
const (
- maxObjectSizeConfig = "MaxObjectSize"
- basicIncomeRateConfig = "BasicIncomeRate"
- auditFeeConfig = "AuditFee"
- epochDurationConfig = "EpochDuration"
- containerFeeConfig = "ContainerFee"
- containerAliasFeeConfig = "ContainerAliasFee"
- etIterationsConfig = "EigenTrustIterations"
- etAlphaConfig = "EigenTrustAlpha"
- irCandidateFeeConfig = "InnerRingCandidateFee"
- withdrawFeeConfig = "WithdrawFee"
- homomorphicHashingDisabledKey = "HomomorphicHashingDisabled"
- maintenanceModeAllowedConfig = "MaintenanceModeAllowed"
+ MaxObjectSizeConfig = "MaxObjectSize"
+ BasicIncomeRateConfig = "BasicIncomeRate"
+ AuditFeeConfig = "AuditFee"
+ EpochDurationConfig = "EpochDuration"
+ ContainerFeeConfig = "ContainerFee"
+ ContainerAliasFeeConfig = "ContainerAliasFee"
+ EtIterationsConfig = "EigenTrustIterations"
+ EtAlphaConfig = "EigenTrustAlpha"
+ IrCandidateFeeConfig = "InnerRingCandidateFee"
+ WithdrawFeeConfig = "WithdrawFee"
+ HomomorphicHashingDisabledKey = "HomomorphicHashingDisabled"
+ MaintenanceModeAllowedConfig = "MaintenanceModeAllowed"
)
// MaxObjectSize receives max object size configuration
// value through the Netmap contract call.
func (c *Client) MaxObjectSize() (uint64, error) {
- objectSize, err := c.readUInt64Config(maxObjectSizeConfig)
+ objectSize, err := c.readUInt64Config(MaxObjectSizeConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err)
}
@@ -39,7 +39,7 @@ func (c *Client) MaxObjectSize() (uint64, error) {
// BasicIncomeRate returns basic income rate configuration value from network
// config in netmap contract.
func (c *Client) BasicIncomeRate() (uint64, error) {
- rate, err := c.readUInt64Config(basicIncomeRateConfig)
+ rate, err := c.readUInt64Config(BasicIncomeRateConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get basic income rate: %w", c, err)
}
@@ -50,7 +50,7 @@ func (c *Client) BasicIncomeRate() (uint64, error) {
// AuditFee returns audit fee configuration value from network
// config in netmap contract.
func (c *Client) AuditFee() (uint64, error) {
- fee, err := c.readUInt64Config(auditFeeConfig)
+ fee, err := c.readUInt64Config(AuditFeeConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get audit fee: %w", c, err)
}
@@ -60,7 +60,7 @@ func (c *Client) AuditFee() (uint64, error) {
// EpochDuration returns number of sidechain blocks per one FrostFS epoch.
func (c *Client) EpochDuration() (uint64, error) {
- epochDuration, err := c.readUInt64Config(epochDurationConfig)
+ epochDuration, err := c.readUInt64Config(EpochDurationConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err)
}
@@ -71,7 +71,7 @@ func (c *Client) EpochDuration() (uint64, error) {
// ContainerFee returns fee paid by container owner to each alphabet node
// for container registration.
func (c *Client) ContainerFee() (uint64, error) {
- fee, err := c.readUInt64Config(containerFeeConfig)
+ fee, err := c.readUInt64Config(ContainerFeeConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err)
}
@@ -82,7 +82,7 @@ func (c *Client) ContainerFee() (uint64, error) {
// ContainerAliasFee returns additional fee paid by container owner to each
// alphabet node for container nice name registration.
func (c *Client) ContainerAliasFee() (uint64, error) {
- fee, err := c.readUInt64Config(containerAliasFeeConfig)
+ fee, err := c.readUInt64Config(ContainerAliasFeeConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err)
}
@@ -93,7 +93,7 @@ func (c *Client) ContainerAliasFee() (uint64, error) {
// EigenTrustIterations returns global configuration value of iteration cycles
// for EigenTrust algorithm per epoch.
func (c *Client) EigenTrustIterations() (uint64, error) {
- iterations, err := c.readUInt64Config(etIterationsConfig)
+ iterations, err := c.readUInt64Config(EtIterationsConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get eigen trust iterations: %w", c, err)
}
@@ -104,7 +104,7 @@ func (c *Client) EigenTrustIterations() (uint64, error) {
// EigenTrustAlpha returns global configuration value of alpha parameter.
// It receives the alpha as a string and tries to convert it to float.
func (c *Client) EigenTrustAlpha() (float64, error) {
- strAlpha, err := c.readStringConfig(etAlphaConfig)
+ strAlpha, err := c.readStringConfig(EtAlphaConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get eigen trust alpha: %w", c, err)
}
@@ -117,13 +117,13 @@ func (c *Client) EigenTrustAlpha() (float64, error) {
//
// Returns (false, nil) if config key is not found in the contract.
func (c *Client) HomomorphicHashDisabled() (bool, error) {
- return c.readBoolConfig(homomorphicHashingDisabledKey)
+ return c.readBoolConfig(HomomorphicHashingDisabledKey)
}
// InnerRingCandidateFee returns global configuration value of fee paid by
// node to be in inner ring candidates list.
func (c *Client) InnerRingCandidateFee() (uint64, error) {
- fee, err := c.readUInt64Config(irCandidateFeeConfig)
+ fee, err := c.readUInt64Config(IrCandidateFeeConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err)
}
@@ -134,7 +134,7 @@ func (c *Client) InnerRingCandidateFee() (uint64, error) {
// WithdrawFee returns global configuration value of fee paid by user to
// withdraw assets from FrostFS contract.
func (c *Client) WithdrawFee() (uint64, error) {
- fee, err := c.readUInt64Config(withdrawFeeConfig)
+ fee, err := c.readUInt64Config(WithdrawFeeConfig)
if err != nil {
return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err)
}
@@ -148,7 +148,7 @@ func (c *Client) WithdrawFee() (uint64, error) {
//
// By default, maintenance state is disallowed.
func (c *Client) MaintenanceModeAllowed() (bool, error) {
- return c.readBoolConfig(maintenanceModeAllowedConfig)
+ return c.readBoolConfig(MaintenanceModeAllowedConfig)
}
func (c *Client) readUInt64Config(key string) (uint64, error) {
@@ -299,32 +299,32 @@ func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) {
Name: name,
Value: value,
})
- case maxObjectSizeConfig:
+ case MaxObjectSizeConfig:
res.MaxObjectSize = bytesToUint64(value)
- case basicIncomeRateConfig:
+ case BasicIncomeRateConfig:
res.StoragePrice = bytesToUint64(value)
- case auditFeeConfig:
+ case AuditFeeConfig:
res.AuditFee = bytesToUint64(value)
- case epochDurationConfig:
+ case EpochDurationConfig:
res.EpochDuration = bytesToUint64(value)
- case containerFeeConfig:
+ case ContainerFeeConfig:
res.ContainerFee = bytesToUint64(value)
- case containerAliasFeeConfig:
+ case ContainerAliasFeeConfig:
res.ContainerAliasFee = bytesToUint64(value)
- case etIterationsConfig:
+ case EtIterationsConfig:
res.EigenTrustIterations = bytesToUint64(value)
- case etAlphaConfig:
+ case EtAlphaConfig:
res.EigenTrustAlpha, err = strconv.ParseFloat(string(value), 64)
if err != nil {
- return fmt.Errorf("invalid prm %s: %v", etAlphaConfig, err)
+ return fmt.Errorf("invalid prm %s: %v", EtAlphaConfig, err)
}
- case irCandidateFeeConfig:
+ case IrCandidateFeeConfig:
res.IRCandidateFee = bytesToUint64(value)
- case withdrawFeeConfig:
+ case WithdrawFeeConfig:
res.WithdrawalFee = bytesToUint64(value)
- case homomorphicHashingDisabledKey:
+ case HomomorphicHashingDisabledKey:
res.HomomorphicHashingDisabled = bytesToBool(value)
- case maintenanceModeAllowedConfig:
+ case MaintenanceModeAllowedConfig:
res.MaintenanceModeAllowed = bytesToBool(value)
}
From adcfce39cf707466dfa0cf571d496dd1ba779fa0 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 13 Apr 2023 16:04:42 +0300
Subject: [PATCH 0106/1943] [#246] .gitattributes: Do not show diff for go.sum
When we update dependencies it can be rather big. However it is
generated automatically with `go mod tidy`, no need to review.
Signed-off-by: Evgenii Stratonikov
---
.gitattributes | 1 +
1 file changed, 1 insertion(+)
diff --git a/.gitattributes b/.gitattributes
index c7a3f7a86..aa9391657 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,2 +1,3 @@
/**/*.pb.go -diff -merge
/**/*.pb.go linguist-generated=true
+/go.sum -diff
From 41eb3129ae0fbe1dc177d9dbbd5ab6bb3917da97 Mon Sep 17 00:00:00 2001
From: Alejandro Lopez
Date: Fri, 14 Apr 2023 12:16:14 +0300
Subject: [PATCH 0107/1943] [#139] Refactor blobovnicza exist test to not use
chmod
Signed-off-by: Alejandro Lopez
---
internal/logs/logs.go | 2 +-
pkg/innerring/processors/netmap/process_peers.go | 2 +-
.../blobstor/blobovniczatree/exists_test.go | 9 +++++----
3 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 83acedfb4..062538747 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -490,7 +490,7 @@ const (
NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification = "non alphabet mode, ignore remove node from subnet notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotGetNetworkMapCandidates = "could not get network map candidates" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
- NetmapCouldNotUnmarshalSubnetId = "could not unmarshal subnet id" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
+ NetmapCouldNotUnmarshalSubnetID = "could not unmarshal subnet id" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapGotZeroSubnetInRemoveNodeNotification = "got zero subnet in remove node notification" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotIterateOverSubnetworksOfTheNode = "could not iterate over subnetworks of the node" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotInvokeNetmapUpdateState = "could not invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index ffaad3b4e..130d08568 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -159,7 +159,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
err = subnetToRemoveFrom.Unmarshal(rawSubnet)
if err != nil {
- np.log.Warn(logs.NetmapCouldNotUnmarshalSubnetId,
+ np.log.Warn(logs.NetmapCouldNotUnmarshalSubnetID,
zap.Error(err),
)
return
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
index 08fd2223f..0c7c61d76 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
@@ -53,10 +53,11 @@ func TestExistsInvalidStorageID(t *testing.T) {
t.Run("invalid storage id", func(t *testing.T) {
storageID := slice.Copy(putRes.StorageID)
storageID[0] = '9'
- badDir := filepath.Join(dir, "9")
- require.NoError(t, os.MkdirAll(badDir, os.ModePerm))
- require.NoError(t, os.Chmod(badDir, 0))
- t.Cleanup(func() { _ = os.Chmod(filepath.Join(dir, "9"), os.ModePerm) })
+
+ // An invalid boltdb file is created so that it returns an error when opened
+ badFileDir := filepath.Join(dir, "9", "0")
+ require.NoError(t, os.MkdirAll(badFileDir, os.ModePerm))
+ require.NoError(t, os.WriteFile(filepath.Join(badFileDir, "0"), []byte("not a boltdb file content"), 0777))
res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: storageID})
require.Error(t, err)
From 8d2f443868ec600b020d5e1ff3c3b0ba866e3f9b Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Thu, 13 Apr 2023 12:02:29 +0300
Subject: [PATCH 0108/1943] [#238] Fix linter error
Signed-off-by: Anton Nikiforov
---
config/example/node.env | 2 +-
config/example/node.yaml | 1 -
2 files changed, 1 insertion(+), 2 deletions(-)
diff --git a/config/example/node.env b/config/example/node.env
index 9a1a8b052..a4088f75a 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -187,4 +187,4 @@ FROSTFS_STORAGE_SHARD_1_GC_REMOVER_SLEEP_INTERVAL=5m
FROSTFS_TRACING_ENABLED=true
FROSTFS_TRACING_ENDPOINT="localhost"
-FROSTFS_TRACING_EXPORTER="otlp_grpc"
\ No newline at end of file
+FROSTFS_TRACING_EXPORTER="otlp_grpc"
diff --git a/config/example/node.yaml b/config/example/node.yaml
index e3b41d413..0d71f0fd2 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -219,4 +219,3 @@ tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost"
-
\ No newline at end of file
From 995db117d0cd6df0a7ab61d5fad271f0c1556621 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Fri, 14 Apr 2023 12:04:46 +0300
Subject: [PATCH 0109/1943] [#238] node: Read cfg from dir even if cfg file not
set
Signed-off-by: Anton Nikiforov
---
CHANGELOG.md | 1 +
cmd/frostfs-node/config/configdir_test.go | 24 +++++++++++++++++++++++
pkg/util/config/dir.go | 24 +++++++++--------------
3 files changed, 34 insertions(+), 15 deletions(-)
create mode 100644 cmd/frostfs-node/config/configdir_test.go
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9d284f6fc..94018e357 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ Changelog for FrostFS Node
### Changed
### Fixed
- Take network settings into account during netmap contract update (#100)
+- Read config files from dir even if config file not provided via `--config` for node (#238)
### Removed
### Updated
diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go
new file mode 100644
index 000000000..2c3255670
--- /dev/null
+++ b/cmd/frostfs-node/config/configdir_test.go
@@ -0,0 +1,24 @@
+package config
+
+import (
+ "os"
+ "path"
+ "testing"
+
+ "github.com/spf13/cast"
+ "github.com/stretchr/testify/require"
+)
+
+func TestConfigDir(t *testing.T) {
+ dir := t.TempDir()
+
+ cfgFileName0 := path.Join(dir, "cfg_00.json")
+ cfgFileName1 := path.Join(dir, "cfg_01.yml")
+
+ require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0777))
+ require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0777))
+
+ c := New(Prm{}, WithConfigDir(dir))
+ require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level")))
+ require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size")))
+}
diff --git a/pkg/util/config/dir.go b/pkg/util/config/dir.go
index a74992d19..0379fe268 100644
--- a/pkg/util/config/dir.go
+++ b/pkg/util/config/dir.go
@@ -1,6 +1,7 @@
package config
import (
+ "fmt"
"os"
"path"
@@ -20,7 +21,7 @@ func ReadConfigDir(v *viper.Viper, configDir string) error {
continue
}
ext := path.Ext(entry.Name())
- if ext != ".yaml" && ext != ".yml" {
+ if ext != ".yaml" && ext != ".yml" && ext != ".json" {
continue
}
@@ -33,22 +34,15 @@ func ReadConfigDir(v *viper.Viper, configDir string) error {
}
// mergeConfig reads config file and merge its content with current viper.
-func mergeConfig(v *viper.Viper, fileName string) (err error) {
- var cfgFile *os.File
- cfgFile, err = os.Open(fileName)
+func mergeConfig(v *viper.Viper, fileName string) error {
+ cv := viper.New()
+ cv.SetConfigFile(fileName)
+ err := cv.ReadInConfig()
if err != nil {
- return err
+ return fmt.Errorf("failed to read config: %w", err)
}
-
- defer func() {
- errClose := cfgFile.Close()
- if err == nil {
- err = errClose
- }
- }()
-
- if err = v.MergeConfig(cfgFile); err != nil {
- return err
+ if err = v.MergeConfigMap(cv.AllSettings()); err != nil {
+ return fmt.Errorf("failed to merge config: %w", err)
}
return nil
From 200fc8b8822f77597bb3ecc3f96fb0906dc07669 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 12 Apr 2023 11:02:25 +0300
Subject: [PATCH 0110/1943] [#242] put: Pass context to relay function
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/distributed.go | 4 ++--
pkg/services/object/put/prm.go | 6 ++++--
pkg/services/object/put/streamer.go | 8 ++++----
pkg/services/object/put/v2/streamer.go | 4 ++--
4 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index 86dc3c2ca..15296f83f 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -36,7 +36,7 @@ type distributedTarget struct {
isLocalKey func([]byte) bool
- relay func(nodeDesc) error
+ relay func(context.Context, nodeDesc) error
fmt *object.FormatValidator
@@ -153,7 +153,7 @@ func (t *distributedTarget) Close(ctx context.Context) (*transformer.AccessIdent
func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error {
if !node.local && t.relay != nil {
- return t.relay(node)
+ return t.relay(ctx, node)
}
target := t.nodeTargetInitializer(node)
diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go
index aea5926f4..27d9c9c7a 100644
--- a/pkg/services/object/put/prm.go
+++ b/pkg/services/object/put/prm.go
@@ -1,6 +1,8 @@
package putsvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
@@ -17,7 +19,7 @@ type PutInitPrm struct {
traverseOpts []placement.Option
- relay func(client.NodeInfo, client.MultiAddressClient) error
+ relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
}
type PutChunkPrm struct {
@@ -40,7 +42,7 @@ func (p *PutInitPrm) WithObject(v *object.Object) *PutInitPrm {
return p
}
-func (p *PutInitPrm) WithRelay(f func(client.NodeInfo, client.MultiAddressClient) error) *PutInitPrm {
+func (p *PutInitPrm) WithRelay(f func(context.Context, client.NodeInfo, client.MultiAddressClient) error) *PutInitPrm {
if p != nil {
p.relay = f
}
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index fed161e03..e355990a3 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -23,7 +23,7 @@ type Streamer struct {
target transformer.ObjectTarget
- relay func(client.NodeInfo, client.MultiAddressClient) error
+ relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
maxPayloadSz uint64 // network config
}
@@ -197,9 +197,9 @@ func (p *Streamer) preparePrm(prm *PutInitPrm) error {
}
func (p *Streamer) newCommonTarget(prm *PutInitPrm) transformer.ObjectTarget {
- var relay func(nodeDesc) error
+ var relay func(context.Context, nodeDesc) error
if p.relay != nil {
- relay = func(node nodeDesc) error {
+ relay = func(ctx context.Context, node nodeDesc) error {
var info client.NodeInfo
client.NodeInfoFromNetmapElement(&info, node.info)
@@ -209,7 +209,7 @@ func (p *Streamer) newCommonTarget(prm *PutInitPrm) transformer.ObjectTarget {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
- return p.relay(info, c)
+ return p.relay(ctx, info, c)
}
}
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index 65846ea9f..3b8d7b88c 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -120,7 +120,7 @@ func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error
return fromPutResponse(resp), nil
}
-func (s *streamer) relayRequest(info client.NodeInfo, c client.MultiAddressClient) error {
+func (s *streamer) relayRequest(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) error {
// open stream
resp := new(object.PutResponse)
@@ -144,7 +144,7 @@ func (s *streamer) relayRequest(info client.NodeInfo, c client.MultiAddressClien
var stream *rpc.PutRequestWriter
err = c.RawForAddress(addr, func(cli *rawclient.Client) error {
- stream, err = rpc.PutObject(cli, resp)
+ stream, err = rpc.PutObject(cli, resp, rawclient.WithContext(ctx))
return err
})
if err != nil {
From d62c6e4ce68f661ef7b677098a3734392e88686a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 12 Apr 2023 17:01:29 +0300
Subject: [PATCH 0111/1943] [#242] node: Add tracing spans
Add tracing spans for PUT requests.
Add tracing spans for DELETE requests.
Add tracing spans for SELECT requests.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-lens/internal/meta/inspect.go | 4 +-
cmd/frostfs-node/notificator.go | 2 +-
cmd/frostfs-node/object.go | 24 ++++++------
pkg/core/object/fmt.go | 13 ++++---
pkg/core/object/fmt_test.go | 23 +++++------
.../blobovnicza/blobovnicza_test.go | 2 +-
.../blobovnicza/delete.go | 13 ++++++-
.../blobstor/blobovniczatree/delete.go | 30 +++++++++-----
.../blobstor/blobovniczatree/exists_test.go | 2 +-
.../blobstor/blobovniczatree/put.go | 13 ++++++-
.../blobstor/blobstor_test.go | 4 +-
.../blobstor/common/storage.go | 4 +-
pkg/local_object_storage/blobstor/delete.go | 18 +++++++--
.../blobstor/exists_test.go | 2 +-
.../blobstor/fstree/fstree.go | 17 +++++++-
.../blobstor/internal/blobstortest/common.go | 3 +-
.../blobstor/internal/blobstortest/control.go | 4 +-
.../blobstor/internal/blobstortest/delete.go | 10 ++---
.../blobstor/internal/blobstortest/iterate.go | 3 +-
.../blobstor/iterate_test.go | 3 +-
.../blobstor/memstore/memstore.go | 4 +-
.../blobstor/memstore/memstore_test.go | 4 +-
.../blobstor/perf_test.go | 6 +--
pkg/local_object_storage/blobstor/put.go | 15 ++++++-
.../blobstor/teststore/teststore.go | 8 ++--
pkg/local_object_storage/engine/control.go | 2 +-
.../engine/control_test.go | 2 +-
pkg/local_object_storage/engine/delete.go | 12 +++++-
.../engine/delete_test.go | 4 +-
.../engine/engine_test.go | 4 +-
pkg/local_object_storage/engine/error_test.go | 6 +--
.../engine/evacuate_test.go | 4 +-
pkg/local_object_storage/engine/exists.go | 4 +-
pkg/local_object_storage/engine/get.go | 12 +++---
pkg/local_object_storage/engine/head_test.go | 4 +-
pkg/local_object_storage/engine/inhume.go | 20 ++++++++--
.../engine/inhume_test.go | 10 ++---
pkg/local_object_storage/engine/list_test.go | 3 +-
pkg/local_object_storage/engine/lock.go | 27 +++++++++----
pkg/local_object_storage/engine/lock_test.go | 20 +++++-----
pkg/local_object_storage/engine/put.go | 36 +++++++++--------
.../engine/remove_copies.go | 2 +-
.../engine/remove_copies_test.go | 12 +++---
pkg/local_object_storage/engine/restore.go | 19 +++++++--
pkg/local_object_storage/engine/select.go | 23 ++++++++---
pkg/local_object_storage/engine/tree_test.go | 5 ++-
pkg/local_object_storage/engine/writecache.go | 16 +++++++-
.../metabase/control_test.go | 3 +-
.../metabase/counter_test.go | 17 ++++----
pkg/local_object_storage/metabase/delete.go | 12 +++++-
.../metabase/delete_test.go | 3 +-
pkg/local_object_storage/metabase/exists.go | 12 +++++-
pkg/local_object_storage/metabase/get.go | 13 ++++++-
pkg/local_object_storage/metabase/get_test.go | 9 +++--
.../metabase/graveyard_test.go | 13 ++++---
pkg/local_object_storage/metabase/inhume.go | 7 +++-
.../metabase/inhume_test.go | 21 +++++-----
.../metabase/iterators_test.go | 9 +++--
pkg/local_object_storage/metabase/lock.go | 22 ++++++++++-
.../metabase/lock_test.go | 39 ++++++++++---------
pkg/local_object_storage/metabase/movable.go | 12 +++++-
.../metabase/movable_test.go | 3 +-
pkg/local_object_storage/metabase/put.go | 12 +++++-
pkg/local_object_storage/metabase/put_test.go | 3 +-
pkg/local_object_storage/metabase/select.go | 12 +++++-
.../metabase/select_test.go | 5 ++-
.../metabase/storage_id.go | 12 +++++-
.../metabase/storage_id_test.go | 3 +-
pkg/local_object_storage/shard/control.go | 29 ++++++++------
.../shard/control_test.go | 15 +++----
pkg/local_object_storage/shard/delete.go | 25 ++++++++----
pkg/local_object_storage/shard/delete_test.go | 8 ++--
pkg/local_object_storage/shard/dump_test.go | 24 ++++++------
pkg/local_object_storage/shard/exists.go | 12 +++++-
pkg/local_object_storage/shard/gc.go | 10 ++---
pkg/local_object_storage/shard/gc_test.go | 6 +--
pkg/local_object_storage/shard/get.go | 8 ++--
pkg/local_object_storage/shard/get_test.go | 6 +--
pkg/local_object_storage/shard/head.go | 2 +-
pkg/local_object_storage/shard/head_test.go | 4 +-
pkg/local_object_storage/shard/inhume.go | 13 ++++++-
pkg/local_object_storage/shard/inhume_test.go | 2 +-
pkg/local_object_storage/shard/list.go | 3 +-
pkg/local_object_storage/shard/list_test.go | 3 +-
pkg/local_object_storage/shard/lock.go | 28 +++++++++++--
pkg/local_object_storage/shard/lock_test.go | 14 +++----
.../shard/metrics_test.go | 5 ++-
pkg/local_object_storage/shard/move.go | 16 +++++++-
pkg/local_object_storage/shard/put.go | 19 +++++++--
pkg/local_object_storage/shard/range.go | 2 +-
pkg/local_object_storage/shard/range_test.go | 2 +-
pkg/local_object_storage/shard/reload_test.go | 13 ++++---
pkg/local_object_storage/shard/restore.go | 17 ++++++--
pkg/local_object_storage/shard/select.go | 15 ++++++-
.../shard/shutdown_test.go | 2 +-
pkg/local_object_storage/shard/writecache.go | 16 +++++++-
pkg/local_object_storage/writecache/delete.go | 15 ++++++-
pkg/local_object_storage/writecache/flush.go | 36 ++++++++++-------
.../writecache/flush_test.go | 24 ++++++------
pkg/local_object_storage/writecache/init.go | 24 ++++++------
pkg/local_object_storage/writecache/mode.go | 18 +++++++--
.../writecache/options.go | 6 +--
pkg/local_object_storage/writecache/put.go | 19 +++++++--
.../writecache/storage.go | 3 +-
.../writecache/writecache.go | 14 ++++---
pkg/services/control/server/flush_cache.go | 4 +-
pkg/services/control/server/restore.go | 4 +-
pkg/services/object/delete/util.go | 2 +-
pkg/services/object/internal/client/client.go | 4 ++
pkg/services/object/put/distributed.go | 2 +-
pkg/services/object/put/local.go | 10 ++---
pkg/services/object/put/streamer.go | 4 +-
pkg/services/object/put/v2/streamer.go | 20 +++++++++-
pkg/services/object/put/validation.go | 6 +--
pkg/services/object/search/local.go | 6 ++-
pkg/services/object/search/search.go | 2 +-
pkg/services/object/search/search_test.go | 2 +-
pkg/services/object/search/service.go | 2 +-
pkg/services/object/search/util.go | 4 +-
.../object_manager/transformer/fmt.go | 4 +-
.../object_manager/transformer/transformer.go | 4 +-
.../object_manager/transformer/types.go | 2 +-
122 files changed, 863 insertions(+), 417 deletions(-)
diff --git a/cmd/frostfs-lens/internal/meta/inspect.go b/cmd/frostfs-lens/internal/meta/inspect.go
index fb0065a62..bc7f28a3a 100644
--- a/cmd/frostfs-lens/internal/meta/inspect.go
+++ b/cmd/frostfs-lens/internal/meta/inspect.go
@@ -36,7 +36,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
storageID := meta.StorageIDPrm{}
storageID.SetAddress(addr)
- resStorageID, err := db.StorageID(storageID)
+ resStorageID, err := db.StorageID(cmd.Context(), storageID)
common.ExitOnErr(cmd, common.Errf("could not check if the obj is small: %w", err))
if id := resStorageID.StorageID(); id != nil {
@@ -51,7 +51,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) {
siErr := new(object.SplitInfoError)
- res, err := db.Get(prm)
+ res, err := db.Get(cmd.Context(), prm)
if errors.As(err, &siErr) {
link, linkSet := siErr.SplitInfo().Link()
last, lastSet := siErr.SplitInfo().LastPart()
diff --git a/cmd/frostfs-node/notificator.go b/cmd/frostfs-node/notificator.go
index 9c90e052c..358b39a72 100644
--- a/cmd/frostfs-node/notificator.go
+++ b/cmd/frostfs-node/notificator.go
@@ -42,7 +42,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
for _, c := range listRes.Containers() {
selectPrm.WithContainerID(c)
- selectRes, err := n.e.Select(selectPrm)
+ selectRes, err := n.e.Select(ctx, selectPrm)
if err != nil {
log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer,
zap.Stringer("cid", c),
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 8f5a83eb0..08a202df9 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -617,20 +617,20 @@ type engineWithNotifications struct {
defaultTopic string
}
-func (e engineWithNotifications) IsLocked(address oid.Address) (bool, error) {
- return e.base.IsLocked(address)
+func (e engineWithNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) {
+ return e.base.IsLocked(ctx, address)
}
func (e engineWithNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
return e.base.Delete(ctx, tombstone, toDelete)
}
-func (e engineWithNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
- return e.base.Lock(locker, toLock)
+func (e engineWithNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
+ return e.base.Lock(ctx, locker, toLock)
}
-func (e engineWithNotifications) Put(o *objectSDK.Object) error {
- if err := e.base.Put(o); err != nil {
+func (e engineWithNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
+ if err := e.base.Put(ctx, o); err != nil {
return err
}
@@ -654,8 +654,8 @@ type engineWithoutNotifications struct {
engine *engine.StorageEngine
}
-func (e engineWithoutNotifications) IsLocked(address oid.Address) (bool, error) {
- return e.engine.IsLocked(address)
+func (e engineWithoutNotifications) IsLocked(ctx context.Context, address oid.Address) (bool, error) {
+ return e.engine.IsLocked(ctx, address)
}
func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
@@ -673,10 +673,10 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad
return err
}
-func (e engineWithoutNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
- return e.engine.Lock(locker.Container(), locker.Object(), toLock)
+func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error {
+ return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
-func (e engineWithoutNotifications) Put(o *objectSDK.Object) error {
- return engine.Put(e.engine, o)
+func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
+ return engine.Put(ctx, e.engine, o)
}
diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go
index 33373b7cc..ef99f3058 100644
--- a/pkg/core/object/fmt.go
+++ b/pkg/core/object/fmt.go
@@ -1,6 +1,7 @@
package object
import (
+ "context"
"crypto/ecdsa"
"errors"
"fmt"
@@ -42,7 +43,7 @@ type DeleteHandler interface {
// LockSource is a source of lock relations between the objects.
type LockSource interface {
// IsLocked must clarify object's lock status.
- IsLocked(address oid.Address) (bool, error)
+ IsLocked(ctx context.Context, address oid.Address) (bool, error)
}
// Locker is an object lock storage interface.
@@ -89,7 +90,7 @@ func NewFormatValidator(opts ...FormatValidatorOption) *FormatValidator {
// If unprepared is true, only fields set by user are validated.
//
// Returns nil error if the object has valid structure.
-func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
+func (v *FormatValidator) Validate(ctx context.Context, obj *object.Object, unprepared bool) error {
if obj == nil {
return errNilObject
}
@@ -117,7 +118,7 @@ func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
return fmt.Errorf("(%T) could not validate signature key: %w", v, err)
}
- if err := v.checkExpiration(obj); err != nil {
+ if err := v.checkExpiration(ctx, obj); err != nil {
return fmt.Errorf("object did not pass expiration check: %w", err)
}
@@ -128,7 +129,7 @@ func (v *FormatValidator) Validate(obj *object.Object, unprepared bool) error {
if obj = obj.Parent(); obj != nil {
// Parent object already exists.
- return v.Validate(obj, false)
+ return v.Validate(ctx, obj, false)
}
return nil
@@ -327,7 +328,7 @@ func (v *FormatValidator) fillAndValidateTombstoneMeta(o *object.Object, meta *C
var errExpired = errors.New("object has expired")
-func (v *FormatValidator) checkExpiration(obj *object.Object) error {
+func (v *FormatValidator) checkExpiration(ctx context.Context, obj *object.Object) error {
exp, err := expirationEpochAttribute(obj)
if err != nil {
if errors.Is(err, errNoExpirationEpoch) {
@@ -348,7 +349,7 @@ func (v *FormatValidator) checkExpiration(obj *object.Object) error {
addr.SetContainer(cID)
addr.SetObject(oID)
- locked, err := v.e.IsLocked(addr)
+ locked, err := v.e.IsLocked(ctx, addr)
if err != nil {
return fmt.Errorf("locking status check for an expired object: %w", err)
}
diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go
index 563c7827d..be0602540 100644
--- a/pkg/core/object/fmt_test.go
+++ b/pkg/core/object/fmt_test.go
@@ -1,6 +1,7 @@
package object
import (
+ "context"
"crypto/ecdsa"
"strconv"
"testing"
@@ -40,7 +41,7 @@ type testLockSource struct {
m map[oid.Address]bool
}
-func (t testLockSource) IsLocked(address oid.Address) (bool, error) {
+func (t testLockSource) IsLocked(_ context.Context, address oid.Address) (bool, error) {
return t.m[address], nil
}
@@ -62,20 +63,20 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, err)
t.Run("nil input", func(t *testing.T) {
- require.Error(t, v.Validate(nil, true))
+ require.Error(t, v.Validate(context.Background(), nil, true))
})
t.Run("nil identifier", func(t *testing.T) {
obj := object.New()
- require.ErrorIs(t, v.Validate(obj, false), errNilID)
+ require.ErrorIs(t, v.Validate(context.Background(), obj, false), errNilID)
})
t.Run("nil container identifier", func(t *testing.T) {
obj := object.New()
obj.SetID(oidtest.ID())
- require.ErrorIs(t, v.Validate(obj, true), errNilCID)
+ require.ErrorIs(t, v.Validate(context.Background(), obj, true), errNilCID)
})
t.Run("unsigned object", func(t *testing.T) {
@@ -83,7 +84,7 @@ func TestFormatValidator_Validate(t *testing.T) {
obj.SetContainerID(cidtest.ID())
obj.SetID(oidtest.ID())
- require.Error(t, v.Validate(obj, false))
+ require.Error(t, v.Validate(context.Background(), obj, false))
})
t.Run("correct w/ session token", func(t *testing.T) {
@@ -101,7 +102,7 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
- require.NoError(t, v.Validate(obj, false))
+ require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("correct w/o session token", func(t *testing.T) {
@@ -109,7 +110,7 @@ func TestFormatValidator_Validate(t *testing.T) {
require.NoError(t, object.SetIDWithSignature(ownerKey.PrivateKey, obj))
- require.NoError(t, v.Validate(obj, false))
+ require.NoError(t, v.Validate(context.Background(), obj, false))
})
t.Run("tombstone content", func(t *testing.T) {
@@ -236,7 +237,7 @@ func TestFormatValidator_Validate(t *testing.T) {
t.Run("invalid attribute value", func(t *testing.T) {
val := "text"
- err := v.Validate(fn(val), false)
+ err := v.Validate(context.Background(), fn(val), false)
require.Error(t, err)
})
@@ -245,7 +246,7 @@ func TestFormatValidator_Validate(t *testing.T) {
obj := fn(val)
t.Run("non-locked", func(t *testing.T) {
- err := v.Validate(obj, false)
+ err := v.Validate(context.Background(), obj, false)
require.ErrorIs(t, err, errExpired)
})
@@ -258,14 +259,14 @@ func TestFormatValidator_Validate(t *testing.T) {
addr.SetObject(oID)
ls.m[addr] = true
- err := v.Validate(obj, false)
+ err := v.Validate(context.Background(), obj, false)
require.NoError(t, err)
})
})
t.Run("alive object", func(t *testing.T) {
val := strconv.FormatUint(curEpoch, 10)
- err := v.Validate(fn(val), true)
+ err := v.Validate(context.Background(), fn(val), true)
require.NoError(t, err)
})
})
diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
index 853628fb4..5deaf5e4a 100644
--- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
+++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go
@@ -88,7 +88,7 @@ func TestBlobovnicza(t *testing.T) {
var dPrm DeletePrm
dPrm.SetAddress(addr)
- _, err := blz.Delete(dPrm)
+ _, err := blz.Delete(context.Background(), dPrm)
require.NoError(t, err)
// should return 404
diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go
index 6ce6f349c..29a587cc9 100644
--- a/pkg/local_object_storage/blobovnicza/delete.go
+++ b/pkg/local_object_storage/blobovnicza/delete.go
@@ -1,10 +1,15 @@
package blobovnicza
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -30,7 +35,13 @@ func (p *DeletePrm) SetAddress(addr oid.Address) {
// Returns an error of type apistatus.ObjectNotFound if the object to be deleted is not in blobovnicza.
//
// Should not be called in read-only configuration.
-func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) {
+func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovnicza.Delete",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
addrKey := addressKey(prm.addr)
removed := false
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
index 202807653..073e69b57 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
@@ -1,13 +1,18 @@
package blobovniczatree
import (
+ "context"
+ "encoding/hex"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -15,7 +20,14 @@ import (
//
// If blobocvnicza ID is specified, only this blobovnicza is processed.
// Otherwise, all Blobovniczas are processed descending weight.
-func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err error) {
+func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res common.DeleteRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Delete",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
if b.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}
@@ -30,7 +42,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
return res, err
}
- return b.deleteObject(blz, bPrm, prm)
+ return b.deleteObject(ctx, blz, bPrm, prm)
}
activeCache := make(map[string]struct{})
@@ -42,7 +54,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
// don't process active blobovnicza of the level twice
_, ok := activeCache[dirPath]
- res, err = b.deleteObjectFromLevel(bPrm, p, !ok, prm)
+ res, err = b.deleteObjectFromLevel(ctx, bPrm, p, !ok, prm)
if err != nil {
if !blobovnicza.IsErrNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
@@ -73,7 +85,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
// tries to delete object from particular blobovnicza.
//
// returns no error if object was removed from some blobovnicza of the same level.
-func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath string, tryActive bool, dp common.DeletePrm) (common.DeleteRes, error) {
+func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string, tryActive bool, dp common.DeletePrm) (common.DeleteRes, error) {
lvlPath := filepath.Dir(blzPath)
// try to remove from blobovnicza if it is opened
@@ -81,7 +93,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
- if res, err := b.deleteObject(v, prm, dp); err == nil {
+ if res, err := b.deleteObject(ctx, v, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza,
@@ -100,7 +112,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
b.activeMtx.RUnlock()
if ok && tryActive {
- if res, err := b.deleteObject(active.blz, prm, dp); err == nil {
+ if res, err := b.deleteObject(ctx, active.blz, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza,
@@ -125,11 +137,11 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
return common.DeleteRes{}, err
}
- return b.deleteObject(blz, prm, dp)
+ return b.deleteObject(ctx, blz, prm, dp)
}
// removes object from blobovnicza and returns common.DeleteRes.
-func (b *Blobovniczas) deleteObject(blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm, dp common.DeletePrm) (common.DeleteRes, error) {
- _, err := blz.Delete(prm)
+func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm, dp common.DeletePrm) (common.DeleteRes, error) {
+ _, err := blz.Delete(ctx, prm)
return common.DeleteRes{}, err
}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
index 0c7c61d76..ff927ccbb 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
@@ -33,7 +33,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
d, err := obj.Marshal()
require.NoError(t, err)
- putRes, err := b.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true})
+ putRes, err := b.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true})
require.NoError(t, err)
t.Run("valid but wrong storage id", func(t *testing.T) {
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
index 8b29119c6..ec302d143 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go
@@ -1,20 +1,31 @@
package blobovniczatree
import (
+ "context"
"errors"
"path/filepath"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
// Put saves object in the maximum weight blobobnicza.
//
// returns error if could not save object in any blobovnicza.
-func (b *Blobovniczas) Put(prm common.PutPrm) (common.PutRes, error) {
+func (b *Blobovniczas) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.Put",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("dont_compress", prm.DontCompress),
+ ))
+ defer span.End()
+
if b.readOnly {
return common.PutRes{}, common.ErrReadOnly
}
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index 738cd7eee..8c6766dc1 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -75,12 +75,12 @@ func TestCompression(t *testing.T) {
testPut := func(t *testing.T, b *BlobStor, i int) {
var prm common.PutPrm
prm.Object = smallObj[i]
- _, err := b.Put(prm)
+ _, err := b.Put(context.Background(), prm)
require.NoError(t, err)
prm = common.PutPrm{}
prm.Object = bigObj[i]
- _, err = b.Put(prm)
+ _, err = b.Put(context.Background(), prm)
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go
index b5d186242..801d32c1e 100644
--- a/pkg/local_object_storage/blobstor/common/storage.go
+++ b/pkg/local_object_storage/blobstor/common/storage.go
@@ -23,7 +23,7 @@ type Storage interface {
Get(context.Context, GetPrm) (GetRes, error)
GetRange(context.Context, GetRangePrm) (GetRangeRes, error)
Exists(context.Context, ExistsPrm) (ExistsRes, error)
- Put(PutPrm) (PutRes, error)
- Delete(DeletePrm) (DeleteRes, error)
+ Put(context.Context, PutPrm) (PutRes, error)
+ Delete(context.Context, DeletePrm) (DeleteRes, error)
Iterate(IteratePrm) (IterateRes, error)
}
diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go
index 8c5a7aba6..377214fb8 100644
--- a/pkg/local_object_storage/blobstor/delete.go
+++ b/pkg/local_object_storage/blobstor/delete.go
@@ -1,19 +1,31 @@
package blobstor
import (
+ "context"
+ "encoding/hex"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
-func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
+func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Delete",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.String("storage_id", hex.EncodeToString(prm.StorageID)),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
if prm.StorageID == nil {
for i := range b.storage {
- res, err := b.storage[i].Storage.Delete(prm)
+ res, err := b.storage[i].Storage.Delete(ctx, prm)
if err == nil || !errors.As(err, new(apistatus.ObjectNotFound)) {
if err == nil {
logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID)
@@ -31,7 +43,7 @@ func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
st = b.storage[0].Storage
}
- res, err := st.Delete(prm)
+ res, err := st.Delete(ctx, prm)
if err == nil {
logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID)
}
diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go
index 805d78297..f5c5fbbef 100644
--- a/pkg/local_object_storage/blobstor/exists_test.go
+++ b/pkg/local_object_storage/blobstor/exists_test.go
@@ -36,7 +36,7 @@ func TestExists(t *testing.T) {
for i := range objects {
var prm common.PutPrm
prm.Object = objects[i]
- _, err := b.Put(prm)
+ _, err := b.Put(context.Background(), prm)
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 462fbd63f..8eb0d5be9 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -196,7 +196,13 @@ func (t *FSTree) treePath(addr oid.Address) string {
}
// Delete removes the object with the specified address from the storage.
-func (t *FSTree) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
+func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.DeleteRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.Delete",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ ))
+ defer span.End()
+
if t.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}
@@ -230,7 +236,14 @@ func (t *FSTree) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exist
}
// Put puts an object in the storage.
-func (t *FSTree) Put(prm common.PutPrm) (common.PutRes, error) {
+func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.Put",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("dont_compress", prm.DontCompress),
+ ))
+ defer span.End()
+
if t.readOnly {
return common.PutRes{}, common.ErrReadOnly
}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
index b2663be21..e31f3280a 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/common.go
@@ -1,6 +1,7 @@
package blobstortest
import (
+ "context"
"math/rand"
"testing"
@@ -67,7 +68,7 @@ func prepare(t *testing.T, count int, s common.Storage, min, max uint64) []objec
prm.Object = objects[i].obj
prm.RawData = objects[i].raw
- putRes, err := s.Put(prm)
+ putRes, err := s.Put(context.Background(), prm)
require.NoError(t, err)
objects[i].storageID = putRes.StorageID
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
index 350bea96a..96d54dec3 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go
@@ -36,7 +36,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) {
prm.Object = NewObject(min + uint64(rand.Intn(int(max-min+1))))
prm.Address = objectCore.AddressOf(prm.Object)
- _, err := s.Put(prm)
+ _, err := s.Put(context.Background(), prm)
require.ErrorIs(t, err, common.ErrReadOnly)
})
t.Run("delete fails", func(t *testing.T) {
@@ -44,7 +44,7 @@ func TestControl(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[0].addr
prm.StorageID = objects[0].storageID
- _, err := s.Delete(prm)
+ _, err := s.Delete(context.Background(), prm)
require.ErrorIs(t, err, common.ErrReadOnly)
})
}
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
index ad0045316..7532a5b5f 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go
@@ -22,7 +22,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
var prm common.DeletePrm
prm.Address = oidtest.Address()
- _, err := s.Delete(prm)
+ _, err := s.Delete(context.Background(), prm)
require.Error(t, err, new(apistatus.ObjectNotFound))
})
@@ -31,7 +31,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[0].addr
prm.StorageID = objects[0].storageID
- _, err := s.Delete(prm)
+ _, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
t.Run("exists fail", func(t *testing.T) {
@@ -55,7 +55,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
var prm common.DeletePrm
prm.Address = objects[1].addr
- _, err := s.Delete(prm)
+ _, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
})
@@ -64,10 +64,10 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
prm.Address = objects[2].addr
prm.StorageID = objects[2].storageID
- _, err := s.Delete(prm)
+ _, err := s.Delete(context.Background(), prm)
require.NoError(t, err)
- _, err = s.Delete(prm)
+ _, err = s.Delete(context.Background(), prm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
})
diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
index f98cca638..83ada9607 100644
--- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
+++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go
@@ -1,6 +1,7 @@
package blobstortest
import (
+ "context"
"errors"
"testing"
@@ -22,7 +23,7 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
var delPrm common.DeletePrm
delPrm.Address = objects[2].addr
delPrm.StorageID = objects[2].storageID
- _, err := s.Delete(delPrm)
+ _, err := s.Delete(context.Background(), delPrm)
require.NoError(t, err)
objects = append(objects[:delID], objects[delID+1:]...)
diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go
index b2a7ddfb9..6488ff5fc 100644
--- a/pkg/local_object_storage/blobstor/iterate_test.go
+++ b/pkg/local_object_storage/blobstor/iterate_test.go
@@ -1,6 +1,7 @@
package blobstor
import (
+ "context"
"encoding/binary"
"os"
"testing"
@@ -63,7 +64,7 @@ func TestIterateObjects(t *testing.T) {
}
for _, v := range mObjs {
- _, err := blobStor.Put(common.PutPrm{Address: v.addr, RawData: v.data})
+ _, err := blobStor.Put(context.Background(), common.PutPrm{Address: v.addr, RawData: v.data})
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go
index 4068d742e..e435cfef4 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore.go
@@ -91,7 +91,7 @@ func (s *memstoreImpl) Exists(_ context.Context, req common.ExistsPrm) (common.E
return common.ExistsRes{Exists: exists}, nil
}
-func (s *memstoreImpl) Put(req common.PutPrm) (common.PutRes, error) {
+func (s *memstoreImpl) Put(_ context.Context, req common.PutPrm) (common.PutRes, error) {
if s.readOnly {
return common.PutRes{}, common.ErrReadOnly
}
@@ -108,7 +108,7 @@ func (s *memstoreImpl) Put(req common.PutPrm) (common.PutRes, error) {
return common.PutRes{StorageID: []byte(s.rootPath)}, nil
}
-func (s *memstoreImpl) Delete(req common.DeletePrm) (common.DeleteRes, error) {
+func (s *memstoreImpl) Delete(_ context.Context, req common.DeletePrm) (common.DeleteRes, error) {
if s.readOnly {
return common.DeleteRes{}, common.ErrReadOnly
}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
index 6482b2cff..125276290 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
@@ -28,7 +28,7 @@ func TestSimpleLifecycle(t *testing.T) {
require.NoError(t, err)
{
- _, err := s.Put(common.PutPrm{Address: addr, RawData: d, DontCompress: true})
+ _, err := s.Put(context.Background(), common.PutPrm{Address: addr, RawData: d, DontCompress: true})
require.NoError(t, err)
}
@@ -57,7 +57,7 @@ func TestSimpleLifecycle(t *testing.T) {
}
{
- _, err := s.Delete(common.DeletePrm{Address: addr})
+ _, err := s.Delete(context.Background(), common.DeletePrm{Address: addr})
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index d2359335f..c88dc85ed 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -114,7 +114,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
if err != nil {
return fmt.Errorf("marshal: %v", err)
}
- _, err = st.Put(common.PutPrm{
+ _, err = st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
})
@@ -165,7 +165,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
require.NoError(b, err)
- if _, err := st.Put(common.PutPrm{
+ if _, err := st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
}); err != nil {
@@ -202,7 +202,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
require.NoError(b, err)
- if _, err := st.Put(common.PutPrm{
+ if _, err := st.Put(context.Background(), common.PutPrm{
Address: addr,
RawData: raw,
}); err != nil {
diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go
index a4009ae43..2ae7f0fe6 100644
--- a/pkg/local_object_storage/blobstor/put.go
+++ b/pkg/local_object_storage/blobstor/put.go
@@ -1,12 +1,16 @@
package blobstor
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ErrNoPlaceFound is returned when object can't be saved to any sub-storage component
@@ -21,7 +25,14 @@ var ErrNoPlaceFound = logicerr.New("couldn't find a place to store an object")
//
// Returns any error encountered that
// did not allow to completely save the object.
-func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
+func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "BlobStor.Put",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("dont_compress", prm.DontCompress),
+ ))
+ defer span.End()
+
b.modeMtx.RLock()
defer b.modeMtx.RUnlock()
@@ -39,7 +50,7 @@ func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
for i := range b.storage {
if b.storage[i].Policy == nil || b.storage[i].Policy(prm.Object, prm.RawData) {
- res, err := b.storage[i].Storage.Put(prm)
+ res, err := b.storage[i].Storage.Put(ctx, prm)
if err == nil {
logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID)
}
diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go
index 03f64f0f1..24d742fda 100644
--- a/pkg/local_object_storage/blobstor/teststore/teststore.go
+++ b/pkg/local_object_storage/blobstor/teststore/teststore.go
@@ -176,27 +176,27 @@ func (s *TestStore) Exists(ctx context.Context, req common.ExistsPrm) (common.Ex
}
}
-func (s *TestStore) Put(req common.PutPrm) (common.PutRes, error) {
+func (s *TestStore) Put(ctx context.Context, req common.PutPrm) (common.PutRes, error) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Put != nil:
return s.overrides.Put(req)
case s.st != nil:
- return s.st.Put(req)
+ return s.st.Put(ctx, req)
default:
panic(fmt.Sprintf("unexpected storage call: Put(%+v)", req))
}
}
-func (s *TestStore) Delete(req common.DeletePrm) (common.DeleteRes, error) {
+func (s *TestStore) Delete(ctx context.Context, req common.DeletePrm) (common.DeleteRes, error) {
s.mu.RLock()
defer s.mu.RUnlock()
switch {
case s.overrides.Delete != nil:
return s.overrides.Delete(req)
case s.st != nil:
- return s.st.Delete(req)
+ return s.st.Delete(ctx, req)
default:
panic(fmt.Sprintf("unexpected storage call: Delete(%+v)", req))
}
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 0c422ccc8..9ad4fcf9c 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -308,7 +308,7 @@ loop:
e.removeShards(shardsToRemove...)
for _, p := range shardsToReload {
- err := p.sh.Reload(p.opts...)
+ err := p.sh.Reload(ctx, p.opts...)
if err != nil {
e.log.Error(logs.EngineCouldNotReloadAShard,
zap.Stringer("shard id", p.sh.ID()),
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index 91bec63a6..046968dbe 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -204,7 +204,7 @@ func TestExecBlocks(t *testing.T) {
addr := object.AddressOf(obj)
- require.NoError(t, Put(e, obj))
+ require.NoError(t, Put(context.Background(), e, obj))
// block executions
errBlock := errors.New("block exec err")
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 1f3c142a5..f9b9c9a87 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -4,11 +4,14 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -47,6 +50,13 @@ func (p *DeletePrm) WithForceRemoval() {
// on operations with that object) if WithForceRemoval option has
// been provided.
func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("force_removal", prm.forceRemoval),
+ ))
+ defer span.End()
+
err = e.execIfNotBlocked(func() error {
res, err = e.delete(ctx, prm)
return err
@@ -135,7 +145,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
}
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
- res, err := sh.Select(selectPrm)
+ res, err := sh.Select(ctx, selectPrm)
if err != nil {
e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index 259a40a7c..53c62981c 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -59,9 +59,9 @@ func TestDeleteBigObject(t *testing.T) {
defer e.Close()
for i := range children {
- require.NoError(t, Put(e, children[i]))
+ require.NoError(t, Put(context.Background(), e, children[i]))
}
- require.NoError(t, Put(e, link))
+ require.NoError(t, Put(context.Background(), e, link))
var splitErr *objectSDK.SplitInfoError
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index ddaf88d18..4d2ddc100 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -60,7 +60,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
addr := oidtest.Address()
for i := 0; i < 100; i++ {
obj := testutil.GenerateObjectWithCID(cidtest.ID())
- err := Put(e, obj)
+ err := Put(context.Background(), e, obj)
if err != nil {
b.Fatal(err)
}
@@ -69,7 +69,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
- ok, err := e.exists(addr)
+ ok, err := e.exists(context.Background(), addr)
if err != nil || ok {
b.Fatalf("%t %v", ok, err)
}
diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go
index 4ff019e4d..017b635d4 100644
--- a/pkg/local_object_storage/engine/error_test.go
+++ b/pkg/local_object_storage/engine/error_test.go
@@ -98,7 +98,7 @@ func TestErrorReporting(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
- _, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
+ _, err := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err)
@@ -132,7 +132,7 @@ func TestErrorReporting(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
- _, err := te.ng.shards[te.shards[0].id.String()].Put(prm)
+ _, err := te.ng.shards[te.shards[0].id.String()].Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err)
@@ -185,7 +185,7 @@ func TestBlobstorFailback(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
- _, err = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
+ _, err = te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err)
objs = append(objs, obj)
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index c116aeff9..291bc2b78 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -57,7 +57,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
var putPrm shard.PutPrm
putPrm.SetObject(obj)
- _, err := e.shards[sh.String()].Put(putPrm)
+ _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
require.NoError(t, err)
}
@@ -67,7 +67,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
var putPrm PutPrm
putPrm.WithObject(objects[len(objects)-1])
- _, err := e.Put(putPrm)
+ err := e.Put(context.Background(), putPrm)
require.NoError(t, err)
res, err := e.shards[ids[len(ids)-1].String()].List()
diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go
index 3a8e09a6d..6208461e9 100644
--- a/pkg/local_object_storage/engine/exists.go
+++ b/pkg/local_object_storage/engine/exists.go
@@ -10,14 +10,14 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
-func (e *StorageEngine) exists(addr oid.Address) (bool, error) {
+func (e *StorageEngine) exists(ctx context.Context, addr oid.Address) (bool, error) {
var shPrm shard.ExistsPrm
shPrm.SetAddress(addr)
alreadyRemoved := false
exists := false
e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
- res, err := sh.Exists(context.TODO(), shPrm)
+ res, err := sh.Exists(ctx, shPrm)
if err != nil {
if shard.IsErrRemoved(err) {
alreadyRemoved = true
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 7d17b50fa..683b7bde8 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -48,6 +48,12 @@ func (r GetRes) Object() *objectSDK.Object {
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Get",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
err = e.execIfNotBlocked(func() error {
res, err = e.get(ctx, prm)
return err
@@ -57,12 +63,6 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
}
func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.get",
- trace.WithAttributes(
- attribute.String("address", prm.addr.EncodeToString()),
- ))
- defer span.End()
-
if e.metrics != nil {
defer elapsed(e.metrics.AddGetDuration)()
}
diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go
index e5fd4b04f..bf00c4289 100644
--- a/pkg/local_object_storage/engine/head_test.go
+++ b/pkg/local_object_storage/engine/head_test.go
@@ -55,11 +55,11 @@ func TestHeadRaw(t *testing.T) {
putPrmLink.SetObject(link)
// put most left object in one shard
- _, err := s1.Put(putPrmLeft)
+ _, err := s1.Put(context.Background(), putPrmLeft)
require.NoError(t, err)
// put link object in another shard
- _, err = s2.Put(putPrmLink)
+ _, err = s2.Put(context.Background(), putPrmLink)
require.NoError(t, err)
// head with raw flag should return SplitInfoError
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index 696e78742..b1204ed99 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -4,12 +4,15 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -62,6 +65,9 @@ var errInhumeFailure = errors.New("inhume operation failed")
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume")
+ defer span.End()
+
err = e.execIfNotBlocked(func() error {
res, err = e.inhume(ctx, prm)
return err
@@ -82,7 +88,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
for i := range prm.addrs {
if !prm.forceRemoval {
- locked, err := e.IsLocked(prm.addrs[i])
+ locked, err := e.IsLocked(ctx, prm.addrs[i])
if err != nil {
e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
zap.Error(err),
@@ -181,13 +187,19 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh
}
// IsLocked checks whether an object is locked according to StorageEngine's state.
-func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) {
+func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.IsLocked",
+ trace.WithAttributes(
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
var locked bool
var err error
var outErr error
e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
- locked, err = h.Shard.IsLocked(addr)
+ locked, err = h.Shard.IsLocked(ctx, addr)
if err != nil {
e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("addr", addr))
outErr = err
@@ -206,7 +218,7 @@ func (e *StorageEngine) IsLocked(addr oid.Address) (bool, error) {
func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) {
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- sh.HandleExpiredTombstones(addrs)
+ sh.HandleExpiredTombstones(ctx, addrs)
select {
case <-ctx.Done():
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index 4f8c96b99..924cf518b 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -42,7 +42,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
e := testNewEngine(t).setShardsNum(t, 1).engine
defer e.Close()
- err := Put(e, parent)
+ err := Put(context.Background(), e, parent)
require.NoError(t, err)
var inhumePrm InhumePrm
@@ -51,7 +51,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
@@ -65,12 +65,12 @@ func TestStorageEngine_Inhume(t *testing.T) {
var putChild shard.PutPrm
putChild.SetObject(child)
- _, err := s1.Put(putChild)
+ _, err := s1.Put(context.Background(), putChild)
require.NoError(t, err)
var putLink shard.PutPrm
putLink.SetObject(link)
- _, err = s2.Put(putLink)
+ _, err = s2.Put(context.Background(), putLink)
require.NoError(t, err)
var inhumePrm InhumePrm
@@ -79,7 +79,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index 1261de9d4..fde799d05 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"errors"
"os"
"sort"
@@ -35,7 +36,7 @@ func TestListWithCursor(t *testing.T) {
var prm PutPrm
prm.WithObject(obj)
- _, err := e.Put(prm)
+ err := e.Put(context.Background(), prm)
require.NoError(t, err)
expected = append(expected, object.AddressWithType{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
}
diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go
index 60a1d9c9f..4562c1a57 100644
--- a/pkg/local_object_storage/engine/lock.go
+++ b/pkg/local_object_storage/engine/lock.go
@@ -4,12 +4,15 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
var errLockFailed = errors.New("lock operation failed")
@@ -20,19 +23,27 @@ var errLockFailed = errors.New("lock operation failed")
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
-func (e *StorageEngine) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Lock",
+ trace.WithAttributes(
+ attribute.String("container_id", idCnr.EncodeToString()),
+ attribute.String("locker", locker.EncodeToString()),
+ attribute.Int("locked_count", len(locked)),
+ ))
+ defer span.End()
+
return e.execIfNotBlocked(func() error {
- return e.lock(idCnr, locker, locked)
+ return e.lock(ctx, idCnr, locker, locked)
})
}
-func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
for i := range locked {
- switch e.lockSingle(idCnr, locker, locked[i], true) {
+ switch e.lockSingle(ctx, idCnr, locker, locked[i], true) {
case 1:
return logicerr.Wrap(apistatus.LockNonRegularObject{})
case 0:
- switch e.lockSingle(idCnr, locker, locked[i], false) {
+ switch e.lockSingle(ctx, idCnr, locker, locked[i], false) {
case 1:
return logicerr.Wrap(apistatus.LockNonRegularObject{})
case 0:
@@ -48,7 +59,7 @@ func (e *StorageEngine) lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error
// - 0: fail
// - 1: locking irregular object
// - 2: ok
-func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
+func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) {
// code is pretty similar to inhumeAddr, maybe unify?
root := false
var errIrregular apistatus.LockNonRegularObject
@@ -70,7 +81,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi
var existsPrm shard.ExistsPrm
existsPrm.SetAddress(addrLocked)
- exRes, err := sh.Exists(context.TODO(), existsPrm)
+ exRes, err := sh.Exists(ctx, existsPrm)
if err != nil {
var siErr *objectSDK.SplitInfoError
if !errors.As(err, &siErr) {
@@ -90,7 +101,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi
}
}
- err := sh.Lock(idCnr, locker, []oid.ID{locked})
+ err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked})
if err != nil {
e.reportShardError(sh, "could not lock object in shard", err)
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index fd3b04ef0..4c89b9226 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -99,7 +99,7 @@ func TestLockUserScenario(t *testing.T) {
id, _ := obj.ID()
objAddr.SetObject(id)
- err = Put(e, obj)
+ err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@@ -107,10 +107,10 @@ func TestLockUserScenario(t *testing.T) {
locker.WriteMembers([]oid.ID{id})
object.WriteLock(lockerObj, locker)
- err = Put(e, lockerObj)
+ err = Put(context.Background(), e, lockerObj)
require.NoError(t, err)
- err = e.Lock(cnr, lockerID, []oid.ID{id})
+ err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id})
require.NoError(t, err)
// 3.
@@ -125,7 +125,7 @@ func TestLockUserScenario(t *testing.T) {
tombObj.SetID(tombForLockID)
tombObj.SetAttributes(a)
- err = Put(e, tombObj)
+ err = Put(context.Background(), e, tombObj)
require.NoError(t, err)
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
@@ -180,7 +180,7 @@ func TestLockExpiration(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(e, obj)
+ err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
@@ -192,13 +192,13 @@ func TestLockExpiration(t *testing.T) {
lock.SetType(object.TypeLock)
lock.SetAttributes(a)
- err = Put(e, lock)
+ err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
idLock, _ := lock.ID()
- err = e.Lock(cnr, idLock, []oid.ID{id})
+ err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id})
require.NoError(t, err)
var inhumePrm InhumePrm
@@ -255,20 +255,20 @@ func TestLockForceRemoval(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(e, obj)
+ err = Put(context.Background(), e, obj)
require.NoError(t, err)
// 2.
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(object.TypeLock)
- err = Put(e, lock)
+ err = Put(context.Background(), e, lock)
require.NoError(t, err)
id, _ := obj.ID()
idLock, _ := lock.ID()
- err = e.Lock(cnr, idLock, []oid.ID{id})
+ err = e.Lock(context.Background(), cnr, idLock, []oid.ID{id})
require.NoError(t, err)
// 3.
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index aea296cc4..0543f9f15 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -12,6 +13,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -20,9 +23,6 @@ type PutPrm struct {
obj *objectSDK.Object
}
-// PutRes groups the resulting values of Put operation.
-type PutRes struct{}
-
var errPutShard = errors.New("could not put object to any shard")
// WithObject is a Put option to set object to save.
@@ -40,16 +40,22 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) {
// Returns an error if executions are blocked (see BlockExecution).
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if the object has been marked as removed.
-func (e *StorageEngine) Put(prm PutPrm) (res PutRes, err error) {
+func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
+ trace.WithAttributes(
+ attribute.String("address", object.AddressOf(prm.obj).EncodeToString()),
+ ))
+ defer span.End()
+
err = e.execIfNotBlocked(func() error {
- res, err = e.put(prm)
+ err = e.put(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
+func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
if e.metrics != nil {
defer elapsed(e.metrics.AddPutDuration)()
}
@@ -58,9 +64,9 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
// In #1146 this check was parallelized, however, it became
// much slower on fast machines for 4 shards.
- _, err := e.exists(addr)
+ _, err := e.exists(ctx, addr)
if err != nil {
- return PutRes{}, err
+ return err
}
finished := false
@@ -74,7 +80,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
return false
}
- putDone, exists := e.putToShard(context.TODO(), sh, ind, pool, addr, prm.obj)
+ putDone, exists := e.putToShard(ctx, sh, ind, pool, addr, prm.obj)
finished = putDone || exists
return finished
})
@@ -83,7 +89,7 @@ func (e *StorageEngine) put(prm PutPrm) (PutRes, error) {
err = errPutShard
}
- return PutRes{}, err
+ return err
}
// putToShard puts object to sh.
@@ -117,7 +123,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
var toMoveItPrm shard.ToMoveItPrm
toMoveItPrm.SetAddress(addr)
- _, err = sh.ToMoveIt(toMoveItPrm)
+ _, err = sh.ToMoveIt(ctx, toMoveItPrm)
if err != nil {
e.log.Warn(logs.EngineCouldNotMarkObjectForShardRelocation,
zap.Stringer("shard", sh.ID()),
@@ -132,7 +138,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
var putPrm shard.PutPrm
putPrm.SetObject(obj)
- _, err = sh.Put(putPrm)
+ _, err = sh.Put(ctx, putPrm)
if err != nil {
if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
@@ -157,11 +163,9 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
}
// Put writes provided object to local storage.
-func Put(storage *StorageEngine, obj *objectSDK.Object) error {
+func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
var putPrm PutPrm
putPrm.WithObject(obj)
- _, err := storage.Put(putPrm)
-
- return err
+ return storage.Put(ctx, putPrm)
}
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
index d365fc7b4..1ea569928 100644
--- a/pkg/local_object_storage/engine/remove_copies.go
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -129,7 +129,7 @@ func (e *StorageEngine) removeObjects(ctx context.Context, ch <-chan oid.Address
var deletePrm shard.DeletePrm
deletePrm.SetAddresses(addr)
- _, err = shards[i].Delete(deletePrm)
+ _, err = shards[i].Delete(ctx, deletePrm)
if err != nil {
return err
}
diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go
index 4415d01c8..c53e03bbf 100644
--- a/pkg/local_object_storage/engine/remove_copies_test.go
+++ b/pkg/local_object_storage/engine/remove_copies_test.go
@@ -49,10 +49,10 @@ func TestRebalance(t *testing.T) {
te.ng.mtx.RLock()
// Every 3rd object (i%3 == 0) is put to both shards, others are distributed.
if i%3 != 1 {
- _, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
+ _, err1 = te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
}
if i%3 != 2 {
- _, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
+ _, err2 = te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
}
te.ng.mtx.RUnlock()
@@ -109,8 +109,8 @@ func TestRebalanceSingleThread(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(obj)
te.ng.mtx.RLock()
- _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
- _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
+ _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
+ _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err1)
require.NoError(t, err2)
@@ -162,8 +162,8 @@ func TestRebalanceExitByContext(t *testing.T) {
prm.SetObject(objects[i])
te.ng.mtx.RLock()
- _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(prm)
- _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(prm)
+ _, err1 := te.ng.shards[te.shards[0].id.String()].Shard.Put(context.Background(), prm)
+ _, err2 := te.ng.shards[te.shards[1].id.String()].Shard.Put(context.Background(), prm)
te.ng.mtx.RUnlock()
require.NoError(t, err1)
diff --git a/pkg/local_object_storage/engine/restore.go b/pkg/local_object_storage/engine/restore.go
index 84c750cd0..7cc2eaf6c 100644
--- a/pkg/local_object_storage/engine/restore.go
+++ b/pkg/local_object_storage/engine/restore.go
@@ -1,11 +1,24 @@
package engine
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
// RestoreShard restores objects from dump to the shard with provided identifier.
//
// Returns an error if shard is not read-only.
-func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error {
+func (e *StorageEngine) RestoreShard(ctx context.Context, id *shard.ID, prm shard.RestorePrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.RestoreShard",
+ trace.WithAttributes(
+ attribute.String("shard_id", id.String()),
+ ))
+ defer span.End()
+
e.mtx.RLock()
defer e.mtx.RUnlock()
@@ -14,6 +27,6 @@ func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error {
return errShardNotFound
}
- _, err := sh.Restore(prm)
+ _, err := sh.Restore(ctx, prm)
return err
}
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go
index 7b9b8be60..e1039ea23 100644
--- a/pkg/local_object_storage/engine/select.go
+++ b/pkg/local_object_storage/engine/select.go
@@ -1,10 +1,15 @@
package engine
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// SelectPrm groups the parameters of Select operation.
@@ -38,16 +43,22 @@ func (r SelectRes) AddressList() []oid.Address {
// Returns any error encountered that did not allow to completely select the objects.
//
// Returns an error if executions are blocked (see BlockExecution).
-func (e *StorageEngine) Select(prm SelectPrm) (res SelectRes, err error) {
+func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Select",
+ trace.WithAttributes(
+ attribute.String("container_id", prm.cnr.EncodeToString()),
+ ))
+ defer span.End()
+
err = e.execIfNotBlocked(func() error {
- res, err = e._select(prm)
+ res, err = e._select(ctx, prm)
return err
})
return
}
-func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) {
+func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
if e.metrics != nil {
defer elapsed(e.metrics.AddSearchDuration)()
}
@@ -62,7 +73,7 @@ func (e *StorageEngine) _select(prm SelectPrm) (SelectRes, error) {
shPrm.SetFilters(prm.filters)
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
- res, err := sh.Select(shPrm)
+ res, err := sh.Select(ctx, shPrm)
if err != nil {
e.reportShardError(sh, "could not select objects from shard", err)
return false
@@ -133,12 +144,12 @@ func (e *StorageEngine) list(limit uint64) (SelectRes, error) {
}
// Select selects objects from local storage using provided filters.
-func Select(storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) {
+func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs object.SearchFilters) ([]oid.Address, error) {
var selectPrm SelectPrm
selectPrm.WithContainerID(cnr)
selectPrm.WithFilters(fs)
- res, err := storage.Select(selectPrm)
+ res, err := storage.Select(ctx, selectPrm)
if err != nil {
return nil, err
}
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 611c691f1..77573c9e6 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -1,6 +1,7 @@
package engine
import (
+ "context"
"strconv"
"testing"
@@ -31,7 +32,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
for i := 0; i < objCount; i++ {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
- err := Put(te.ng, obj)
+ err := Put(context.Background(), te.ng, obj)
if err != nil {
b.Fatal(err)
}
@@ -51,7 +52,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
prm.WithFilters(fs)
for i := 0; i < b.N; i++ {
- res, err := te.ng.Select(prm)
+ res, err := te.ng.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
}
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index 26600a3eb..4effb2b16 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -1,7 +1,12 @@
package engine
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// FlushWriteCachePrm groups the parameters of FlushWriteCache operation.
@@ -26,7 +31,14 @@ func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
type FlushWriteCacheRes struct{}
// FlushWriteCache flushes write-cache on a single shard.
-func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
+func (e *StorageEngine) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) (FlushWriteCacheRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.FlushWriteCache",
+ trace.WithAttributes(
+ attribute.String("shard_id", p.shardID.String()),
+ attribute.Bool("ignore_errors", p.ignoreErrors),
+ ))
+ defer span.End()
+
e.mtx.RLock()
sh, ok := e.shards[p.shardID.String()]
e.mtx.RUnlock()
@@ -38,5 +50,5 @@ func (e *StorageEngine) FlushWriteCache(p FlushWriteCachePrm) (FlushWriteCacheRe
var prm shard.FlushWriteCachePrm
prm.SetIgnoreErrors(p.ignoreErrors)
- return FlushWriteCacheRes{}, sh.FlushWriteCache(prm)
+ return FlushWriteCacheRes{}, sh.FlushWriteCache(ctx, prm)
}
diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go
index 17f3b3893..b67e748b3 100644
--- a/pkg/local_object_storage/metabase/control_test.go
+++ b/pkg/local_object_storage/metabase/control_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -55,6 +56,6 @@ func metaExists(db *meta.DB, addr oid.Address) (bool, error) {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(addr)
- res, err := db.Exists(existsPrm)
+ res, err := db.Exists(context.Background(), existsPrm)
return res.Exists(), err
}
diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go
index d93bc436b..17a593b6d 100644
--- a/pkg/local_object_storage/metabase/counter_test.go
+++ b/pkg/local_object_storage/metabase/counter_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -38,7 +39,7 @@ func TestCounters(t *testing.T) {
for i := 0; i < objCount; i++ {
prm.SetObject(oo[i])
- _, err = db.Put(prm)
+ _, err = db.Put(context.Background(), prm)
require.NoError(t, err)
c, err = db.ObjectCounters()
@@ -58,7 +59,7 @@ func TestCounters(t *testing.T) {
for i := objCount - 1; i >= 0; i-- {
prm.SetAddresses(objectcore.AddressOf(oo[i]))
- res, err := db.Delete(prm)
+ res, err := db.Delete(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, uint64(1), res.AvailableObjectsRemoved())
@@ -89,7 +90,7 @@ func TestCounters(t *testing.T) {
prm.SetTombstoneAddress(oidtest.Address())
prm.SetAddresses(inhumedObjs...)
- res, err := db.Inhume(prm)
+ res, err := db.Inhume(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, uint64(len(inhumedObjs)), res.AvailableInhumed())
@@ -159,7 +160,7 @@ func TestCounters(t *testing.T) {
prm.SetTombstoneAddress(oidtest.Address())
prm.SetAddresses(inhumedObjs...)
- _, err = db.Inhume(prm)
+ _, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
c, err = db.ObjectCounters()
@@ -223,7 +224,7 @@ func TestCounters_Expired(t *testing.T) {
inhumePrm.SetGCMark()
inhumePrm.SetAddresses(oo[0])
- inhumeRes, err := db.Inhume(inhumePrm)
+ inhumeRes, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Equal(t, uint64(1), inhumeRes.AvailableInhumed())
@@ -240,7 +241,7 @@ func TestCounters_Expired(t *testing.T) {
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(oo[0])
- deleteRes, err := db.Delete(deletePrm)
+ deleteRes, err := db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
require.Zero(t, deleteRes.AvailableObjectsRemoved())
@@ -257,7 +258,7 @@ func TestCounters_Expired(t *testing.T) {
deletePrm.SetAddresses(oo[0])
- deleteRes, err = db.Delete(deletePrm)
+ deleteRes, err = db.Delete(context.Background(), deletePrm)
require.NoError(t, err)
require.Equal(t, uint64(1), deleteRes.AvailableObjectsRemoved())
@@ -284,7 +285,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*object.Ob
oo = append(oo, o)
prm.SetObject(o)
- _, err = db.Put(prm)
+ _, err = db.Put(context.Background(), prm)
require.NoError(t, err)
c, err := db.ObjectCounters()
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 79f870372..990d3997b 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -2,15 +2,19 @@ package meta
import (
"bytes"
+ "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// DeletePrm groups the parameters of Delete operation.
@@ -65,7 +69,13 @@ type referenceNumber struct {
type referenceCounter map[string]*referenceNumber
// Delete removed object records from metabase indexes.
-func (db *DB) Delete(prm DeletePrm) (DeleteRes, error) {
+func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Delete",
+ trace.WithAttributes(
+ attribute.Int("addr_count", len(prm.addrs)),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index ee161a881..d2a4bfa7b 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"errors"
"testing"
@@ -139,6 +140,6 @@ func metaDelete(db *meta.DB, addrs ...oid.Address) error {
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(addrs...)
- _, err := db.Delete(deletePrm)
+ _, err := db.Delete(context.Background(), deletePrm)
return err
}
diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go
index 686b65880..cfd37b0d2 100644
--- a/pkg/local_object_storage/metabase/exists.go
+++ b/pkg/local_object_storage/metabase/exists.go
@@ -1,15 +1,19 @@
package meta
import (
+ "context"
"fmt"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ExistsPrm groups the parameters of Exists operation.
@@ -39,7 +43,13 @@ func (p ExistsRes) Exists() bool {
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (db *DB) Exists(prm ExistsPrm) (res ExistsRes, err error) {
+func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Exists",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index c0feda06c..fff32d6ad 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -1,14 +1,18 @@
package meta
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// GetPrm groups the parameters of Get operation.
@@ -46,7 +50,14 @@ func (r GetRes) Header() *objectSDK.Object {
// Returns an error of type apistatus.ObjectNotFound if object is missing in DB.
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (db *DB) Get(prm GetPrm) (res GetRes, err error) {
+func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Get",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ attribute.Bool("raw", prm.raw),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index a242a099a..4d2a7682d 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -2,6 +2,7 @@ package meta_test
import (
"bytes"
+ "context"
"fmt"
"os"
"runtime"
@@ -132,7 +133,7 @@ func TestDB_Get(t *testing.T) {
var prm meta.InhumePrm
prm.SetAddresses(obj)
- _, err = db.Inhume(prm)
+ _, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
_, err = metaGet(db, obj, false)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
@@ -216,7 +217,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
getPrm.SetAddress(addrs[counter%len(addrs)])
counter++
- _, err := db.Get(getPrm)
+ _, err := db.Get(context.Background(), getPrm)
if err != nil {
b.Fatal(err)
}
@@ -235,7 +236,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
var getPrm meta.GetPrm
getPrm.SetAddress(addrs[i%len(addrs)])
- _, err := db.Get(getPrm)
+ _, err := db.Get(context.Background(), getPrm)
if err != nil {
b.Fatal(err)
}
@@ -248,6 +249,6 @@ func metaGet(db *meta.DB, addr oid.Address, raw bool) (*objectSDK.Object, error)
prm.SetAddress(addr)
prm.SetRaw(raw)
- res, err := db.Get(prm)
+ res, err := db.Get(context.Background(), prm)
return res.Header(), err
}
diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go
index b8b665541..8cd09e3f7 100644
--- a/pkg/local_object_storage/metabase/graveyard_test.go
+++ b/pkg/local_object_storage/metabase/graveyard_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -68,7 +69,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1))
inhumePrm.SetGCMark()
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var counter int
@@ -138,14 +139,14 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
inhumePrm.SetAddresses(object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetGCMark()
// inhume with GC mark
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var (
@@ -225,7 +226,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetTombstoneAddress(addrTombstone)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
expectedGraveyard := []oid.Address{
@@ -320,7 +321,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
object.AddressOf(obj3), object.AddressOf(obj4))
inhumePrm.SetGCMark()
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
expectedGarbage := []oid.Address{
@@ -404,7 +405,7 @@ func TestDB_DropGraves(t *testing.T) {
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
buriedTS := make([]meta.TombstonedObject, 0)
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index b6e6cadf1..a6887a33b 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -2,9 +2,11 @@ package meta
import (
"bytes"
+ "context"
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -118,7 +120,10 @@ var ErrLockObjectRemoval = logicerr.New("lock object removal")
//
// NOTE: Marks any object with GC mark (despite any prohibitions on operations
// with that object) if WithForceGCMark option has been provided.
-func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) {
+func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Inhume")
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go
index b7ee5ef29..0f0774227 100644
--- a/pkg/local_object_storage/metabase/inhume_test.go
+++ b/pkg/local_object_storage/metabase/inhume_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -50,40 +51,40 @@ func TestInhumeTombOnTomb(t *testing.T) {
inhumePrm.SetTombstoneAddress(addr2)
// inhume addr1 via addr2
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
existsPrm.SetAddress(addr1)
// addr1 should become inhumed {addr1:addr2}
- _, err = db.Exists(existsPrm)
+ _, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
inhumePrm.SetAddresses(addr3)
inhumePrm.SetTombstoneAddress(addr1)
// try to inhume addr3 via addr1
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
// record with {addr1:addr2} should be removed from graveyard
// as a tomb-on-tomb; metabase should return ObjectNotFound
// NOT ObjectAlreadyRemoved since that record has been removed
// from graveyard but addr1 is still marked with GC
- _, err = db.Exists(existsPrm)
+ _, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
existsPrm.SetAddress(addr3)
// addr3 should be inhumed {addr3: addr1}
- _, err = db.Exists(existsPrm)
+ _, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved))
inhumePrm.SetAddresses(addr1)
inhumePrm.SetTombstoneAddress(oidtest.Address())
// try to inhume addr1 (which is already a tombstone in graveyard)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
existsPrm.SetAddress(addr1)
@@ -91,7 +92,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
// record with addr1 key should not appear in graveyard
// (tomb can not be inhumed) but should be kept as object
// with GC mark
- _, err = db.Exists(existsPrm)
+ _, err = db.Exists(context.Background(), existsPrm)
require.ErrorAs(t, err, new(apistatus.ObjectNotFound))
}
@@ -100,13 +101,13 @@ func TestInhumeLocked(t *testing.T) {
locked := oidtest.Address()
- err := db.Lock(locked.Container(), oidtest.ID(), []oid.ID{locked.Object()})
+ err := db.Lock(context.Background(), locked.Container(), oidtest.ID(), []oid.ID{locked.Object()})
require.NoError(t, err)
var prm meta.InhumePrm
prm.SetAddresses(locked)
- _, err = db.Inhume(prm)
+ _, err = db.Inhume(context.Background(), prm)
var e apistatus.ObjectLocked
require.ErrorAs(t, err, &e)
@@ -117,6 +118,6 @@ func metaInhume(db *meta.DB, target, tomb oid.Address) error {
inhumePrm.SetAddresses(target)
inhumePrm.SetTombstoneAddress(tomb)
- _, err := db.Inhume(inhumePrm)
+ _, err := db.Inhume(context.Background(), inhumePrm)
return err
}
diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go
index 6b3a3612d..e7d6ad04f 100644
--- a/pkg/local_object_storage/metabase/iterators_test.go
+++ b/pkg/local_object_storage/metabase/iterators_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"strconv"
"testing"
@@ -34,7 +35,7 @@ func TestDB_IterateExpired(t *testing.T) {
expiredLocked := putWithExpiration(t, db, object.TypeRegular, epoch-1)
- require.NoError(t, db.Lock(expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()}))
+ require.NoError(t, db.Lock(context.Background(), expiredLocked.Container(), oidtest.ID(), []oid.ID{expiredLocked.Object()}))
err := db.IterateExpired(epoch, func(exp *meta.ExpiredObject) error {
if addr, ok := mAlive[exp.Type()]; ok {
@@ -81,13 +82,13 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) {
prm.SetAddresses(protected1, protected2, protectedLocked)
prm.SetTombstoneAddress(ts)
- _, err = db.Inhume(prm)
+ _, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
prm.SetAddresses(garbage)
prm.SetGCMark()
- _, err = db.Inhume(prm)
+ _, err = db.Inhume(context.Background(), prm)
require.NoError(t, err)
var handled []oid.Address
@@ -107,7 +108,7 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) {
require.Contains(t, handled, protected2)
require.Contains(t, handled, protectedLocked)
- err = db.Lock(protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
+ err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()})
require.NoError(t, err)
handled = handled[:0]
diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go
index 2dcc85d40..5c3c9720d 100644
--- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go
@@ -2,14 +2,18 @@ package meta
import (
"bytes"
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
var bucketNameLocked = []byte{lockedPrefix}
@@ -30,7 +34,15 @@ func bucketNameLockers(idCnr cid.ID, key []byte) []byte {
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
-func (db *DB) Lock(cnr cid.ID, locker oid.ID, locked []oid.ID) error {
+func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid.ID) error {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Lock",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("locker", locker.EncodeToString()),
+ attribute.Int("locked_count", len(locked)),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
@@ -266,7 +278,13 @@ func (i IsLockedRes) Locked() bool {
// object is considered as non-locked.
//
// Returns only non-logical errors related to underlying database.
-func (db *DB) IsLocked(prm IsLockedPrm) (res IsLockedRes, err error) {
+func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, err error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IsLocked",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index efa9fba06..1d6ea6ffb 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -20,8 +21,8 @@ func TestDB_Lock(t *testing.T) {
db := newDB(t)
t.Run("empty locked list", func(t *testing.T) {
- require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, nil) })
- require.Panics(t, func() { _ = db.Lock(cnr, oid.ID{}, []oid.ID{}) })
+ require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
+ require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, []oid.ID{}) })
})
t.Run("(ir)regular", func(t *testing.T) {
@@ -44,7 +45,7 @@ func TestDB_Lock(t *testing.T) {
id, _ := obj.ID()
// try to lock it
- err = db.Lock(cnr, oidtest.ID(), []oid.ID{id})
+ err = db.Lock(context.Background(), cnr, oidtest.ID(), []oid.ID{id})
if typ == object.TypeRegular {
require.NoError(t, err, typ)
} else {
@@ -65,27 +66,27 @@ func TestDB_Lock(t *testing.T) {
// check locking relation
inhumePrm.SetAddresses(objAddr)
- _, err := db.Inhume(inhumePrm)
+ _, err := db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
inhumePrm.SetTombstoneAddress(oidtest.Address())
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
// try to remove lock object
inhumePrm.SetAddresses(lockAddr)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.Error(t, err)
// check that locking relation has not been
// dropped
inhumePrm.SetAddresses(objAddr)
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
inhumePrm.SetTombstoneAddress(oidtest.Address())
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
})
@@ -105,7 +106,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetForceGCMark()
inhumePrm.SetLockObjectHandling()
- res, err := db.Inhume(inhumePrm)
+ res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 1)
require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0])
@@ -117,7 +118,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetGCMark()
// now we can inhume the object
- _, err = db.Inhume(inhumePrm)
+ _, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
})
@@ -134,7 +135,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetAddresses(objectcore.AddressOf(lockObj))
inhumePrm.SetLockObjectHandling()
- res, err := db.Inhume(inhumePrm)
+ res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 1)
require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0])
@@ -151,7 +152,7 @@ func TestDB_Lock(t *testing.T) {
for i := 0; i < objsNum; i++ {
inhumePrm.SetAddresses(objectcore.AddressOf(objs[i]))
- res, err = db.Inhume(inhumePrm)
+ res, err = db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 0)
}
@@ -164,7 +165,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetForceGCMark()
inhumePrm.SetAddresses(objectcore.AddressOf(lockObj))
- res, err := db.Inhume(inhumePrm)
+ res, err := db.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
require.Len(t, res.DeletedLockObjects(), 0)
})
@@ -184,7 +185,7 @@ func TestDB_Lock_Expired(t *testing.T) {
require.ErrorIs(t, err, meta.ErrObjectIsExpired)
// lock the obj
- require.NoError(t, db.Lock(addr.Container(), oidtest.ID(), []oid.ID{addr.Object()}))
+ require.NoError(t, db.Lock(context.Background(), addr.Container(), oidtest.ID(), []oid.ID{addr.Object()}))
// object is expired but locked, thus, must be available
_, err = metaGet(db, addr, false)
@@ -202,7 +203,7 @@ func TestDB_IsLocked(t *testing.T) {
for _, obj := range objs {
prm.SetAddress(objectcore.AddressOf(obj))
- res, err := db.IsLocked(prm)
+ res, err := db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.True(t, res.Locked())
@@ -212,7 +213,7 @@ func TestDB_IsLocked(t *testing.T) {
prm.SetAddress(oidtest.Address())
- res, err := db.IsLocked(prm)
+ res, err := db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Locked())
@@ -224,12 +225,12 @@ func TestDB_IsLocked(t *testing.T) {
var putPrm meta.PutPrm
putPrm.SetObject(obj)
- _, err = db.Put(putPrm)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
prm.SetAddress(objectcore.AddressOf(obj))
- res, err = db.IsLocked(prm)
+ res, err = db.IsLocked(context.Background(), prm)
require.NoError(t, err)
require.False(t, res.Locked())
@@ -260,7 +261,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*object.Ob
err := putBig(db, lockObj)
require.NoError(t, err)
- err = db.Lock(cnr, lockID, lockedObjIDs)
+ err = db.Lock(context.Background(), cnr, lockID, lockedObjIDs)
require.NoError(t, err)
return lockedObjs, lockObj
diff --git a/pkg/local_object_storage/metabase/movable.go b/pkg/local_object_storage/metabase/movable.go
index e6990dc54..412c46393 100644
--- a/pkg/local_object_storage/metabase/movable.go
+++ b/pkg/local_object_storage/metabase/movable.go
@@ -1,10 +1,14 @@
package meta
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ToMoveItPrm groups the parameters of ToMoveIt operation.
@@ -48,7 +52,13 @@ func (p MovableRes) AddressList() []oid.Address {
// ToMoveIt marks objects to move it into another shard. This useful for
// faster HRW fetching.
-func (db *DB) ToMoveIt(prm ToMoveItPrm) (res ToMoveItRes, err error) {
+func (db *DB) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (res ToMoveItRes, err error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.ToMoveIt",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/metabase/movable_test.go b/pkg/local_object_storage/metabase/movable_test.go
index 6918dec29..51e7e6d74 100644
--- a/pkg/local_object_storage/metabase/movable_test.go
+++ b/pkg/local_object_storage/metabase/movable_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -61,7 +62,7 @@ func metaToMoveIt(db *meta.DB, addr oid.Address) error {
var toMovePrm meta.ToMoveItPrm
toMovePrm.SetAddress(addr)
- _, err := db.ToMoveIt(toMovePrm)
+ _, err := db.ToMoveIt(context.Background(), toMovePrm)
return err
}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index b0fea6535..2c78bda0f 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -1,11 +1,13 @@
package meta
import (
+ "context"
"encoding/binary"
"errors"
"fmt"
gio "io"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util"
@@ -14,6 +16,8 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type (
@@ -52,7 +56,13 @@ var (
//
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been placed in graveyard.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
-func (db *DB) Put(prm PutPrm) (res PutRes, err error) {
+func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Put",
+ trace.WithAttributes(
+ attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index 837d931ae..a3a071d19 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"runtime"
"strconv"
"testing"
@@ -117,7 +118,7 @@ func metaPut(db *meta.DB, obj *objectSDK.Object, id []byte) error {
putPrm.SetObject(obj)
putPrm.SetStorageID(id)
- _, err := db.Put(putPrm)
+ _, err := db.Put(context.Background(), putPrm)
return err
}
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 74c261d35..ecd83f863 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -1,17 +1,21 @@
package meta
import (
+ "context"
"encoding/binary"
"errors"
"fmt"
"strings"
v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -56,7 +60,13 @@ func (r SelectRes) AddressList() []oid.Address {
}
// Select returns list of addresses of objects that match search filters.
-func (db *DB) Select(prm SelectPrm) (res SelectRes, err error) {
+func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.Select",
+ trace.WithAttributes(
+ attribute.String("container_id", prm.cnr.EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 5d4cc75e5..386797529 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"encoding/hex"
"strconv"
"testing"
@@ -829,7 +830,7 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
prm.SetFilters(fs)
for i := 0; i < b.N; i++ {
- res, err := db.Select(prm)
+ res, err := db.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
}
@@ -844,6 +845,6 @@ func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.A
prm.SetFilters(fs)
prm.SetContainerID(cnr)
- res, err := db.Select(prm)
+ res, err := db.Select(context.Background(), prm)
return res.AddressList(), err
}
diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go
index ae309d4b2..794879a3f 100644
--- a/pkg/local_object_storage/metabase/storage_id.go
+++ b/pkg/local_object_storage/metabase/storage_id.go
@@ -1,11 +1,15 @@
package meta
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// StorageIDPrm groups the parameters of StorageID operation.
@@ -30,7 +34,13 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns storage descriptor for objects from the blobstor.
// It is put together with the object can makes get/delete operation faster.
-func (db *DB) StorageID(prm StorageIDPrm) (res StorageIDRes, err error) {
+func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.StorageID",
+ trace.WithAttributes(
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index f8185abee..5b27cdc87 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -1,6 +1,7 @@
package meta_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -63,6 +64,6 @@ func metaStorageID(db *meta.DB, addr oid.Address) ([]byte, error) {
var sidPrm meta.StorageIDPrm
sidPrm.SetAddress(addr)
- r, err := db.StorageID(sidPrm)
+ r, err := db.StorageID(context.Background(), sidPrm)
return r.StorageID(), err
}
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 3d0f72922..e74f235f8 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -80,7 +81,10 @@ func (s *Shard) Open() error {
type metabaseSynchronizer Shard
func (x *metabaseSynchronizer) Init() error {
- return (*Shard)(x).refillMetabase()
+ ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init")
+ defer span.End()
+
+ return (*Shard)(x).refillMetabase(ctx)
}
// Init initializes all Shard's components.
@@ -158,7 +162,7 @@ func (s *Shard) Init(ctx context.Context) error {
return nil
}
-func (s *Shard) refillMetabase() error {
+func (s *Shard) refillMetabase(ctx context.Context) error {
err := s.metaBase.Reset()
if err != nil {
return fmt.Errorf("could not reset metabase: %w", err)
@@ -177,9 +181,9 @@ func (s *Shard) refillMetabase() error {
var err error
switch obj.Type() {
case objectSDK.TypeTombstone:
- err = s.refillTombstoneObject(obj)
+ err = s.refillTombstoneObject(ctx, obj)
case objectSDK.TypeLock:
- err = s.refillLockObject(obj)
+ err = s.refillLockObject(ctx, obj)
default:
}
if err != nil {
@@ -190,7 +194,7 @@ func (s *Shard) refillMetabase() error {
mPrm.SetObject(obj)
mPrm.SetStorageID(descriptor)
- _, err = s.metaBase.Put(mPrm)
+ _, err = s.metaBase.Put(ctx, mPrm)
if err != nil && !meta.IsErrRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
return err
}
@@ -209,7 +213,7 @@ func (s *Shard) refillMetabase() error {
return nil
}
-func (s *Shard) refillLockObject(obj *objectSDK.Object) error {
+func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
var lock objectSDK.Lock
if err := lock.Unmarshal(obj.Payload()); err != nil {
return fmt.Errorf("could not unmarshal lock content: %w", err)
@@ -220,14 +224,14 @@ func (s *Shard) refillLockObject(obj *objectSDK.Object) error {
cnr, _ := obj.ContainerID()
id, _ := obj.ID()
- err := s.metaBase.Lock(cnr, id, locked)
+ err := s.metaBase.Lock(ctx, cnr, id, locked)
if err != nil {
return fmt.Errorf("could not lock objects: %w", err)
}
return nil
}
-func (s *Shard) refillTombstoneObject(obj *objectSDK.Object) error {
+func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object) error {
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(obj.Payload()); err != nil {
@@ -250,7 +254,7 @@ func (s *Shard) refillTombstoneObject(obj *objectSDK.Object) error {
inhumePrm.SetTombstoneAddress(tombAddr)
inhumePrm.SetAddresses(tombMembers...)
- _, err := s.metaBase.Inhume(inhumePrm)
+ _, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
return fmt.Errorf("could not inhume objects: %w", err)
}
@@ -290,7 +294,10 @@ func (s *Shard) Close() error {
// Reload reloads configuration portions that are necessary.
// If a config option is invalid, it logs an error and returns nil.
// If there was a problem with applying new configuration, an error is returned.
-func (s *Shard) Reload(opts ...Option) error {
+func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Reload")
+ defer span.End()
+
// Do not use defaultCfg here missing options need not be reloaded.
var c cfg
for i := range opts {
@@ -314,7 +321,7 @@ func (s *Shard) Reload(opts ...Option) error {
// Here we refill metabase only if a new instance was opened. This is a feature,
// we don't want to hang for some time just because we forgot to change
// config after the node was updated.
- err = s.refillMetabase()
+ err = s.refillMetabase(ctx)
} else {
err = s.metaBase.Init()
}
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index 50ea20bb8..170052d63 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -126,6 +126,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}
sh := New(
+ WithID(NewIDFromBytes([]byte{})),
WithBlobStorOptions(blobOpts...),
WithPiloramaOptions(pilorama.WithPath(filepath.Join(dir, "pilorama"))),
WithMetaBaseOptions(meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(epochState{})))
@@ -138,12 +139,12 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
var putPrm PutPrm
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
require.NoError(t, sh.Close())
addr := object.AddressOf(obj)
- _, err = fsTree.Put(common.PutPrm{Address: addr, RawData: []byte("not an object")})
+ _, err = fsTree.Put(context.Background(), common.PutPrm{Address: addr, RawData: []byte("not an object")})
require.NoError(t, err)
sh = New(
@@ -245,13 +246,13 @@ func TestRefillMetabase(t *testing.T) {
for _, v := range mObjs {
putPrm.SetObject(v.obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
}
putPrm.SetObject(tombObj)
- _, err = sh.Put(putPrm)
+ _, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// LOCK object handling
@@ -263,11 +264,11 @@ func TestRefillMetabase(t *testing.T) {
objectSDK.WriteLock(lockObj, lock)
putPrm.SetObject(lockObj)
- _, err = sh.Put(putPrm)
+ _, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
lockID, _ := lockObj.ID()
- require.NoError(t, sh.Lock(cnrLocked, lockID, locked))
+ require.NoError(t, sh.Lock(context.Background(), cnrLocked, lockID, locked))
var inhumePrm InhumePrm
inhumePrm.SetTarget(object.AddressOf(tombObj), tombMembers...)
@@ -368,7 +369,7 @@ func TestRefillMetabase(t *testing.T) {
checkObj(object.AddressOf(tombObj), nil)
checkTombMembers(false)
- err = sh.refillMetabase()
+ err = sh.refillMetabase(context.Background())
require.NoError(t, err)
c, err = sh.metaBase.ObjectCounters()
diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go
index ed05f9982..f086aa30f 100644
--- a/pkg/local_object_storage/shard/delete.go
+++ b/pkg/local_object_storage/shard/delete.go
@@ -1,13 +1,17 @@
package shard
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -28,14 +32,21 @@ func (p *DeletePrm) SetAddresses(addr ...oid.Address) {
// Delete removes data from the shard's writeCache, metaBase and
// blobStor.
-func (s *Shard) Delete(prm DeletePrm) (DeleteRes, error) {
+func (s *Shard) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Delete",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Int("addr_count", len(prm.addr)),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
- return s.delete(prm)
+ return s.delete(ctx, prm)
}
-func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
+func (s *Shard) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
if s.info.Mode.ReadOnly() {
return DeleteRes{}, ErrReadOnlyMode
} else if s.info.Mode.NoMetabase() {
@@ -48,7 +59,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
for i := range prm.addr {
if s.hasWriteCache() {
- err := s.writeCache.Delete(prm.addr[i])
+ err := s.writeCache.Delete(ctx, prm.addr[i])
if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) {
s.log.Warn(logs.ShardCantDeleteObjectFromWriteCache, zap.String("error", err.Error()))
}
@@ -57,7 +68,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
var sPrm meta.StorageIDPrm
sPrm.SetAddress(prm.addr[i])
- res, err := s.metaBase.StorageID(sPrm)
+ res, err := s.metaBase.StorageID(ctx, sPrm)
if err != nil {
s.log.Debug(logs.ShardCantGetStorageIDFromMetabase,
zap.Stringer("object", prm.addr[i]),
@@ -74,7 +85,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
var delPrm meta.DeletePrm
delPrm.SetAddresses(prm.addr...)
- res, err := s.metaBase.Delete(delPrm)
+ res, err := s.metaBase.Delete(ctx, delPrm)
if err != nil {
return DeleteRes{}, err // stop on metabase error ?
}
@@ -99,7 +110,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
id := smalls[prm.addr[i]]
delPrm.StorageID = id
- _, err = s.blobStor.Delete(delPrm)
+ _, err = s.blobStor.Delete(ctx, delPrm)
if err != nil {
s.log.Debug(logs.ShardCantRemoveObjectFromBlobStor,
zap.Stringer("object_address", prm.addr[i]),
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index c37dfa285..9646e9aa0 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -43,13 +43,13 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
var delPrm shard.DeletePrm
delPrm.SetAddresses(object.AddressOf(obj))
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
_, err = testGet(t, sh, getPrm, hasWriteCache)
require.NoError(t, err)
- _, err = sh.Delete(delPrm)
+ _, err = sh.Delete(context.TODO(), delPrm)
require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
@@ -67,13 +67,13 @@ func testShardDelete(t *testing.T, hasWriteCache bool) {
var delPrm shard.DeletePrm
delPrm.SetAddresses(object.AddressOf(obj))
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
- _, err = sh.Delete(delPrm)
+ _, err = sh.Delete(context.Background(), delPrm)
require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
diff --git a/pkg/local_object_storage/shard/dump_test.go b/pkg/local_object_storage/shard/dump_test.go
index 9d585cc06..921717204 100644
--- a/pkg/local_object_storage/shard/dump_test.go
+++ b/pkg/local_object_storage/shard/dump_test.go
@@ -104,7 +104,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var prm shard.PutPrm
prm.SetObject(objects[i])
- _, err := sh.Put(prm)
+ _, err := sh.Put(context.Background(), prm)
require.NoError(t, err)
}
@@ -129,13 +129,13 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
t.Run("empty dump", func(t *testing.T) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(outEmpty)
- res, err := sh.Restore(restorePrm)
+ res, err := sh.Restore(context.Background(), restorePrm)
require.NoError(t, err)
require.Equal(t, 0, res.Count())
})
t.Run("invalid path", func(t *testing.T) {
- _, err := sh.Restore(*new(shard.RestorePrm))
+ _, err := sh.Restore(context.Background(), *new(shard.RestorePrm))
require.ErrorIs(t, err, os.ErrNotExist)
})
@@ -147,7 +147,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
- _, err := sh.Restore(restorePrm)
+ _, err := sh.Restore(context.Background(), restorePrm)
require.ErrorIs(t, err, shard.ErrInvalidMagic)
})
@@ -162,7 +162,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
- _, err := sh.Restore(restorePrm)
+ _, err := sh.Restore(context.Background(), restorePrm)
require.ErrorIs(t, err, io.ErrUnexpectedEOF)
})
t.Run("incomplete object data", func(t *testing.T) {
@@ -173,7 +173,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
- _, err := sh.Restore(restorePrm)
+ _, err := sh.Restore(context.Background(), restorePrm)
require.ErrorIs(t, err, io.EOF)
})
t.Run("invalid object", func(t *testing.T) {
@@ -184,7 +184,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
var restorePrm shard.RestorePrm
restorePrm.WithPath(out)
- _, err := sh.Restore(restorePrm)
+ _, err := sh.Restore(context.Background(), restorePrm)
require.Error(t, err)
t.Run("skip errors", func(t *testing.T) {
@@ -195,7 +195,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
restorePrm.WithPath(out)
restorePrm.WithIgnoreErrors(true)
- res, err := sh.Restore(restorePrm)
+ res, err := sh.Restore(context.Background(), restorePrm)
require.NoError(t, err)
require.Equal(t, objCount, res.Count())
require.Equal(t, 2, res.FailCount())
@@ -208,7 +208,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
t.Run("must allow write", func(t *testing.T) {
require.NoError(t, sh.SetMode(mode.ReadOnly))
- _, err := sh.Restore(prm)
+ _, err := sh.Restore(context.Background(), prm)
require.ErrorIs(t, err, shard.ErrReadOnlyMode)
})
@@ -234,7 +234,7 @@ func TestStream(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(objects[i])
- _, err := sh1.Put(prm)
+ _, err := sh1.Put(context.Background(), prm)
require.NoError(t, err)
}
@@ -269,7 +269,7 @@ func TestStream(t *testing.T) {
}
func checkRestore(t *testing.T, sh *shard.Shard, prm shard.RestorePrm, objects []*objectSDK.Object) {
- res, err := sh.Restore(prm)
+ res, err := sh.Restore(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, len(objects), res.Count())
@@ -333,7 +333,7 @@ func TestDumpIgnoreErrors(t *testing.T) {
var prm shard.PutPrm
prm.SetObject(objects[i])
- _, err := sh.Put(prm)
+ _, err := sh.Put(context.Background(), prm)
require.NoError(t, err)
}
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index 76e4347d4..66c61fccc 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -3,9 +3,12 @@ package shard
import (
"context"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ExistsPrm groups the parameters of Exists operation.
@@ -36,6 +39,13 @@ func (p ExistsRes) Exists() bool {
// Returns an error of type apistatus.ObjectAlreadyRemoved if object has been marked as removed.
// Returns the object.ErrObjectIsExpired if the object is presented but already expired.
func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Exists",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
var exists bool
var err error
@@ -54,7 +64,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
existsPrm.SetAddress(prm.addr)
var res meta.ExistsRes
- res, err = s.metaBase.Exists(existsPrm)
+ res, err = s.metaBase.Exists(ctx, existsPrm)
exists = res.Exists()
}
diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go
index 5ea9ecedf..86995cd06 100644
--- a/pkg/local_object_storage/shard/gc.go
+++ b/pkg/local_object_storage/shard/gc.go
@@ -234,7 +234,7 @@ func (s *Shard) removeGarbage() {
deletePrm.SetAddresses(buf...)
// delete accumulated objects
- _, err = s.delete(deletePrm)
+ _, err = s.delete(context.TODO(), deletePrm)
if err != nil {
s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
zap.String("error", err.Error()),
@@ -320,7 +320,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
inhumePrm.SetGCMark()
// inhume the collected objects
- res, err := s.metaBase.Inhume(inhumePrm)
+ res, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
zap.String("error", err.Error()),
@@ -485,7 +485,7 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid
// and clears up corresponding graveyard records.
//
// Does not modify tss.
-func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
+func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) {
if s.GetMode().NoMetabase() {
return
}
@@ -502,7 +502,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
pInhume.SetAddresses(tsAddrs...)
// inhume tombstones
- res, err := s.metaBase.Inhume(pInhume)
+ res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
zap.String("error", err.Error()),
@@ -547,7 +547,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
pInhume.SetAddresses(lockers...)
pInhume.SetForceGCMark()
- res, err := s.metaBase.Inhume(pInhume)
+ res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
zap.String("error", err.Error()),
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index 8012e60f8..b0126fcd7 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -100,14 +100,14 @@ func Test_GCDropsLockedExpiredObject(t *testing.T) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- err = sh.Lock(cnr, lockID, []oid.ID{objID})
+ err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID})
require.NoError(t, err)
putPrm.SetObject(lock)
- _, err = sh.Put(putPrm)
+ _, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
epoch.Value = 105
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 8a0296ac6..5268ac790 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -96,7 +96,7 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
}
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
- obj, hasMeta, err := s.fetchObjectData(prm.addr, skipMeta, cb, wc)
+ obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
return GetRes{
obj: obj,
@@ -109,7 +109,7 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
var emptyStorageID = make([]byte, 0)
// fetchObjectData looks through writeCache and blobStor to find object.
-func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) {
+func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta bool, cb storFetcher, wc func(w writecache.Cache) (*objectSDK.Object, error)) (*objectSDK.Object, bool, error) {
var (
mErr error
mRes meta.ExistsRes
@@ -118,7 +118,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
if !skipMeta {
var mPrm meta.ExistsPrm
mPrm.SetAddress(addr)
- mRes, mErr = s.metaBase.Exists(mPrm)
+ mRes, mErr = s.metaBase.Exists(ctx, mPrm)
if mErr != nil && !s.info.Mode.NoMetabase() {
return nil, false, mErr
}
@@ -154,7 +154,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
var mPrm meta.StorageIDPrm
mPrm.SetAddress(addr)
- mExRes, err := s.metaBase.StorageID(mPrm)
+ mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
}
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index f670b2864..ea28c8e32 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -40,7 +40,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(obj))
@@ -58,7 +58,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(obj))
@@ -86,7 +86,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(child)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
getPrm.SetAddress(object.AddressOf(child))
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index 8e8ff9433..a15cdfdca 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -73,7 +73,7 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
headParams.SetRaw(prm.raw)
var res meta.GetRes
- res, err = s.metaBase.Get(headParams)
+ res, err = s.metaBase.Get(ctx, headParams)
obj = res.Header()
}
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index 449626e93..11e7a8b04 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -37,7 +37,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
headPrm.SetAddress(object.AddressOf(obj))
@@ -62,7 +62,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
putPrm.SetObject(child)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
headPrm.SetAddress(object.AddressOf(parent))
diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go
index 3457188be..12a2900ac 100644
--- a/pkg/local_object_storage/shard/inhume.go
+++ b/pkg/local_object_storage/shard/inhume.go
@@ -5,9 +5,12 @@ import (
"errors"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -62,6 +65,12 @@ var ErrLockObjectRemoval = meta.ErrLockObjectRemoval
//
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Inhume",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ ))
+ defer span.End()
+
s.m.RLock()
if s.info.Mode.ReadOnly() {
@@ -74,7 +83,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
if s.hasWriteCache() {
for i := range prm.target {
- _ = s.writeCache.Delete(prm.target[i])
+ _ = s.writeCache.Delete(ctx, prm.target[i])
}
}
@@ -92,7 +101,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
metaPrm.SetForceGCMark()
}
- res, err := s.metaBase.Inhume(metaPrm)
+ res, err := s.metaBase.Inhume(ctx, metaPrm)
if err != nil {
if errors.Is(err, meta.ErrLockObjectRemoval) {
s.m.RUnlock()
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 41845c414..0b4e51701 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -42,7 +42,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
var getPrm shard.GetPrm
getPrm.SetAddress(object.AddressOf(obj))
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
_, err = testGet(t, sh, getPrm, hasWriteCache)
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index bab1090eb..aaa1112cd 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -1,6 +1,7 @@
package shard
import (
+ "context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -85,7 +86,7 @@ func (s *Shard) List() (res SelectRes, err error) {
sPrm.SetContainerID(lst[i])
sPrm.SetFilters(filters)
- sRes, err := s.metaBase.Select(sPrm) // consider making List in metabase
+ sRes, err := s.metaBase.Select(context.TODO(), sPrm) // consider making List in metabase
if err != nil {
s.log.Debug(logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go
index 33c9e489a..8fac41a0f 100644
--- a/pkg/local_object_storage/shard/list_test.go
+++ b/pkg/local_object_storage/shard/list_test.go
@@ -1,6 +1,7 @@
package shard_test
import (
+ "context"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -52,7 +53,7 @@ func testShardList(t *testing.T, sh *shard.Shard) {
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
}
}
diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go
index d8113cf30..cfbd94c5b 100644
--- a/pkg/local_object_storage/shard/lock.go
+++ b/pkg/local_object_storage/shard/lock.go
@@ -1,11 +1,15 @@
package shard
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Lock marks objects as locked with another object. All objects from the
@@ -14,7 +18,16 @@ import (
// Allows locking regular objects only (otherwise returns apistatus.LockNonRegularObject).
//
// Locked list should be unique. Panics if it is empty.
-func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Lock",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", idCnr.EncodeToString()),
+ attribute.String("locker", locker.EncodeToString()),
+ attribute.Int("locked_count", len(locked)),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -25,7 +38,7 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
return ErrDegradedMode
}
- err := s.metaBase.Lock(idCnr, locker, locked)
+ err := s.metaBase.Lock(ctx, idCnr, locker, locked)
if err != nil {
return fmt.Errorf("metabase lock: %w", err)
}
@@ -35,7 +48,14 @@ func (s *Shard) Lock(idCnr cid.ID, locker oid.ID, locked []oid.ID) error {
// IsLocked checks object locking relation of the provided object. Not found object is
// considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise.
-func (s *Shard) IsLocked(addr oid.Address) (bool, error) {
+func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.IsLocked",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
m := s.GetMode()
if m.NoMetabase() {
return false, ErrDegradedMode
@@ -44,7 +64,7 @@ func (s *Shard) IsLocked(addr oid.Address) (bool, error) {
var prm meta.IsLockedPrm
prm.SetAddress(addr)
- res, err := s.metaBase.IsLocked(prm)
+ res, err := s.metaBase.IsLocked(ctx, prm)
if err != nil {
return false, err
}
diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go
index 2bee66298..c577ae184 100644
--- a/pkg/local_object_storage/shard/lock_test.go
+++ b/pkg/local_object_storage/shard/lock_test.go
@@ -76,16 +76,16 @@ func TestShard_Lock(t *testing.T) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// lock the object
- err = sh.Lock(cnr, lockID, []oid.ID{objID})
+ err = sh.Lock(context.Background(), cnr, lockID, []oid.ID{objID})
require.NoError(t, err)
putPrm.SetObject(lock)
- _, err = sh.Put(putPrm)
+ _, err = sh.Put(context.Background(), putPrm)
require.NoError(t, err)
t.Run("inhuming locked objects", func(t *testing.T) {
@@ -158,21 +158,21 @@ func TestShard_IsLocked(t *testing.T) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
// not locked object is not locked
- locked, err := sh.IsLocked(objectcore.AddressOf(obj))
+ locked, err := sh.IsLocked(context.Background(), objectcore.AddressOf(obj))
require.NoError(t, err)
require.False(t, locked)
// locked object is locked
- require.NoError(t, sh.Lock(cnrID, lockID, []oid.ID{objID}))
+ require.NoError(t, sh.Lock(context.Background(), cnrID, lockID, []oid.ID{objID}))
- locked, err = sh.IsLocked(objectcore.AddressOf(obj))
+ locked, err = sh.IsLocked(context.Background(), objectcore.AddressOf(obj))
require.NoError(t, err)
require.True(t, locked)
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 18e97e259..1578c662b 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -109,7 +109,7 @@ func TestCounters(t *testing.T) {
for i := 0; i < objNumber; i++ {
prm.SetObject(oo[i])
- _, err := sh.Put(prm)
+ _, err := sh.Put(context.Background(), prm)
require.NoError(t, err)
}
@@ -168,7 +168,7 @@ func TestCounters(t *testing.T) {
deletedNumber := int(phy / 4)
prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...)
- _, err := sh.Delete(prm)
+ _, err := sh.Delete(context.Background(), prm)
require.NoError(t, err)
require.Equal(t, phy-uint64(deletedNumber), mm.objCounters[physical])
@@ -207,6 +207,7 @@ func shardWithMetrics(t *testing.T, path string) (*shard.Shard, *metricsStore) {
}
sh := shard.New(
+ shard.WithID(shard.NewIDFromBytes([]byte{})),
shard.WithBlobStorOptions(blobOpts...),
shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(path, "pilorama"))),
shard.WithMetaBaseOptions(
diff --git a/pkg/local_object_storage/shard/move.go b/pkg/local_object_storage/shard/move.go
index f3199ac07..119910623 100644
--- a/pkg/local_object_storage/shard/move.go
+++ b/pkg/local_object_storage/shard/move.go
@@ -1,9 +1,14 @@
package shard
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -23,7 +28,14 @@ func (p *ToMoveItPrm) SetAddress(addr oid.Address) {
// ToMoveIt calls metabase.ToMoveIt method to mark object as relocatable to
// another shard.
-func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
+func (s *Shard) ToMoveIt(ctx context.Context, prm ToMoveItPrm) (ToMoveItRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ToMoveIt",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", prm.addr.EncodeToString()),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -37,7 +49,7 @@ func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
var toMovePrm meta.ToMoveItPrm
toMovePrm.SetAddress(prm.addr)
- _, err := s.metaBase.ToMoveIt(toMovePrm)
+ _, err := s.metaBase.ToMoveIt(ctx, toMovePrm)
if err != nil {
s.log.Debug(logs.ShardCouldNotMarkObjectForShardRelocationInMetabase,
zap.String("error", err.Error()),
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index a4cb2cb1f..d7d4ae538 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -1,13 +1,17 @@
package shard
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -30,7 +34,14 @@ func (p *PutPrm) SetObject(obj *object.Object) {
// did not allow to completely save the object.
//
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
-func (s *Shard) Put(prm PutPrm) (PutRes, error) {
+func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Put",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("address", objectCore.AddressOf(prm.obj).EncodeToString()),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -55,7 +66,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
// ahead of `Put` by storage engine
tryCache := s.hasWriteCache() && !m.NoMetabase()
if tryCache {
- res, err = s.writeCache.Put(putPrm)
+ res, err = s.writeCache.Put(ctx, putPrm)
}
if err != nil || !tryCache {
if err != nil {
@@ -63,7 +74,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
zap.String("err", err.Error()))
}
- res, err = s.blobStor.Put(putPrm)
+ res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
}
@@ -73,7 +84,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
var pPrm meta.PutPrm
pPrm.SetObject(prm.obj)
pPrm.SetStorageID(res.StorageID)
- if _, err := s.metaBase.Put(pPrm); err != nil {
+ if _, err := s.metaBase.Put(ctx, pPrm); err != nil {
// may we need to handle this case in a special way
// since the object has been successfully written to BlobStor
return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index 4355c31a3..06aea2f8a 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -123,7 +123,7 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
}
skipMeta := prm.skipMeta || s.info.Mode.NoMetabase()
- obj, hasMeta, err := s.fetchObjectData(prm.addr, skipMeta, cb, wc)
+ obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc)
return RngRes{
obj: obj,
diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go
index 164181214..c95dbae98 100644
--- a/pkg/local_object_storage/shard/range_test.go
+++ b/pkg/local_object_storage/shard/range_test.go
@@ -99,7 +99,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
var putPrm shard.PutPrm
putPrm.SetObject(obj)
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
var rngPrm shard.RngPrm
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index 1bfa33dd7..9ad05f525 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -44,6 +44,7 @@ func TestShardReload(t *testing.T) {
meta.WithEpochState(epochState{})}
opts := []Option{
+ WithID(NewIDFromBytes([]byte{})),
WithLogger(l),
WithBlobStorOptions(blobOpts...),
WithMetaBaseOptions(metaOpts...),
@@ -75,7 +76,7 @@ func TestShardReload(t *testing.T) {
checkHasObjects(t, true)
t.Run("same config, no-op", func(t *testing.T) {
- require.NoError(t, sh.Reload(opts...))
+ require.NoError(t, sh.Reload(context.Background(), opts...))
checkHasObjects(t, true)
})
@@ -86,7 +87,7 @@ func TestShardReload(t *testing.T) {
}
newOpts := newShardOpts(filepath.Join(p, "meta1"), false)
- require.NoError(t, sh.Reload(newOpts...))
+ require.NoError(t, sh.Reload(context.Background(), newOpts...))
checkHasObjects(t, false) // new path, but no resync
@@ -97,7 +98,7 @@ func TestShardReload(t *testing.T) {
})
newOpts = newShardOpts(filepath.Join(p, "meta2"), true)
- require.NoError(t, sh.Reload(newOpts...))
+ require.NoError(t, sh.Reload(context.Background(), newOpts...))
checkHasObjects(t, true) // all objects are restored, including the new one
@@ -106,7 +107,7 @@ func TestShardReload(t *testing.T) {
require.NoError(t, os.WriteFile(badPath, []byte{1}, 0))
newOpts = newShardOpts(badPath, true)
- require.Error(t, sh.Reload(newOpts...))
+ require.Error(t, sh.Reload(context.Background(), newOpts...))
// Cleanup is done, no panic.
obj := newObject()
@@ -117,7 +118,7 @@ func TestShardReload(t *testing.T) {
// Successive reload produces no undesired effects.
require.NoError(t, os.RemoveAll(badPath))
- require.NoError(t, sh.Reload(newOpts...))
+ require.NoError(t, sh.Reload(context.Background(), newOpts...))
obj = newObject()
require.NoError(t, putObject(sh, obj))
@@ -132,7 +133,7 @@ func putObject(sh *Shard, obj *objectSDK.Object) error {
var prm PutPrm
prm.SetObject(obj)
- _, err := sh.Put(prm)
+ _, err := sh.Put(context.Background(), prm)
return err
}
diff --git a/pkg/local_object_storage/shard/restore.go b/pkg/local_object_storage/shard/restore.go
index 73dc1d178..2cb64a518 100644
--- a/pkg/local_object_storage/shard/restore.go
+++ b/pkg/local_object_storage/shard/restore.go
@@ -2,13 +2,17 @@ package shard
import (
"bytes"
+ "context"
"encoding/binary"
"errors"
"io"
"os"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ErrInvalidMagic is returned when dump format is invalid.
@@ -57,8 +61,15 @@ func (r RestoreRes) FailCount() int {
// Restore restores objects from the dump prepared by Dump.
//
// Returns any error encountered.
-func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) {
- // Disallow changing mode during restore.
+func (s *Shard) Restore(ctx context.Context, prm RestorePrm) (RestoreRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Restore",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("path", prm.path),
+ attribute.Bool("ignore_errors", prm.ignoreErrors),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -122,7 +133,7 @@ func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) {
}
putPrm.SetObject(obj)
- _, err = s.Put(putPrm)
+ _, err = s.Put(ctx, putPrm)
if err != nil && !IsErrObjectExpired(err) && !IsErrRemoved(err) {
return RestoreRes{}, err
}
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index 4bb467d48..7f776c18a 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -1,12 +1,16 @@
package shard
import (
+ "context"
"fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// SelectPrm groups the parameters of Select operation.
@@ -39,7 +43,14 @@ func (r SelectRes) AddressList() []oid.Address {
//
// Returns any error encountered that
// did not allow to completely select the objects.
-func (s *Shard) Select(prm SelectPrm) (SelectRes, error) {
+func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Select",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", prm.cnr.EncodeToString()),
+ ))
+ defer span.End()
+
s.m.RLock()
defer s.m.RUnlock()
@@ -51,7 +62,7 @@ func (s *Shard) Select(prm SelectPrm) (SelectRes, error) {
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
- mRes, err := s.metaBase.Select(selectPrm)
+ mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
}
diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go
index 5fd13221a..714811b7e 100644
--- a/pkg/local_object_storage/shard/shutdown_test.go
+++ b/pkg/local_object_storage/shard/shutdown_test.go
@@ -43,7 +43,7 @@ func TestWriteCacheObjectLoss(t *testing.T) {
for i := range objects {
putPrm.SetObject(objects[i])
- _, err := sh.Put(putPrm)
+ _, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
}
require.NoError(t, sh.Close())
diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go
index 7282f121c..245eb4c70 100644
--- a/pkg/local_object_storage/shard/writecache.go
+++ b/pkg/local_object_storage/shard/writecache.go
@@ -1,7 +1,12 @@
package shard
import (
+ "context"
"errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// FlushWriteCachePrm represents parameters of a `FlushWriteCache` operation.
@@ -19,7 +24,14 @@ func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) {
var errWriteCacheDisabled = errors.New("write-cache is disabled")
// FlushWriteCache flushes all data from the write-cache.
-func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error {
+func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.FlushWriteCache",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Bool("ignore_errors", p.ignoreErrors),
+ ))
+ defer span.End()
+
if !s.hasWriteCache() {
return errWriteCacheDisabled
}
@@ -35,5 +47,5 @@ func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error {
return ErrDegradedMode
}
- return s.writeCache.Flush(p.ignoreErrors)
+ return s.writeCache.Flush(ctx, p.ignoreErrors)
}
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go
index dcfea8dd0..c1aab9e5a 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/delete.go
@@ -1,16 +1,27 @@
package writecache
import (
+ "context"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// Delete removes object from write-cache.
//
// Returns an error of type apistatus.ObjectNotFound if object is missing in write-cache.
-func (c *cache) Delete(addr oid.Address) error {
+func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Delete",
+ trace.WithAttributes(
+ attribute.String("address", addr.EncodeToString()),
+ ))
+ defer span.End()
+
c.modeMtx.RLock()
defer c.modeMtx.RUnlock()
if c.readOnly() {
@@ -45,7 +56,7 @@ func (c *cache) Delete(addr oid.Address) error {
return nil
}
- _, err := c.fsTree.Delete(common.DeletePrm{Address: addr})
+ _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
storagelog.Write(c.log,
storagelog.AddressField(saddr),
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 3ca3aa905..9d0ffc98c 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -2,9 +2,11 @@ package writecache
import (
"bytes"
+ "context"
"errors"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -15,6 +17,8 @@ import (
"github.com/mr-tron/base58"
"github.com/nspcc-dev/neo-go/pkg/util/slice"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -37,7 +41,7 @@ func (c *cache) runFlushLoop() {
}
c.wg.Add(1)
- go c.flushBigObjects()
+ go c.flushBigObjects(context.TODO())
c.wg.Add(1)
go func() {
@@ -141,7 +145,7 @@ func (c *cache) flushDB() {
}
}
-func (c *cache) flushBigObjects() {
+func (c *cache) flushBigObjects(ctx context.Context) {
defer c.wg.Done()
tick := time.NewTicker(defaultFlushInterval * 10)
@@ -157,7 +161,7 @@ func (c *cache) flushBigObjects() {
continue
}
- _ = c.flushFSTree(true)
+ _ = c.flushFSTree(ctx, true)
c.modeMtx.RUnlock()
case <-c.closeCh:
@@ -176,7 +180,7 @@ func (c *cache) reportFlushError(msg string, addr string, err error) {
}
}
-func (c *cache) flushFSTree(ignoreErrors bool) error {
+func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
var prm common.IteratePrm
prm.IgnoreErrors = ignoreErrors
prm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
@@ -205,7 +209,7 @@ func (c *cache) flushFSTree(ignoreErrors bool) error {
return err
}
- err = c.flushObject(&obj, data)
+ err = c.flushObject(ctx, &obj, data)
if err != nil {
if ignoreErrors {
return nil
@@ -236,7 +240,7 @@ func (c *cache) flushWorker(_ int) {
return
}
- err := c.flushObject(obj, nil)
+ err := c.flushObject(context.TODO(), obj, nil)
if err == nil {
c.flushed.Add(objectCore.AddressOf(obj).EncodeToString(), true)
}
@@ -244,14 +248,14 @@ func (c *cache) flushWorker(_ int) {
}
// flushObject is used to write object directly to the main storage.
-func (c *cache) flushObject(obj *object.Object, data []byte) error {
+func (c *cache) flushObject(ctx context.Context, obj *object.Object, data []byte) error {
addr := objectCore.AddressOf(obj)
var prm common.PutPrm
prm.Object = obj
prm.RawData = data
- res, err := c.blobstor.Put(prm)
+ res, err := c.blobstor.Put(ctx, prm)
if err != nil {
if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) &&
!errors.Is(err, blobstor.ErrNoPlaceFound) {
@@ -276,15 +280,21 @@ func (c *cache) flushObject(obj *object.Object, data []byte) error {
// Flush flushes all objects from the write-cache to the main storage.
// Write-cache must be in readonly mode to ensure correctness of an operation and
// to prevent interference with background flush workers.
-func (c *cache) Flush(ignoreErrors bool) error {
+func (c *cache) Flush(ctx context.Context, ignoreErrors bool) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Flush",
+ trace.WithAttributes(
+ attribute.Bool("ignore_errors", ignoreErrors),
+ ))
+ defer span.End()
+
c.modeMtx.RLock()
defer c.modeMtx.RUnlock()
- return c.flush(ignoreErrors)
+ return c.flush(ctx, ignoreErrors)
}
-func (c *cache) flush(ignoreErrors bool) error {
- if err := c.flushFSTree(ignoreErrors); err != nil {
+func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
+ if err := c.flushFSTree(ctx, ignoreErrors); err != nil {
return err
}
@@ -316,7 +326,7 @@ func (c *cache) flush(ignoreErrors bool) error {
return err
}
- if err := c.flushObject(&obj, data); err != nil {
+ if err := c.flushObject(ctx, &obj, data); err != nil {
return err
}
}
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 9dc216fb3..2cec07081 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -89,7 +89,7 @@ func TestFlush(t *testing.T) {
var mPrm meta.StorageIDPrm
mPrm.SetAddress(objects[i].addr)
- mRes, err := mb.StorageID(mPrm)
+ mRes, err := mb.StorageID(context.Background(), mPrm)
require.NoError(t, err)
var prm common.GetPrm
@@ -112,12 +112,12 @@ func TestFlush(t *testing.T) {
wc.(*cache).flushed.Add(objects[0].addr.EncodeToString(), true)
wc.(*cache).flushed.Add(objects[1].addr.EncodeToString(), false)
- require.NoError(t, wc.Flush(false))
+ require.NoError(t, wc.Flush(context.Background(), false))
for i := 0; i < 2; i++ {
var mPrm meta.GetPrm
mPrm.SetAddress(objects[i].addr)
- _, err := mb.Get(mPrm)
+ _, err := mb.Get(context.Background(), mPrm)
require.Error(t, err)
_, err = bs.Get(context.Background(), common.GetPrm{Address: objects[i].addr})
@@ -147,7 +147,7 @@ func TestFlush(t *testing.T) {
for i := 0; i < 2; i++ {
var mPrm meta.GetPrm
mPrm.SetAddress(objects[i].addr)
- _, err := mb.Get(mPrm)
+ _, err := mb.Get(context.Background(), mPrm)
require.Error(t, err)
_, err = bs.Get(context.Background(), common.GetPrm{Address: objects[i].addr})
@@ -171,9 +171,9 @@ func TestFlush(t *testing.T) {
require.NoError(t, mb.SetMode(mode.ReadWrite))
require.Equal(t, uint32(0), errCount.Load())
- require.Error(t, wc.Flush(false))
+ require.Error(t, wc.Flush(context.Background(), false))
require.True(t, errCount.Load() > 0)
- require.NoError(t, wc.Flush(true))
+ require.NoError(t, wc.Flush(context.Background(), true))
check(t, mb, bs, objects)
}
@@ -202,7 +202,7 @@ func TestFlush(t *testing.T) {
prm.Address = objectCore.AddressOf(obj)
prm.RawData = data
- _, err := c.fsTree.Put(prm)
+ _, err := c.fsTree.Put(context.Background(), prm)
require.NoError(t, err)
p := prm.Address.Object().EncodeToString() + "." + prm.Address.Container().EncodeToString()
@@ -218,7 +218,7 @@ func TestFlush(t *testing.T) {
var prm common.PutPrm
prm.Address = oidtest.Address()
prm.RawData = []byte{1, 2, 3}
- _, err := c.fsTree.Put(prm)
+ _, err := c.fsTree.Put(context.Background(), prm)
require.NoError(t, err)
})
})
@@ -245,19 +245,19 @@ func TestFlush(t *testing.T) {
for i := range objects {
var prm meta.PutPrm
prm.SetObject(objects[i].obj)
- _, err := mb.Put(prm)
+ _, err := mb.Put(context.Background(), prm)
require.NoError(t, err)
}
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(objects[0].addr, objects[1].addr)
inhumePrm.SetTombstoneAddress(oidtest.Address())
- _, err := mb.Inhume(inhumePrm)
+ _, err := mb.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
var deletePrm meta.DeletePrm
deletePrm.SetAddresses(objects[2].addr, objects[3].addr)
- _, err = mb.Delete(deletePrm)
+ _, err = mb.Delete(context.Background(), deletePrm)
require.NoError(t, err)
require.NoError(t, bs.SetMode(mode.ReadOnly))
@@ -294,7 +294,7 @@ func putObject(t *testing.T, c Cache, size int) objectPair {
prm.Object = obj
prm.RawData = data
- _, err := c.Put(prm)
+ _, err := c.Put(context.Background(), prm)
require.NoError(t, err)
return objectPair{prm.Address, prm.Object}
diff --git a/pkg/local_object_storage/writecache/init.go b/pkg/local_object_storage/writecache/init.go
index 0ac8cea99..d92e9a2d9 100644
--- a/pkg/local_object_storage/writecache/init.go
+++ b/pkg/local_object_storage/writecache/init.go
@@ -15,21 +15,21 @@ import (
"go.uber.org/zap"
)
-func (c *cache) initFlushMarks() {
+func (c *cache) initFlushMarks(ctx context.Context) {
var localWG sync.WaitGroup
localWG.Add(1)
go func() {
defer localWG.Done()
- c.fsTreeFlushMarkUpdate()
+ c.fsTreeFlushMarkUpdate(ctx)
}()
localWG.Add(1)
go func() {
defer localWG.Done()
- c.dbFlushMarkUpdate()
+ c.dbFlushMarkUpdate(ctx)
}()
c.initWG.Add(1)
@@ -54,7 +54,7 @@ func (c *cache) initFlushMarks() {
var errStopIter = errors.New("stop iteration")
-func (c *cache) fsTreeFlushMarkUpdate() {
+func (c *cache) fsTreeFlushMarkUpdate(ctx context.Context) {
c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInFSTree)
var prm common.IteratePrm
@@ -67,14 +67,14 @@ func (c *cache) fsTreeFlushMarkUpdate() {
default:
}
- flushed, needRemove := c.flushStatus(addr)
+ flushed, needRemove := c.flushStatus(ctx, addr)
if flushed {
c.store.flushed.Add(addr.EncodeToString(), true)
if needRemove {
var prm common.DeletePrm
prm.Address = addr
- _, err := c.fsTree.Delete(prm)
+ _, err := c.fsTree.Delete(ctx, prm)
if err == nil {
storagelog.Write(c.log,
storagelog.AddressField(addr),
@@ -90,7 +90,7 @@ func (c *cache) fsTreeFlushMarkUpdate() {
c.log.Info(logs.WritecacheFinishedUpdatingFSTreeFlushMarks)
}
-func (c *cache) dbFlushMarkUpdate() {
+func (c *cache) dbFlushMarkUpdate(ctx context.Context) {
c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInDatabase)
var m []string
@@ -125,7 +125,7 @@ func (c *cache) dbFlushMarkUpdate() {
continue
}
- flushed, needRemove := c.flushStatus(addr)
+ flushed, needRemove := c.flushStatus(ctx, addr)
if flushed {
c.store.flushed.Add(addr.EncodeToString(), true)
if needRemove {
@@ -165,11 +165,11 @@ func (c *cache) dbFlushMarkUpdate() {
// flushStatus returns info about the object state in the main storage.
// First return value is true iff object exists.
// Second return value is true iff object can be safely removed.
-func (c *cache) flushStatus(addr oid.Address) (bool, bool) {
+func (c *cache) flushStatus(ctx context.Context, addr oid.Address) (bool, bool) {
var existsPrm meta.ExistsPrm
existsPrm.SetAddress(addr)
- _, err := c.metabase.Exists(existsPrm)
+ _, err := c.metabase.Exists(ctx, existsPrm)
if err != nil {
needRemove := errors.Is(err, meta.ErrObjectIsExpired) || errors.As(err, new(apistatus.ObjectAlreadyRemoved))
return needRemove, needRemove
@@ -178,7 +178,7 @@ func (c *cache) flushStatus(addr oid.Address) (bool, bool) {
var prm meta.StorageIDPrm
prm.SetAddress(addr)
- mRes, _ := c.metabase.StorageID(prm)
- res, err := c.blobstor.Exists(context.TODO(), common.ExistsPrm{Address: addr, StorageID: mRes.StorageID()})
+ mRes, _ := c.metabase.StorageID(ctx, prm)
+ res, err := c.blobstor.Exists(ctx, common.ExistsPrm{Address: addr, StorageID: mRes.StorageID()})
return err == nil && res.Exists, false
}
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index 939dc5b06..20b0cce29 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -1,12 +1,16 @@
package writecache
import (
+ "context"
"fmt"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
// ErrReadOnly is returned when Put/Write is performed in a read-only mode.
@@ -19,19 +23,25 @@ var ErrNotInitialized = logicerr.New("write-cache is not initialized yet")
// When shard is put in read-only mode all objects in memory are flushed to disk
// and all background jobs are suspended.
func (c *cache) SetMode(m mode.Mode) error {
+ ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode",
+ trace.WithAttributes(
+ attribute.String("mode", m.String()),
+ ))
+ defer span.End()
+
c.modeMtx.Lock()
defer c.modeMtx.Unlock()
- return c.setMode(m)
+ return c.setMode(ctx, m)
}
// setMode applies new mode. Must be called with cache.modeMtx lock taken.
-func (c *cache) setMode(m mode.Mode) error {
+func (c *cache) setMode(ctx context.Context, m mode.Mode) error {
var err error
turnOffMeta := m.NoMetabase()
if turnOffMeta && !c.mode.NoMetabase() {
- err = c.flush(true)
+ err = c.flush(ctx, true)
if err != nil {
return err
}
@@ -45,7 +55,7 @@ func (c *cache) setMode(m mode.Mode) error {
defer func() {
if err == nil && !turnOffMeta {
- c.initFlushMarks()
+ c.initFlushMarks(ctx)
}
}()
}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index cca8986b3..3434e9355 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -19,14 +19,14 @@ type Option func(*options)
// meta is an interface for a metabase.
type metabase interface {
- Exists(meta.ExistsPrm) (meta.ExistsRes, error)
- StorageID(meta.StorageIDPrm) (meta.StorageIDRes, error)
+ Exists(context.Context, meta.ExistsPrm) (meta.ExistsRes, error)
+ StorageID(context.Context, meta.StorageIDPrm) (meta.StorageIDRes, error)
UpdateStorageID(meta.UpdateStorageIDPrm) (meta.UpdateStorageIDRes, error)
}
// blob is an interface for the blobstor.
type blob interface {
- Put(common.PutPrm) (common.PutRes, error)
+ Put(context.Context, common.PutPrm) (common.PutRes, error)
NeedsCompression(obj *objectSDK.Object) bool
Exists(ctx context.Context, res common.ExistsPrm) (common.ExistsRes, error)
}
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index 7791e93dc..e2535d9e2 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -1,11 +1,15 @@
package writecache
import (
+ "context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
var (
@@ -21,7 +25,14 @@ var (
// Returns ErrNotInitialized if write-cache has not been initialized yet.
// Returns ErrOutOfSpace if saving an object leads to WC's size overflow.
// Returns ErrBigObject if an objects exceeds maximum object size.
-func (c *cache) Put(prm common.PutPrm) (common.PutRes, error) {
+func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "writecache.Put",
+ trace.WithAttributes(
+ attribute.String("address", prm.Address.EncodeToString()),
+ attribute.Bool("dont_compress", prm.DontCompress),
+ ))
+ defer span.End()
+
c.modeMtx.RLock()
defer c.modeMtx.RUnlock()
if c.readOnly() {
@@ -44,7 +55,7 @@ func (c *cache) Put(prm common.PutPrm) (common.PutRes, error) {
if sz <= c.smallObjectSize {
return common.PutRes{}, c.putSmall(oi)
}
- return common.PutRes{}, c.putBig(oi.addr, prm)
+ return common.PutRes{}, c.putBig(ctx, oi.addr, prm)
}
// putSmall persists small objects to the write-cache database and
@@ -71,13 +82,13 @@ func (c *cache) putSmall(obj objectInfo) error {
}
// putBig writes object to FSTree and pushes it to the flush workers queue.
-func (c *cache) putBig(addr string, prm common.PutPrm) error {
+func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) error {
cacheSz := c.estimateCacheSize()
if c.maxCacheSize < c.incSizeFS(cacheSz) {
return ErrOutOfSpace
}
- _, err := c.fsTree.Put(prm)
+ _, err := c.fsTree.Put(ctx, prm)
if err != nil {
return err
}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index ff7eb1d6a..aeae752e3 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -1,6 +1,7 @@
package writecache
import (
+ "context"
"errors"
"fmt"
"os"
@@ -146,7 +147,7 @@ func (c *cache) deleteFromDisk(keys []string) []string {
continue
}
- _, err := c.fsTree.Delete(common.DeletePrm{Address: addr})
+ _, err := c.fsTree.Delete(context.TODO(), common.DeletePrm{Address: addr})
if err != nil && !errors.As(err, new(apistatus.ObjectNotFound)) {
c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go
index 24070dbda..0fc2e601a 100644
--- a/pkg/local_object_storage/writecache/writecache.go
+++ b/pkg/local_object_storage/writecache/writecache.go
@@ -5,6 +5,7 @@ import (
"os"
"sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -32,13 +33,13 @@ type Cache interface {
//
// Returns apistatus.ObjectNotFound if object is missing in the Cache.
// Returns ErrReadOnly if the Cache is currently in the read-only mode.
- Delete(oid.Address) error
+ Delete(context.Context, oid.Address) error
Iterate(IterationPrm) error
- Put(common.PutPrm) (common.PutRes, error)
+ Put(context.Context, common.PutPrm) (common.PutRes, error)
SetMode(mode.Mode) error
SetLogger(*logger.Logger)
DumpInfo() Info
- Flush(bool) error
+ Flush(context.Context, bool) error
Init() error
Open(readOnly bool) error
@@ -152,7 +153,10 @@ func (c *cache) Open(readOnly bool) error {
// Init runs necessary services.
func (c *cache) Init() error {
- c.initFlushMarks()
+ ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.Init")
+ defer span.End()
+
+ c.initFlushMarks(ctx)
c.runFlushLoop()
return nil
}
@@ -163,7 +167,7 @@ func (c *cache) Close() error {
defer c.modeMtx.Unlock()
// Finish all in-progress operations.
- if err := c.setMode(mode.ReadOnly); err != nil {
+ if err := c.setMode(context.TODO(), mode.ReadOnly); err != nil {
return err
}
diff --git a/pkg/services/control/server/flush_cache.go b/pkg/services/control/server/flush_cache.go
index fdfd136a6..9ead530db 100644
--- a/pkg/services/control/server/flush_cache.go
+++ b/pkg/services/control/server/flush_cache.go
@@ -9,7 +9,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) FlushCache(_ context.Context, req *control.FlushCacheRequest) (*control.FlushCacheResponse, error) {
+func (s *Server) FlushCache(ctx context.Context, req *control.FlushCacheRequest) (*control.FlushCacheResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -19,7 +19,7 @@ func (s *Server) FlushCache(_ context.Context, req *control.FlushCacheRequest) (
var prm engine.FlushWriteCachePrm
prm.SetShardID(shardID)
- _, err = s.s.FlushWriteCache(prm)
+ _, err = s.s.FlushWriteCache(ctx, prm)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/control/server/restore.go b/pkg/services/control/server/restore.go
index 0e6367951..dba186f57 100644
--- a/pkg/services/control/server/restore.go
+++ b/pkg/services/control/server/restore.go
@@ -9,7 +9,7 @@ import (
"google.golang.org/grpc/status"
)
-func (s *Server) RestoreShard(_ context.Context, req *control.RestoreShardRequest) (*control.RestoreShardResponse, error) {
+func (s *Server) RestoreShard(ctx context.Context, req *control.RestoreShardRequest) (*control.RestoreShardResponse, error) {
err := s.isValidRequest(req)
if err != nil {
return nil, status.Error(codes.PermissionDenied, err.Error())
@@ -21,7 +21,7 @@ func (s *Server) RestoreShard(_ context.Context, req *control.RestoreShardReques
prm.WithPath(req.GetBody().GetFilepath())
prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
- err = s.s.RestoreShard(shardID, prm)
+ err = s.s.RestoreShard(ctx, shardID, prm)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
diff --git a/pkg/services/object/delete/util.go b/pkg/services/object/delete/util.go
index f9870f7e0..f6341f02a 100644
--- a/pkg/services/object/delete/util.go
+++ b/pkg/services/object/delete/util.go
@@ -120,7 +120,7 @@ func (w *putSvcWrapper) put(ctx context.Context, exec *execCtx) (*oid.ID, error)
WithCommonPrm(exec.commonParameters()).
WithObject(exec.tombstoneObj.CutPayload())
- err = streamer.Init(initPrm)
+ err = streamer.Init(ctx, initPrm)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go
index 10a6af271..6beb67476 100644
--- a/pkg/services/object/internal/client/client.go
+++ b/pkg/services/object/internal/client/client.go
@@ -8,6 +8,7 @@ import (
"fmt"
"io"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -402,6 +403,9 @@ func (x PutObjectRes) ID() oid.ID {
//
// Returns any error which prevented the operation from completing correctly in error return.
func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "client.PutObject")
+ defer span.End()
+
var prmCli client.PrmObjectPutInit
prmCli.MarkLocal()
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/put/distributed.go
index 15296f83f..4b2056802 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/put/distributed.go
@@ -117,7 +117,7 @@ func (x errIncompletePut) Error() string {
return commonMsg
}
-func (t *distributedTarget) WriteHeader(obj *objectSDK.Object) error {
+func (t *distributedTarget) WriteHeader(_ context.Context, obj *objectSDK.Object) error {
t.obj = obj
return nil
diff --git a/pkg/services/object/put/local.go b/pkg/services/object/put/local.go
index 2e6a496f3..7aef9f065 100644
--- a/pkg/services/object/put/local.go
+++ b/pkg/services/object/put/local.go
@@ -14,15 +14,15 @@ import (
type ObjectStorage interface {
// Put must save passed object
// and return any appeared error.
- Put(*object.Object) error
+ Put(context.Context, *object.Object) error
// Delete must delete passed objects
// and return any appeared error.
Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error
// Lock must lock passed objects
// and return any appeared error.
- Lock(locker oid.Address, toLock []oid.ID) error
+ Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error
// IsLocked must clarify object's lock status.
- IsLocked(oid.Address) (bool, error)
+ IsLocked(context.Context, oid.Address) (bool, error)
}
type localTarget struct {
@@ -47,7 +47,7 @@ func (t *localTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers
return nil, fmt.Errorf("could not delete objects from tombstone locally: %w", err)
}
case object.TypeLock:
- err := t.storage.Lock(objectCore.AddressOf(t.obj), t.meta.Objects())
+ err := t.storage.Lock(ctx, objectCore.AddressOf(t.obj), t.meta.Objects())
if err != nil {
return nil, fmt.Errorf("could not lock object from lock objects locally: %w", err)
}
@@ -55,7 +55,7 @@ func (t *localTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers
// objects that do not change meta storage
}
- if err := t.storage.Put(t.obj); err != nil {
+ if err := t.storage.Put(ctx, t.obj); err != nil { //TODO
return nil, fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
}
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index e355990a3..ea885366b 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -32,13 +32,13 @@ var errNotInit = errors.New("stream not initialized")
var errInitRecall = errors.New("init recall")
-func (p *Streamer) Init(prm *PutInitPrm) error {
+func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
// initialize destination target
if err := p.initTarget(prm); err != nil {
return fmt.Errorf("(%T) could not initialize object target: %w", p, err)
}
- if err := p.target.WriteHeader(prm.hdr); err != nil {
+ if err := p.target.WriteHeader(ctx, prm.hdr); err != nil {
return fmt.Errorf("(%T) could not write header to target: %w", p, err)
}
return nil
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index 3b8d7b88c..f7a97a955 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -5,6 +5,7 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
@@ -15,6 +16,8 @@ import (
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type streamer struct {
@@ -34,6 +37,9 @@ type sizes struct {
}
func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.Send")
+ defer span.End()
+
switch v := req.GetBody().GetObjectPart().(type) {
case *object.PutObjectPartInit:
var initPrm *putsvc.PutInitPrm
@@ -43,7 +49,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
return err
}
- if err = s.stream.Init(initPrm); err != nil {
+ if err = s.stream.Init(ctx, initPrm); err != nil {
err = fmt.Errorf("(%T) could not init object put stream: %w", s, err)
}
@@ -105,6 +111,9 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
}
func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.CloseAndRecv")
+ defer span.End()
+
if s.saveChunks {
// check payload size correctness
if s.writtenPayload != s.payloadSz {
@@ -121,6 +130,9 @@ func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error
}
func (s *streamer) relayRequest(ctx context.Context, info client.NodeInfo, c client.MultiAddressClient) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.relayRequest")
+ defer span.End()
+
// open stream
resp := new(object.PutResponse)
@@ -129,6 +141,12 @@ func (s *streamer) relayRequest(ctx context.Context, info client.NodeInfo, c cli
var firstErr error
info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "putv2.streamer.iterateAddress",
+ trace.WithAttributes(
+ attribute.String("address", addr.String()),
+ ))
+ defer span.End()
+
var err error
defer func() {
diff --git a/pkg/services/object/put/validation.go b/pkg/services/object/put/validation.go
index 70c6974d3..8c40d0677 100644
--- a/pkg/services/object/put/validation.go
+++ b/pkg/services/object/put/validation.go
@@ -41,7 +41,7 @@ var (
ErrWrongPayloadSize = errors.New("wrong payload size")
)
-func (t *validatingTarget) WriteHeader(obj *objectSDK.Object) error {
+func (t *validatingTarget) WriteHeader(ctx context.Context, obj *objectSDK.Object) error {
t.payloadSz = obj.PayloadSize()
chunkLn := uint64(len(obj.Payload()))
@@ -73,11 +73,11 @@ func (t *validatingTarget) WriteHeader(obj *objectSDK.Object) error {
t.checksum = cs.Value()
}
- if err := t.fmt.Validate(obj, t.unpreparedObject); err != nil {
+ if err := t.fmt.Validate(ctx, obj, t.unpreparedObject); err != nil {
return fmt.Errorf("(%T) coult not validate object format: %w", t, err)
}
- err := t.nextTarget.WriteHeader(obj)
+ err := t.nextTarget.WriteHeader(ctx, obj)
if err != nil {
return err
}
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index f768c8861..1af69caf1 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -1,12 +1,14 @@
package searchsvc
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"go.uber.org/zap"
)
-func (exec *execCtx) executeLocal() {
- ids, err := exec.svc.localStorage.search(exec)
+func (exec *execCtx) executeLocal(ctx context.Context) {
+ ids, err := exec.svc.localStorage.search(ctx, exec)
if err != nil {
exec.status = statusUndefined
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index b192e1d04..7a7cbfc5b 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -27,7 +27,7 @@ func (exec *execCtx) execute(ctx context.Context) {
exec.log.Debug(logs.ServingRequest)
// perform local operation
- exec.executeLocal()
+ exec.executeLocal(ctx)
exec.analyzeStatus(ctx, true)
}
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index e95970955..75059103f 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -107,7 +107,7 @@ func (c *testClientCache) get(info clientcore.NodeInfo) (searchClient, error) {
return v, nil
}
-func (s *testStorage) search(exec *execCtx) ([]oid.ID, error) {
+func (s *testStorage) search(_ context.Context, exec *execCtx) ([]oid.ID, error) {
v, ok := s.items[exec.containerID().EncodeToString()]
if !ok {
return nil, nil
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index b858e2219..708979d79 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -37,7 +37,7 @@ type cfg struct {
log *logger.Logger
localStorage interface {
- search(*execCtx) ([]oid.ID, error)
+ search(context.Context, *execCtx) ([]oid.ID, error)
}
clientConstructor interface {
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 49f3e5efd..b5b351a3b 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -117,12 +117,12 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
return res.IDList(), nil
}
-func (e *storageEngineWrapper) search(exec *execCtx) ([]oid.ID, error) {
+func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
var selectPrm engine.SelectPrm
selectPrm.WithFilters(exec.searchFilters())
selectPrm.WithContainerID(exec.containerID())
- r, err := e.storage.Select(selectPrm)
+ r, err := e.storage.Select(ctx, selectPrm)
if err != nil {
return nil, err
}
diff --git a/pkg/services/object_manager/transformer/fmt.go b/pkg/services/object_manager/transformer/fmt.go
index 462cc7474..fbe8af2fb 100644
--- a/pkg/services/object_manager/transformer/fmt.go
+++ b/pkg/services/object_manager/transformer/fmt.go
@@ -48,7 +48,7 @@ func NewFormatTarget(p *FormatterParams) ObjectTarget {
}
}
-func (f *formatter) WriteHeader(obj *object.Object) error {
+func (f *formatter) WriteHeader(_ context.Context, obj *object.Object) error {
f.obj = obj
return nil
@@ -97,7 +97,7 @@ func (f *formatter) Close(ctx context.Context) (*AccessIdentifiers, error) {
return nil, fmt.Errorf("could not finalize object: %w", err)
}
- if err := f.prm.NextTarget.WriteHeader(f.obj); err != nil {
+ if err := f.prm.NextTarget.WriteHeader(ctx, f.obj); err != nil {
return nil, fmt.Errorf("could not write header to next target: %w", err)
}
diff --git a/pkg/services/object_manager/transformer/transformer.go b/pkg/services/object_manager/transformer/transformer.go
index 199f5d0c1..c23b4dca7 100644
--- a/pkg/services/object_manager/transformer/transformer.go
+++ b/pkg/services/object_manager/transformer/transformer.go
@@ -56,7 +56,7 @@ func NewPayloadSizeLimiter(maxSize uint64, withoutHomomorphicHash bool, targetIn
}
}
-func (s *payloadSizeLimiter) WriteHeader(hdr *object.Object) error {
+func (s *payloadSizeLimiter) WriteHeader(_ context.Context, hdr *object.Object) error {
s.current = fromObject(hdr)
s.initialize()
@@ -190,7 +190,7 @@ func (s *payloadSizeLimiter) release(ctx context.Context, finalize bool) (*Acces
writeHashes(s.currentHashers)
// release current, get its id
- if err := s.target.WriteHeader(s.current); err != nil {
+ if err := s.target.WriteHeader(ctx, s.current); err != nil {
return nil, fmt.Errorf("could not write header: %w", err)
}
diff --git a/pkg/services/object_manager/transformer/types.go b/pkg/services/object_manager/transformer/types.go
index 3e6e2feff..73cea5216 100644
--- a/pkg/services/object_manager/transformer/types.go
+++ b/pkg/services/object_manager/transformer/types.go
@@ -28,7 +28,7 @@ type ObjectTarget interface {
// that depends on the implementation.
//
// Must not be called after Close call.
- WriteHeader(*object.Object) error
+ WriteHeader(context.Context, *object.Object) error
// Write writes object payload chunk.
//
From 6121b541b5ac79b58d3b7e9e891e47f258f8a6e5 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 13 Apr 2023 15:36:20 +0300
Subject: [PATCH 0112/1943] [#242] treesvc: Add tracing spans
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/tree.go | 175 ++++++++++++++---
pkg/local_object_storage/engine/tree_test.go | 4 +-
pkg/local_object_storage/pilorama/boltdb.go | 138 ++++++++++++--
pkg/local_object_storage/pilorama/forest.go | 25 +--
.../pilorama/forest_test.go | 155 +++++++--------
.../pilorama/interface.go | 26 +--
pkg/local_object_storage/shard/tree.go | 176 +++++++++++++++---
pkg/services/tree/drop.go | 4 +-
pkg/services/tree/getsubtree_test.go | 7 +-
pkg/services/tree/redirect.go | 9 +
pkg/services/tree/replicator.go | 32 +++-
pkg/services/tree/service.go | 30 +--
pkg/services/tree/sync.go | 15 +-
13 files changed, 601 insertions(+), 195 deletions(-)
diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go
index b69ab4890..e7d66094c 100644
--- a/pkg/local_object_storage/engine/tree.go
+++ b/pkg/local_object_storage/engine/tree.go
@@ -1,24 +1,39 @@
package engine
import (
+ "context"
"errors"
+ "fmt"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
var _ pilorama.Forest = (*StorageEngine)(nil)
// TreeMove implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
- index, lst, err := e.getTreeShard(d.CID, treeID)
+func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeMove",
+ trace.WithAttributes(
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, d.CID, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return nil, err
}
- lm, err := lst[index].TreeMove(d, treeID, m)
+ lm, err := lst[index].TreeMove(ctx, d, treeID, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
e.reportShardError(lst[index], "can't perform `TreeMove`", err,
@@ -32,13 +47,26 @@ func (e *StorageEngine) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pil
}
// TreeAddByPath implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr string, path []string, m []pilorama.KeyValue) ([]pilorama.Move, error) {
- index, lst, err := e.getTreeShard(d.CID, treeID)
+func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, treeID string, attr string, path []string, m []pilorama.KeyValue) ([]pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeAddByPath",
+ trace.WithAttributes(
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Int("meta_count", len(m)),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, d.CID, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return nil, err
}
- lm, err := lst[index].TreeAddByPath(d, treeID, attr, path, m)
+ lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err,
@@ -51,13 +79,22 @@ func (e *StorageEngine) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, a
}
// TreeApply implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error {
- index, lst, err := e.getTreeShard(cnr, treeID)
+func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApply",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.Bool("background", backgroundSync),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, cnr, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return err
}
- err = lst[index].TreeApply(cnr, treeID, m, backgroundSync)
+ err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync)
if err != nil {
if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
e.reportShardError(lst[index], "can't perform `TreeApply`", err,
@@ -70,11 +107,22 @@ func (e *StorageEngine) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move
}
// TreeGetByPath implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
+func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetByPath",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Bool("latest", latest),
+ ),
+ )
+ defer span.End()
+
var err error
var nodes []pilorama.Node
for _, sh := range e.sortShardsByWeight(cid) {
- nodes, err = sh.TreeGetByPath(cid, treeID, attr, path, latest)
+ nodes, err = sh.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@@ -92,12 +140,21 @@ func (e *StorageEngine) TreeGetByPath(cid cidSDK.ID, treeID string, attr string,
}
// TreeGetMeta implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
+func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetMeta",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+ ),
+ )
+ defer span.End()
+
var err error
var m pilorama.Meta
var p uint64
for _, sh := range e.sortShardsByWeight(cid) {
- m, p, err = sh.TreeGetMeta(cid, treeID, nodeID)
+ m, p, err = sh.TreeGetMeta(ctx, cid, treeID, nodeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@@ -115,11 +172,20 @@ func (e *StorageEngine) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID piloram
}
// TreeGetChildren implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) {
+func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetChildren",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+ ),
+ )
+ defer span.End()
+
var err error
var nodes []uint64
for _, sh := range e.sortShardsByWeight(cid) {
- nodes, err = sh.TreeGetChildren(cid, treeID, nodeID)
+ nodes, err = sh.TreeGetChildren(ctx, cid, treeID, nodeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@@ -137,11 +203,20 @@ func (e *StorageEngine) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pil
}
// TreeGetOpLog implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
+func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetOpLog",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", fmt.Sprintf("%d", height)),
+ ),
+ )
+ defer span.End()
+
var err error
var lm pilorama.Move
for _, sh := range e.sortShardsByWeight(cid) {
- lm, err = sh.TreeGetOpLog(cid, treeID, height)
+ lm, err = sh.TreeGetOpLog(ctx, cid, treeID, height)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@@ -159,10 +234,18 @@ func (e *StorageEngine) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64
}
// TreeDrop implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeDrop(cid cidSDK.ID, treeID string) error {
+func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeDrop",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
var err error
for _, sh := range e.sortShardsByWeight(cid) {
- err = sh.TreeDrop(cid, treeID)
+ err = sh.TreeDrop(ctx, cid, treeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@@ -180,11 +263,18 @@ func (e *StorageEngine) TreeDrop(cid cidSDK.ID, treeID string) error {
}
// TreeList implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeList(cid cidSDK.ID) ([]string, error) {
+func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeList",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ ),
+ )
+ defer span.End()
+
var resIDs []string
for _, sh := range e.unsortedShards() {
- ids, err := sh.TreeList(cid)
+ ids, err := sh.TreeList(ctx, cid)
if err != nil {
if errors.Is(err, shard.ErrPiloramaDisabled) || errors.Is(err, shard.ErrReadOnlyMode) {
return nil, err
@@ -205,8 +295,16 @@ func (e *StorageEngine) TreeList(cid cidSDK.ID) ([]string, error) {
}
// TreeExists implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
- _, _, err := e.getTreeShard(cid, treeID)
+func (e *StorageEngine) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeExists",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
+ _, _, err := e.getTreeShard(ctx, cid, treeID)
if errors.Is(err, pilorama.ErrTreeNotFound) {
return false, nil
}
@@ -214,13 +312,22 @@ func (e *StorageEngine) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
- index, lst, err := e.getTreeShard(cid, treeID)
+func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeUpdateLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", fmt.Sprintf("%d", height)),
+ ),
+ )
+ defer span.End()
+
+ index, lst, err := e.getTreeShard(ctx, cid, treeID)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
return err
}
- err = lst[index].TreeUpdateLastSyncHeight(cid, treeID, height)
+ err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled {
e.reportShardError(lst[index], "can't update tree synchronization height", err,
zap.Stringer("cid", cid),
@@ -230,11 +337,19 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, h
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
-func (e *StorageEngine) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
var err error
var height uint64
for _, sh := range e.sortShardsByWeight(cid) {
- height, err = sh.TreeLastSyncHeight(cid, treeID)
+ height, err = sh.TreeLastSyncHeight(ctx, cid, treeID)
if err != nil {
if err == shard.ErrPiloramaDisabled {
break
@@ -251,10 +366,10 @@ func (e *StorageEngine) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64
return height, err
}
-func (e *StorageEngine) getTreeShard(cid cidSDK.ID, treeID string) (int, []hashedShard, error) {
+func (e *StorageEngine) getTreeShard(ctx context.Context, cid cidSDK.ID, treeID string) (int, []hashedShard, error) {
lst := e.sortShardsByWeight(cid)
for i, sh := range lst {
- exists, err := sh.TreeExists(cid, treeID)
+ exists, err := sh.TreeExists(ctx, cid, treeID)
if err != nil {
return 0, nil, err
}
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 77573c9e6..c2bae9772 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -36,7 +36,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
if err != nil {
b.Fatal(err)
}
- _, err = te.ng.TreeAddByPath(d, treeID, pilorama.AttributeFilename, nil,
+ _, err = te.ng.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, nil,
[]pilorama.KeyValue{{pilorama.AttributeFilename, []byte(strconv.Itoa(i))}})
if err != nil {
b.Fatal(err)
@@ -63,7 +63,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
})
b.Run("TreeGetByPath", func(b *testing.B) {
for i := 0; i < b.N; i++ {
- nodes, err := te.ng.TreeGetByPath(cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
+ nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
if err != nil {
b.Fatal(err)
}
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 994c3d416..1ecc89cb5 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -2,6 +2,7 @@ package pilorama
import (
"bytes"
+ "context"
"encoding/binary"
"errors"
"fmt"
@@ -11,12 +12,15 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/io"
"go.etcd.io/bbolt"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
type boltForest struct {
@@ -144,7 +148,17 @@ func (t *boltForest) Close() error {
}
// TreeMove implements the Forest interface.
-func (t *boltForest) TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, error) {
+func (t *boltForest) TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeMove",
+ trace.WithAttributes(
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@@ -175,7 +189,15 @@ func (t *boltForest) TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, e
}
// TreeExists implements the Forest interface.
-func (t *boltForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
+func (t *boltForest) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeExists",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -197,7 +219,16 @@ func (t *boltForest) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
var syncHeightKey = []byte{'h'}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
-func (t *boltForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
+func (t *boltForest) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeUpdateLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", fmt.Sprintf("%d", height)),
+ ),
+ )
+ defer span.End()
+
rawHeight := make([]byte, 8)
binary.LittleEndian.PutUint64(rawHeight, height)
@@ -214,7 +245,15 @@ func (t *boltForest) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, heig
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
-func (t *boltForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+func (t *boltForest) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
var height uint64
buck := bucketName(cid, treeID)
@@ -235,7 +274,20 @@ func (t *boltForest) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, e
}
// TreeAddByPath implements the Forest interface.
-func (t *boltForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) {
+func (t *boltForest) TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeAddByPath",
+ trace.WithAttributes(
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Int("meta_count", len(meta)),
+ ),
+ )
+ defer span.End()
+
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@@ -329,7 +381,16 @@ func (t *boltForest) findSpareID(bTree *bbolt.Bucket) uint64 {
}
// TreeApply implements the Forest interface.
-func (t *boltForest) TreeApply(cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error {
+func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApply",
+ trace.WithAttributes(
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.Bool("background", backgroundSync),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -627,7 +688,18 @@ func (t *boltForest) isAncestor(b *bbolt.Bucket, parent, child Node) bool {
}
// TreeGetByPath implements the Forest interface.
-func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
+func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetByPath",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Bool("latest", latest),
+ ),
+ )
+ defer span.End()
+
if !isAttributeInternal(attr) {
return nil, ErrNotPathAttribute
}
@@ -686,7 +758,16 @@ func (t *boltForest) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, pa
}
// TreeGetMeta implements the forest interface.
-func (t *boltForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) {
+func (t *boltForest) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetMeta",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -717,7 +798,16 @@ func (t *boltForest) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Met
}
// TreeGetChildren implements the Forest interface.
-func (t *boltForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) {
+func (t *boltForest) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetChildren",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -749,7 +839,14 @@ func (t *boltForest) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node)
}
// TreeList implements the Forest interface.
-func (t *boltForest) TreeList(cid cidSDK.ID) ([]string, error) {
+func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeList",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -783,7 +880,16 @@ func (t *boltForest) TreeList(cid cidSDK.ID) ([]string, error) {
}
// TreeGetOpLog implements the pilorama.Forest interface.
-func (t *boltForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error) {
+func (t *boltForest) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeGetOpLog",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", fmt.Sprintf("%d", height)),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
@@ -813,7 +919,15 @@ func (t *boltForest) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (
}
// TreeDrop implements the pilorama.Forest interface.
-func (t *boltForest) TreeDrop(cid cidSDK.ID, treeID string) error {
+func (t *boltForest) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error {
+ _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeDrop",
+ trace.WithAttributes(
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
t.modeMtx.RLock()
defer t.modeMtx.RUnlock()
diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go
index fa2f1dcd2..84530977c 100644
--- a/pkg/local_object_storage/pilorama/forest.go
+++ b/pkg/local_object_storage/pilorama/forest.go
@@ -1,6 +1,7 @@
package pilorama
import (
+ "context"
"sort"
"strings"
@@ -25,7 +26,7 @@ func NewMemoryForest() ForestStorage {
}
// TreeMove implements the Forest interface.
-func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move, error) {
+func (f *memoryForest) TreeMove(_ context.Context, d CIDDescriptor, treeID string, op *Move) (*Move, error) {
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@@ -48,7 +49,7 @@ func (f *memoryForest) TreeMove(d CIDDescriptor, treeID string, op *Move) (*Move
}
// TreeAddByPath implements the Forest interface.
-func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, m []KeyValue) ([]Move, error) {
+func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID string, attr string, path []string, m []KeyValue) ([]Move, error) {
if !d.checkValid() {
return nil, ErrInvalidCIDDescriptor
}
@@ -93,7 +94,7 @@ func (f *memoryForest) TreeAddByPath(d CIDDescriptor, treeID string, attr string
}
// TreeApply implements the Forest interface.
-func (f *memoryForest) TreeApply(cnr cid.ID, treeID string, op *Move, _ bool) error {
+func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, op *Move, _ bool) error {
fullID := cnr.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -119,7 +120,7 @@ func (f *memoryForest) Close() error {
}
// TreeGetByPath implements the Forest interface.
-func (f *memoryForest) TreeGetByPath(cid cid.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
+func (f *memoryForest) TreeGetByPath(_ context.Context, cid cid.ID, treeID string, attr string, path []string, latest bool) ([]Node, error) {
if !isAttributeInternal(attr) {
return nil, ErrNotPathAttribute
}
@@ -134,7 +135,7 @@ func (f *memoryForest) TreeGetByPath(cid cid.ID, treeID string, attr string, pat
}
// TreeGetMeta implements the Forest interface.
-func (f *memoryForest) TreeGetMeta(cid cid.ID, treeID string, nodeID Node) (Meta, Node, error) {
+func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, nodeID Node) (Meta, Node, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -145,7 +146,7 @@ func (f *memoryForest) TreeGetMeta(cid cid.ID, treeID string, nodeID Node) (Meta
}
// TreeGetChildren implements the Forest interface.
-func (f *memoryForest) TreeGetChildren(cid cid.ID, treeID string, nodeID Node) ([]uint64, error) {
+func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID string, nodeID Node) ([]uint64, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -163,7 +164,7 @@ func (f *memoryForest) TreeGetChildren(cid cid.ID, treeID string, nodeID Node) (
}
// TreeGetOpLog implements the pilorama.Forest interface.
-func (f *memoryForest) TreeGetOpLog(cid cid.ID, treeID string, height uint64) (Move, error) {
+func (f *memoryForest) TreeGetOpLog(_ context.Context, cid cid.ID, treeID string, height uint64) (Move, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
@@ -180,7 +181,7 @@ func (f *memoryForest) TreeGetOpLog(cid cid.ID, treeID string, height uint64) (M
}
// TreeDrop implements the pilorama.Forest interface.
-func (f *memoryForest) TreeDrop(cid cid.ID, treeID string) error {
+func (f *memoryForest) TreeDrop(_ context.Context, cid cid.ID, treeID string) error {
cidStr := cid.String()
if treeID == "" {
for k := range f.treeMap {
@@ -200,7 +201,7 @@ func (f *memoryForest) TreeDrop(cid cid.ID, treeID string) error {
}
// TreeList implements the pilorama.Forest interface.
-func (f *memoryForest) TreeList(cid cid.ID) ([]string, error) {
+func (f *memoryForest) TreeList(_ context.Context, cid cid.ID) ([]string, error) {
var res []string
cidStr := cid.EncodeToString()
@@ -217,14 +218,14 @@ func (f *memoryForest) TreeList(cid cid.ID) ([]string, error) {
}
// TreeExists implements the pilorama.Forest interface.
-func (f *memoryForest) TreeExists(cid cid.ID, treeID string) (bool, error) {
+func (f *memoryForest) TreeExists(_ context.Context, cid cid.ID, treeID string) (bool, error) {
fullID := cid.EncodeToString() + "/" + treeID
_, ok := f.treeMap[fullID]
return ok, nil
}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
-func (f *memoryForest) TreeUpdateLastSyncHeight(cid cid.ID, treeID string, height uint64) error {
+func (f *memoryForest) TreeUpdateLastSyncHeight(_ context.Context, cid cid.ID, treeID string, height uint64) error {
fullID := cid.EncodeToString() + "/" + treeID
t, ok := f.treeMap[fullID]
if !ok {
@@ -235,7 +236,7 @@ func (f *memoryForest) TreeUpdateLastSyncHeight(cid cid.ID, treeID string, heigh
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
-func (f *memoryForest) TreeLastSyncHeight(cid cid.ID, treeID string) (uint64, error) {
+func (f *memoryForest) TreeLastSyncHeight(_ context.Context, cid cid.ID, treeID string) (uint64, error) {
fullID := cid.EncodeToString() + "/" + treeID
t, ok := f.treeMap[fullID]
if !ok {
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index bbd35246c..be53b3fe2 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -1,6 +1,7 @@
package pilorama
import (
+ "context"
"fmt"
"math/rand"
"os"
@@ -49,7 +50,7 @@ var providers = []struct {
}
func testMeta(t *testing.T, f Forest, cid cidSDK.ID, treeID string, nodeID, parentID Node, expected Meta) {
- actualMeta, actualParent, err := f.TreeGetMeta(cid, treeID, nodeID)
+ actualMeta, actualParent, err := f.TreeGetMeta(context.Background(), cid, treeID, nodeID)
require.NoError(t, err)
require.Equal(t, parentID, actualParent)
require.Equal(t, expected, actualMeta)
@@ -71,13 +72,13 @@ func testForestTreeMove(t *testing.T, s Forest) {
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
{Key: AttributeFilename, Value: []byte("file.txt")}}
- lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
+ lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 3, len(lm))
nodeID := lm[2].Child
t.Run("invalid descriptor", func(t *testing.T) {
- _, err = s.TreeMove(CIDDescriptor{cid, 0, 0}, treeID, &Move{
+ _, err = s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, &Move{
Parent: lm[1].Child,
Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})},
Child: nodeID,
@@ -85,7 +86,7 @@ func testForestTreeMove(t *testing.T, s Forest) {
require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
})
t.Run("same parent, update meta", func(t *testing.T) {
- res, err := s.TreeMove(d, treeID, &Move{
+ res, err := s.TreeMove(context.Background(), d, treeID, &Move{
Parent: lm[1].Child,
Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})},
Child: nodeID,
@@ -93,12 +94,12 @@ func testForestTreeMove(t *testing.T, s Forest) {
require.NoError(t, err)
require.Equal(t, res.Child, nodeID)
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{nodeID}, nodes)
})
t.Run("different parent", func(t *testing.T) {
- res, err := s.TreeMove(d, treeID, &Move{
+ res, err := s.TreeMove(context.Background(), d, treeID, &Move{
Parent: RootID,
Meta: Meta{Items: append(meta, KeyValue{Key: "NewKey", Value: []byte("NewValue")})},
Child: nodeID,
@@ -106,11 +107,11 @@ func testForestTreeMove(t *testing.T, s Forest) {
require.NoError(t, err)
require.Equal(t, res.Child, nodeID)
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
require.NoError(t, err)
require.True(t, len(nodes) == 0)
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"file.txt"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{nodeID}, nodes)
})
@@ -130,7 +131,7 @@ func testForestTreeGetChildren(t *testing.T, s Forest) {
treeID := "version"
treeAdd := func(t *testing.T, child, parent Node) {
- _, err := s.TreeMove(d, treeID, &Move{
+ _, err := s.TreeMove(context.Background(), d, treeID, &Move{
Parent: parent,
Child: child,
})
@@ -152,7 +153,7 @@ func testForestTreeGetChildren(t *testing.T, s Forest) {
treeAdd(t, 7, 0)
testGetChildren := func(t *testing.T, nodeID Node, expected []Node) {
- actual, err := s.TreeGetChildren(cid, treeID, nodeID)
+ actual, err := s.TreeGetChildren(context.Background(), cid, treeID, nodeID)
require.NoError(t, err)
require.ElementsMatch(t, expected, actual)
}
@@ -168,7 +169,7 @@ func testForestTreeGetChildren(t *testing.T, s Forest) {
testGetChildren(t, 42, nil)
})
t.Run("missing tree", func(t *testing.T) {
- _, err := s.TreeGetChildren(cid, treeID+"123", 0)
+ _, err := s.TreeGetChildren(context.Background(), cid, treeID+"123", 0)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
@@ -191,10 +192,10 @@ func testForestTreeDrop(t *testing.T, s Forest) {
cid := cids[0]
t.Run("return nil if not found", func(t *testing.T) {
- require.ErrorIs(t, s.TreeDrop(cid, "123"), ErrTreeNotFound)
+ require.ErrorIs(t, s.TreeDrop(context.Background(), cid, "123"), ErrTreeNotFound)
})
- require.NoError(t, s.TreeDrop(cid, ""))
+ require.NoError(t, s.TreeDrop(context.Background(), cid, ""))
trees := []string{"tree1", "tree2"}
var descs [cidsSize]CIDDescriptor
@@ -203,39 +204,39 @@ func testForestTreeDrop(t *testing.T, s Forest) {
}
d := descs[0]
for i := range trees {
- _, err := s.TreeAddByPath(d, trees[i], AttributeFilename, []string{"path"},
+ _, err := s.TreeAddByPath(context.Background(), d, trees[i], AttributeFilename, []string{"path"},
[]KeyValue{{Key: "TreeName", Value: []byte(trees[i])}})
require.NoError(t, err)
}
- err := s.TreeDrop(cid, trees[0])
+ err := s.TreeDrop(context.Background(), cid, trees[0])
require.NoError(t, err)
- _, err = s.TreeGetByPath(cid, trees[0], AttributeFilename, []string{"path"}, true)
+ _, err = s.TreeGetByPath(context.Background(), cid, trees[0], AttributeFilename, []string{"path"}, true)
require.ErrorIs(t, err, ErrTreeNotFound)
- _, err = s.TreeGetByPath(cid, trees[1], AttributeFilename, []string{"path"}, true)
+ _, err = s.TreeGetByPath(context.Background(), cid, trees[1], AttributeFilename, []string{"path"}, true)
require.NoError(t, err)
for j := range descs {
for i := range trees {
- _, err := s.TreeAddByPath(descs[j], trees[i], AttributeFilename, []string{"path"},
+ _, err := s.TreeAddByPath(context.Background(), descs[j], trees[i], AttributeFilename, []string{"path"},
[]KeyValue{{Key: "TreeName", Value: []byte(trees[i])}})
require.NoError(t, err)
}
}
- list, err := s.TreeList(cid)
+ list, err := s.TreeList(context.Background(), cid)
require.NoError(t, err)
require.NotEmpty(t, list)
- require.NoError(t, s.TreeDrop(cid, ""))
+ require.NoError(t, s.TreeDrop(context.Background(), cid, ""))
- list, err = s.TreeList(cid)
+ list, err = s.TreeList(context.Background(), cid)
require.NoError(t, err)
require.Empty(t, list)
for j := 1; j < len(cids); j++ {
- list, err = s.TreeList(cids[j])
+ list, err = s.TreeList(context.Background(), cids[j])
require.NoError(t, err)
require.Equal(t, len(list), len(trees))
}
@@ -264,24 +265,24 @@ func testForestTreeAdd(t *testing.T, s Forest) {
}
t.Run("invalid descriptor", func(t *testing.T) {
- _, err := s.TreeMove(CIDDescriptor{cid, 0, 0}, treeID, m)
+ _, err := s.TreeMove(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, m)
require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
})
- lm, err := s.TreeMove(d, treeID, m)
+ lm, err := s.TreeMove(context.Background(), d, treeID, m)
require.NoError(t, err)
testMeta(t, s, cid, treeID, lm.Child, lm.Parent, Meta{Time: lm.Time, Items: meta})
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"file.txt"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{lm.Child}, nodes)
t.Run("other trees are unaffected", func(t *testing.T) {
- _, err := s.TreeGetByPath(cid, treeID+"123", AttributeFilename, []string{"file.txt"}, false)
+ _, err := s.TreeGetByPath(context.Background(), cid, treeID+"123", AttributeFilename, []string{"file.txt"}, false)
require.ErrorIs(t, err, ErrTreeNotFound)
- _, _, err = s.TreeGetMeta(cid, treeID+"123", 0)
+ _, _, err = s.TreeGetMeta(context.Background(), cid, treeID+"123", 0)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
@@ -304,15 +305,15 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
{Key: AttributeFilename, Value: []byte("file.txt")}}
t.Run("invalid descriptor", func(t *testing.T) {
- _, err := s.TreeAddByPath(CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, []string{"yyy"}, meta)
+ _, err := s.TreeAddByPath(context.Background(), CIDDescriptor{cid, 0, 0}, treeID, AttributeFilename, []string{"yyy"}, meta)
require.ErrorIs(t, err, ErrInvalidCIDDescriptor)
})
t.Run("invalid attribute", func(t *testing.T) {
- _, err := s.TreeAddByPath(d, treeID, AttributeVersion, []string{"yyy"}, meta)
+ _, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeVersion, []string{"yyy"}, meta)
require.ErrorIs(t, err, ErrNotPathAttribute)
})
- lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
+ lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 3, len(lm))
testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("path")}}})
@@ -322,7 +323,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
testMeta(t, s, cid, treeID, firstID, lm[2].Parent, Meta{Time: lm[2].Time, Items: meta})
meta[0].Value = []byte("YYY")
- lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
+ lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
@@ -331,19 +332,19 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
t.Run("get versions", func(t *testing.T) {
// All versions.
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, false)
require.NoError(t, err)
require.ElementsMatch(t, []Node{firstID, secondID}, nodes)
// Latest version.
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, true)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "to", "file.txt"}, true)
require.NoError(t, err)
require.Equal(t, []Node{secondID}, nodes)
})
meta[0].Value = []byte("ZZZ")
meta[1].Value = []byte("cat.jpg")
- lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "dir"}, meta)
+ lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "dir"}, meta)
require.NoError(t, err)
require.Equal(t, 2, len(lm))
testMeta(t, s, cid, treeID, lm[0].Child, lm[0].Parent, Meta{Time: lm[0].Time, Items: []KeyValue{{AttributeFilename, []byte("dir")}}})
@@ -352,7 +353,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
t.Run("create internal nodes", func(t *testing.T) {
meta[0].Value = []byte("SomeValue")
meta[1].Value = []byte("another")
- lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path"}, meta)
+ lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
@@ -360,7 +361,7 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
meta[0].Value = []byte("Leaf")
meta[1].Value = []byte("file.txt")
- lm, err = s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "another"}, meta)
+ lm, err = s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "another"}, meta)
require.NoError(t, err)
require.Equal(t, 2, len(lm))
@@ -375,12 +376,12 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
{AttributeFilename, []byte("another")}}})
t.Run("get by path", func(t *testing.T) {
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "another"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another"}, false)
require.NoError(t, err)
require.Equal(t, 2, len(nodes))
require.ElementsMatch(t, []Node{lm[0].Child, oldMove.Child}, nodes)
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"path", "another", "file.txt"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"path", "another", "file.txt"}, false)
require.NoError(t, err)
require.Equal(t, 1, len(nodes))
require.Equal(t, lm[1].Child, nodes[0])
@@ -391,11 +392,11 @@ func testForestTreeAddByPath(t *testing.T, s Forest) {
meta := []KeyValue{
{Key: AttributeVersion, Value: []byte("XXX")},
{Key: AttributeFilename, Value: []byte{}}}
- lm, err := s.TreeAddByPath(d, treeID, AttributeFilename, []string{"path", "to"}, meta)
+ lm, err := s.TreeAddByPath(context.Background(), d, treeID, AttributeFilename, []string{"path", "to"}, meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
- nodes, err := s.TreeGetByPath(d.CID, treeID, AttributeFilename, []string{"path", "to", ""}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), d.CID, treeID, AttributeFilename, []string{"path", "to", ""}, false)
require.NoError(t, err)
require.Equal(t, 1, len(nodes))
require.Equal(t, lm[0].Child, nodes[0])
@@ -415,7 +416,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
treeID := "version"
testApply := func(t *testing.T, s Forest, child, parent Node, meta Meta) {
- require.NoError(t, s.TreeApply(cid, treeID, &Move{
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{
Child: child,
Parent: parent,
Meta: meta,
@@ -475,16 +476,16 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
s := constructor(t)
t.Run("empty log, no panic", func(t *testing.T) {
- _, err := s.TreeGetOpLog(cid, treeID, 0)
+ _, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
require.ErrorIs(t, err, ErrTreeNotFound)
})
for i := range logs {
- require.NoError(t, s.TreeApply(cid, treeID, &logs[i], false))
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &logs[i], false))
}
testGetOpLog := func(t *testing.T, height uint64, m Move) {
- lm, err := s.TreeGetOpLog(cid, treeID, height)
+ lm, err := s.TreeGetOpLog(context.Background(), cid, treeID, height)
require.NoError(t, err)
require.Equal(t, m, lm)
}
@@ -498,7 +499,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
testGetOpLog(t, 261, Move{})
})
t.Run("missing tree", func(t *testing.T) {
- _, err := s.TreeGetOpLog(cid, treeID+"123", 4)
+ _, err := s.TreeGetOpLog(context.Background(), cid, treeID+"123", 4)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
@@ -515,7 +516,7 @@ func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...O
s := constructor(t)
checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
- actual, err := s.TreeExists(cid, treeID)
+ actual, err := s.TreeExists(context.Background(), cid, treeID)
require.NoError(t, err)
require.Equal(t, expected, actual)
}
@@ -527,13 +528,13 @@ func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...O
checkExists(t, false, cid, treeID)
})
- require.NoError(t, s.TreeApply(cid, treeID, &Move{Parent: 0, Child: 1}, false))
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{Parent: 0, Child: 1}, false))
checkExists(t, true, cid, treeID)
checkExists(t, false, cidtest.ID(), treeID) // different CID, same tree
checkExists(t, false, cid, "another tree") // same CID, different tree
t.Run("can be removed", func(t *testing.T) {
- require.NoError(t, s.TreeDrop(cid, treeID))
+ require.NoError(t, s.TreeDrop(context.Background(), cid, treeID))
checkExists(t, false, cid, treeID)
})
}
@@ -563,11 +564,11 @@ func TestApplyTricky1(t *testing.T) {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
for i := range ops {
- require.NoError(t, s.TreeApply(cid, treeID, &ops[i], false))
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
for i := range expected {
- _, parent, err := s.TreeGetMeta(cid, treeID, expected[i].child)
+ _, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child)
require.NoError(t, err)
require.Equal(t, expected[i].parent, parent)
}
@@ -624,11 +625,11 @@ func TestApplyTricky2(t *testing.T) {
t.Run(providers[i].name, func(t *testing.T) {
s := providers[i].construct(t)
for i := range ops {
- require.NoError(t, s.TreeApply(cid, treeID, &ops[i], false))
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
for i := range expected {
- _, parent, err := s.TreeGetMeta(cid, treeID, expected[i].child)
+ _, parent, err := s.TreeGetMeta(context.Background(), cid, treeID, expected[i].child)
require.NoError(t, err)
require.Equal(t, expected[i].parent, parent)
}
@@ -697,9 +698,9 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) {
for i := uint64(0); i < uint64(nodeCount); i++ {
- expectedMeta, expectedParent, err := expected.TreeGetMeta(cid, treeID, i)
+ expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i)
require.NoError(t, err)
- actualMeta, actualParent, err := actual.TreeGetMeta(cid, treeID, i)
+ actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i)
require.NoError(t, err)
require.Equal(t, expectedParent, actualParent, "node id: %d", i)
require.Equal(t, expectedMeta, actualMeta, "node id: %d", i)
@@ -738,7 +739,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
expected := constructor(t)
for i := range ops {
- require.NoError(t, expected.TreeApply(cid, treeID, &ops[i], false))
+ require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
for i := 0; i < iterCount; i++ {
@@ -753,7 +754,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
go func() {
defer wg.Done()
for op := range ch {
- require.NoError(t, actual.TreeApply(cid, treeID, op, false))
+ require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, op, false))
}
}()
}
@@ -783,7 +784,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
expected := constructor(t)
for i := range ops {
- require.NoError(t, expected.TreeApply(cid, treeID, &ops[i], false))
+ require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
const iterCount = 200
@@ -793,7 +794,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
actual := constructor(t)
for i := range ops {
- require.NoError(t, actual.TreeApply(cid, treeID, &ops[i], false))
+ require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
compareForests(t, expected, actual, cid, treeID, nodeCount)
}
@@ -886,7 +887,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) {
b.SetParallelism(10)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
- if err := s.TreeApply(cid, treeID, &ops[<-ch], false); err != nil {
+ if err := s.TreeApply(context.Background(), cid, treeID, &ops[<-ch], false); err != nil {
b.Fatalf("error in `Apply`: %v", err)
}
}
@@ -929,27 +930,27 @@ func testTreeGetByPath(t *testing.T, s Forest) {
}
t.Run("invalid attribute", func(t *testing.T) {
- _, err := s.TreeGetByPath(cid, treeID, AttributeVersion, []string{"", "TTT"}, false)
+ _, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeVersion, []string{"", "TTT"}, false)
require.ErrorIs(t, err, ErrNotPathAttribute)
})
- nodes, err := s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"b", "cat1.jpg"}, false)
+ nodes, err := s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"b", "cat1.jpg"}, false)
require.NoError(t, err)
require.Equal(t, []Node{4, 5}, nodes)
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"a", "cat1.jpg"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat1.jpg"}, false)
require.Equal(t, []Node{3}, nodes)
t.Run("missing child", func(t *testing.T) {
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"a", "cat3.jpg"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"a", "cat3.jpg"}, false)
require.True(t, len(nodes) == 0)
})
t.Run("missing parent", func(t *testing.T) {
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, []string{"xyz", "cat1.jpg"}, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, []string{"xyz", "cat1.jpg"}, false)
require.True(t, len(nodes) == 0)
})
t.Run("empty path", func(t *testing.T) {
- nodes, err = s.TreeGetByPath(cid, treeID, AttributeFilename, nil, false)
+ nodes, err = s.TreeGetByPath(context.Background(), cid, treeID, AttributeFilename, nil, false)
require.True(t, len(nodes) == 0)
})
}
@@ -961,7 +962,7 @@ func testMove(t *testing.T, s Forest, ts int, node, parent Node, cid cidSDK.ID,
items = append(items, KeyValue{AttributeVersion, []byte(version)})
}
- require.NoError(t, s.TreeApply(cid, treeID, &Move{
+ require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &Move{
Parent: parent,
Child: node,
Meta: Meta{
@@ -1000,7 +1001,7 @@ func testTreeGetTrees(t *testing.T, s Forest) {
d.CID = cid
for _, treeID := range treeIDs[cid] {
- _, err := s.TreeAddByPath(d, treeID, objectSDK.AttributeFileName, []string{"path"}, nil)
+ _, err := s.TreeAddByPath(context.Background(), d, treeID, objectSDK.AttributeFileName, []string{"path"}, nil)
require.NoError(t, err)
}
}
@@ -1008,7 +1009,7 @@ func testTreeGetTrees(t *testing.T, s Forest) {
for _, cid := range cids {
d.CID = cid
- trees, err := s.TreeList(cid)
+ trees, err := s.TreeList(context.Background(), cid)
require.NoError(t, err)
require.ElementsMatch(t, treeIDs[cid], trees)
@@ -1028,38 +1029,38 @@ func testTreeLastSyncHeight(t *testing.T, f Forest) {
treeID := "someTree"
t.Run("ErrNotFound if no log operations are stored for a tree", func(t *testing.T) {
- _, err := f.TreeLastSyncHeight(cnr, treeID)
+ _, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.ErrorIs(t, err, ErrTreeNotFound)
- err = f.TreeUpdateLastSyncHeight(cnr, treeID, 1)
+ err = f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 1)
require.ErrorIs(t, err, ErrTreeNotFound)
})
- _, err := f.TreeMove(CIDDescriptor{CID: cnr, Size: 1}, treeID, &Move{
+ _, err := f.TreeMove(context.Background(), CIDDescriptor{CID: cnr, Size: 1}, treeID, &Move{
Parent: RootID,
Child: 1,
})
require.NoError(t, err)
- h, err := f.TreeLastSyncHeight(cnr, treeID)
+ h, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.NoError(t, err)
require.EqualValues(t, 0, h)
t.Run("separate storages for separate containers", func(t *testing.T) {
- _, err := f.TreeLastSyncHeight(cidtest.ID(), treeID)
+ _, err := f.TreeLastSyncHeight(context.Background(), cidtest.ID(), treeID)
require.ErrorIs(t, err, ErrTreeNotFound)
})
- require.NoError(t, f.TreeUpdateLastSyncHeight(cnr, treeID, 10))
+ require.NoError(t, f.TreeUpdateLastSyncHeight(context.Background(), cnr, treeID, 10))
- h, err = f.TreeLastSyncHeight(cnr, treeID)
+ h, err = f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.NoError(t, err)
require.EqualValues(t, 10, h)
t.Run("removed correctly", func(t *testing.T) {
- require.NoError(t, f.TreeDrop(cnr, treeID))
+ require.NoError(t, f.TreeDrop(context.Background(), cnr, treeID))
- _, err := f.TreeLastSyncHeight(cnr, treeID)
+ _, err := f.TreeLastSyncHeight(context.Background(), cnr, treeID)
require.ErrorIs(t, err, ErrTreeNotFound)
})
}
diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go
index 290f633a5..9ca721be8 100644
--- a/pkg/local_object_storage/pilorama/interface.go
+++ b/pkg/local_object_storage/pilorama/interface.go
@@ -1,6 +1,8 @@
package pilorama
import (
+ "context"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -11,43 +13,43 @@ type Forest interface {
// TreeMove moves node in the tree.
// If the parent of the move operation is TrashID, the node is removed.
// If the child of the move operation is RootID, new ID is generated and added to a tree.
- TreeMove(d CIDDescriptor, treeID string, m *Move) (*Move, error)
+ TreeMove(ctx context.Context, d CIDDescriptor, treeID string, m *Move) (*Move, error)
// TreeAddByPath adds new node in the tree using provided path.
// The path is constructed by descending from the root using the values of the attr in meta.
// Internal nodes in path should have exactly one attribute, otherwise a new node is created.
- TreeAddByPath(d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error)
+ TreeAddByPath(ctx context.Context, d CIDDescriptor, treeID string, attr string, path []string, meta []KeyValue) ([]Move, error)
// TreeApply applies replicated operation from another node.
// If background is true, TreeApply will first check whether an operation exists.
- TreeApply(cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
+ TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error
// TreeGetByPath returns all nodes corresponding to the path.
// The path is constructed by descending from the root using the values of the
// AttributeFilename in meta.
// The last argument determines whether only the node with the latest timestamp is returned.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the path is not in the tree.
- TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error)
+ TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]Node, error)
// TreeGetMeta returns meta information of the node with the specified ID.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
- TreeGetMeta(cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error)
+ TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) (Meta, Node, error)
// TreeGetChildren returns children of the node with the specified ID. The order is arbitrary.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
- TreeGetChildren(cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error)
+ TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]uint64, error)
// TreeGetOpLog returns first log operation stored at or above the height.
// In case no such operation is found, empty Move and nil error should be returned.
- TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (Move, error)
+ TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)
// TreeDrop drops a tree from the database.
// If the tree is not found, ErrTreeNotFound should be returned.
// In case of empty treeID drops all trees related to container.
- TreeDrop(cid cidSDK.ID, treeID string) error
+ TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error
// TreeList returns all the tree IDs that have been added to the
// passed container ID. Nil slice should be returned if no tree found.
- TreeList(cid cidSDK.ID) ([]string, error)
+ TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error)
// TreeExists checks if a tree exists locally.
// If the tree is not found, false and a nil error should be returned.
- TreeExists(cid cidSDK.ID, treeID string) (bool, error)
+ TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error)
// TreeUpdateLastSyncHeight updates last log height synchronized with _all_ container nodes.
- TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error
+ TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error
// TreeLastSyncHeight returns last log height synchronized with _all_ container nodes.
- TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error)
+ TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error)
}
type ForestStorage interface {
diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go
index db07c001e..d5b3b67bf 100644
--- a/pkg/local_object_storage/shard/tree.go
+++ b/pkg/local_object_storage/shard/tree.go
@@ -1,9 +1,15 @@
package shard
import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
)
var _ pilorama.Forest = (*Shard)(nil)
@@ -12,7 +18,18 @@ var _ pilorama.Forest = (*Shard)(nil)
var ErrPiloramaDisabled = logicerr.New("pilorama is disabled")
// TreeMove implements the pilorama.Forest interface.
-func (s *Shard) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
+func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID string, m *pilorama.Move) (*pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeMove",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
@@ -26,11 +43,25 @@ func (s *Shard) TreeMove(d pilorama.CIDDescriptor, treeID string, m *pilorama.Mo
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- return s.pilorama.TreeMove(d, treeID, m)
+ return s.pilorama.TreeMove(ctx, d, treeID, m)
}
// TreeAddByPath implements the pilorama.Forest interface.
-func (s *Shard) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr string, path []string, meta []pilorama.KeyValue) ([]pilorama.Move, error) {
+func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, treeID string, attr string, path []string, meta []pilorama.KeyValue) ([]pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeAddByPath",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", d.CID.EncodeToString()),
+ attribute.Int("position", d.Position),
+ attribute.Int("size", d.Size),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Int("meta_count", len(meta)),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
@@ -44,11 +75,21 @@ func (s *Shard) TreeAddByPath(d pilorama.CIDDescriptor, treeID string, attr stri
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- return s.pilorama.TreeAddByPath(d, treeID, attr, path, meta)
+ return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta)
}
// TreeApply implements the pilorama.Forest interface.
-func (s *Shard) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error {
+func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *pilorama.Move, backgroundSync bool) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApply",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cnr.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.Bool("background", backgroundSync),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return ErrPiloramaDisabled
}
@@ -62,11 +103,23 @@ func (s *Shard) TreeApply(cnr cidSDK.ID, treeID string, m *pilorama.Move, backgr
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- return s.pilorama.TreeApply(cnr, treeID, m, backgroundSync)
+ return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync)
}
// TreeGetByPath implements the pilorama.Forest interface.
-func (s *Shard) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
+func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("attr", attr),
+ attribute.Int("path_count", len(path)),
+ attribute.Bool("latest", latest),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
@@ -77,11 +130,21 @@ func (s *Shard) TreeGetByPath(cid cidSDK.ID, treeID string, attr string, path []
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- return s.pilorama.TreeGetByPath(cid, treeID, attr, path, latest)
+ return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest)
}
// TreeGetMeta implements the pilorama.Forest interface.
-func (s *Shard) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
+func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) (pilorama.Meta, uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetMeta",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return pilorama.Meta{}, 0, ErrPiloramaDisabled
}
@@ -92,11 +155,21 @@ func (s *Shard) TreeGetMeta(cid cidSDK.ID, treeID string, nodeID pilorama.Node)
if s.info.Mode.NoMetabase() {
return pilorama.Meta{}, 0, ErrDegradedMode
}
- return s.pilorama.TreeGetMeta(cid, treeID, nodeID)
+ return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID)
}
// TreeGetChildren implements the pilorama.Forest interface.
-func (s *Shard) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) {
+func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node) ([]uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetChildren",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("node_id", fmt.Sprintf("%d", nodeID)),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
@@ -107,11 +180,21 @@ func (s *Shard) TreeGetChildren(cid cidSDK.ID, treeID string, nodeID pilorama.No
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- return s.pilorama.TreeGetChildren(cid, treeID, nodeID)
+ return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID)
}
// TreeGetOpLog implements the pilorama.Forest interface.
-func (s *Shard) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
+func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetOpLog",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", fmt.Sprintf("%d", height)),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return pilorama.Move{}, ErrPiloramaDisabled
}
@@ -122,11 +205,20 @@ func (s *Shard) TreeGetOpLog(cid cidSDK.ID, treeID string, height uint64) (pilor
if s.info.Mode.NoMetabase() {
return pilorama.Move{}, ErrDegradedMode
}
- return s.pilorama.TreeGetOpLog(cid, treeID, height)
+ return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height)
}
// TreeDrop implements the pilorama.Forest interface.
-func (s *Shard) TreeDrop(cid cidSDK.ID, treeID string) error {
+func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeDrop",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return ErrPiloramaDisabled
}
@@ -137,11 +229,19 @@ func (s *Shard) TreeDrop(cid cidSDK.ID, treeID string) error {
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- return s.pilorama.TreeDrop(cid, treeID)
+ return s.pilorama.TreeDrop(ctx, cid, treeID)
}
// TreeList implements the pilorama.Forest interface.
-func (s *Shard) TreeList(cid cidSDK.ID) ([]string, error) {
+func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeList",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return nil, ErrPiloramaDisabled
}
@@ -152,11 +252,20 @@ func (s *Shard) TreeList(cid cidSDK.ID) ([]string, error) {
if s.info.Mode.NoMetabase() {
return nil, ErrDegradedMode
}
- return s.pilorama.TreeList(cid)
+ return s.pilorama.TreeList(ctx, cid)
}
// TreeExists implements the pilorama.Forest interface.
-func (s *Shard) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
+func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (bool, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeExists",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return false, ErrPiloramaDisabled
}
@@ -167,11 +276,21 @@ func (s *Shard) TreeExists(cid cidSDK.ID, treeID string) (bool, error) {
if s.info.Mode.NoMetabase() {
return false, ErrDegradedMode
}
- return s.pilorama.TreeExists(cid, treeID)
+ return s.pilorama.TreeExists(ctx, cid, treeID)
}
// TreeUpdateLastSyncHeight implements the pilorama.Forest interface.
-func (s *Shard) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height uint64) error {
+func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeUpdateLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ attribute.String("height", fmt.Sprintf("%d", height)),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return ErrPiloramaDisabled
}
@@ -185,11 +304,20 @@ func (s *Shard) TreeUpdateLastSyncHeight(cid cidSDK.ID, treeID string, height ui
if s.info.Mode.NoMetabase() {
return ErrDegradedMode
}
- return s.pilorama.TreeUpdateLastSyncHeight(cid, treeID, height)
+ return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height)
}
// TreeLastSyncHeight implements the pilorama.Forest interface.
-func (s *Shard) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error) {
+func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID string) (uint64, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeLastSyncHeight",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.String("container_id", cid.EncodeToString()),
+ attribute.String("tree_id", treeID),
+ ),
+ )
+ defer span.End()
+
if s.pilorama == nil {
return 0, ErrPiloramaDisabled
}
@@ -200,5 +328,5 @@ func (s *Shard) TreeLastSyncHeight(cid cidSDK.ID, treeID string) (uint64, error)
if s.info.Mode.NoMetabase() {
return 0, ErrDegradedMode
}
- return s.pilorama.TreeLastSyncHeight(cid, treeID)
+ return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID)
}
diff --git a/pkg/services/tree/drop.go b/pkg/services/tree/drop.go
index c0750cbdc..a9e4e2e71 100644
--- a/pkg/services/tree/drop.go
+++ b/pkg/services/tree/drop.go
@@ -7,8 +7,8 @@ import (
)
// DropTree drops a tree from the database. If treeID is empty, all the trees are dropped.
-func (s *Service) DropTree(_ context.Context, cid cid.ID, treeID string) error {
+func (s *Service) DropTree(ctx context.Context, cid cid.ID, treeID string) error {
// The only current use-case is a container removal, where all trees should be removed.
// Thus there is no need to replicate the operation on other node.
- return s.forest.TreeDrop(cid, treeID)
+ return s.forest.TreeDrop(ctx, cid, treeID)
}
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index fd65ac3f0..dc4ce29aa 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -1,6 +1,7 @@
package tree
import (
+ "context"
"errors"
"testing"
@@ -32,7 +33,7 @@ func TestGetSubTree(t *testing.T) {
meta := []pilorama.KeyValue{
{Key: pilorama.AttributeFilename, Value: []byte(path[len(path)-1])}}
- lm, err := p.TreeAddByPath(d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta)
+ lm, err := p.TreeAddByPath(context.Background(), d, treeID, pilorama.AttributeFilename, path[:len(path)-1], meta)
require.NoError(t, err)
require.Equal(t, 1, len(lm))
@@ -41,7 +42,7 @@ func TestGetSubTree(t *testing.T) {
testGetSubTree := func(t *testing.T, rootID uint64, depth uint32, errIndex int) []uint64 {
acc := subTreeAcc{errIndex: errIndex}
- err := getSubTree(&acc, d.CID, &GetSubTreeRequest_Body{
+ err := getSubTree(context.Background(), &acc, d.CID, &GetSubTreeRequest_Body{
TreeId: treeID,
RootId: rootID,
Depth: depth,
@@ -68,7 +69,7 @@ func TestGetSubTree(t *testing.T) {
// GetSubTree must return valid meta.
for i := range acc.seen {
b := acc.seen[i].Body
- meta, node, err := p.TreeGetMeta(d.CID, treeID, b.NodeId)
+ meta, node, err := p.TreeGetMeta(context.Background(), d.CID, treeID, b.NodeId)
require.NoError(t, err)
require.Equal(t, node, b.ParentId)
require.Equal(t, meta.Time, b.Timestamp)
diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go
index 9594514f1..3de71b554 100644
--- a/pkg/services/tree/redirect.go
+++ b/pkg/services/tree/redirect.go
@@ -5,8 +5,11 @@ import (
"context"
"errors"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -25,6 +28,12 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo
for _, n := range cntNodes {
var stop bool
n.IterateNetworkEndpoints(func(endpoint string) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints",
+ trace.WithAttributes(
+ attribute.String("endpoint", endpoint),
+ ))
+ defer span.End()
+
c, err := s.cache.get(ctx, endpoint)
if err != nil {
return false
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index 98ed3df39..60d0eff50 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -8,10 +8,13 @@ import (
"fmt"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -38,17 +41,25 @@ const (
defaultReplicatorSendTimeout = time.Second * 5
)
-func (s *Service) localReplicationWorker() {
+func (s *Service) localReplicationWorker(ctx context.Context) {
for {
select {
case <-s.closeCh:
return
case op := <-s.replicateLocalCh:
- err := s.forest.TreeApply(op.cid, op.treeID, &op.Move, false)
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationOperation",
+ trace.WithAttributes(
+ attribute.String("tree_id", op.treeID),
+ attribute.String("container_id", op.cid.EncodeToString()),
+ ),
+ )
+
+ err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false)
if err != nil {
s.log.Error(logs.TreeFailedToApplyReplicatedOperation,
zap.String("err", err.Error()))
}
+ span.End()
}
}
}
@@ -59,10 +70,24 @@ func (s *Service) replicationWorker(ctx context.Context) {
case <-s.closeCh:
return
case task := <-s.replicationTasks:
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTask",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(task.n.PublicKey())),
+ ),
+ )
+
var lastErr error
var lastAddr string
task.n.IterateNetworkEndpoints(func(addr string) bool {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint",
+ trace.WithAttributes(
+ attribute.String("public_key", hex.EncodeToString(task.n.PublicKey())),
+ attribute.String("address", addr),
+ ),
+ )
+ defer span.End()
+
lastAddr = addr
c, err := s.cache.get(ctx, addr)
@@ -89,6 +114,7 @@ func (s *Service) replicationWorker(ctx context.Context) {
zap.String("key", hex.EncodeToString(task.n.PublicKey())))
}
}
+ span.End()
}
}
}
@@ -96,7 +122,7 @@ func (s *Service) replicationWorker(ctx context.Context) {
func (s *Service) replicateLoop(ctx context.Context) {
for i := 0; i < s.replicatorWorkerCount; i++ {
go s.replicationWorker(ctx)
- go s.localReplicationWorker()
+ go s.localReplicationWorker(ctx)
}
defer func() {
for len(s.replicationTasks) != 0 {
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index edea450f1..35dfda3df 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -119,7 +119,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
- log, err := s.forest.TreeMove(d, b.GetTreeId(), &pilorama.Move{
+ log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{
Parent: b.GetParentId(),
Child: pilorama.RootID,
Meta: pilorama.Meta{Items: protoToMeta(b.GetMeta())},
@@ -174,7 +174,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
- logs, err := s.forest.TreeAddByPath(d, b.GetTreeId(), attr, b.GetPath(), meta)
+ logs, err := s.forest.TreeAddByPath(ctx, d, b.GetTreeId(), attr, b.GetPath(), meta)
if err != nil {
return nil, err
}
@@ -231,7 +231,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
- log, err := s.forest.TreeMove(d, b.GetTreeId(), &pilorama.Move{
+ log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{
Parent: pilorama.TrashID,
Child: b.GetNodeId(),
})
@@ -280,7 +280,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er
}
d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)}
- log, err := s.forest.TreeMove(d, b.GetTreeId(), &pilorama.Move{
+ log, err := s.forest.TreeMove(ctx, d, b.GetTreeId(), &pilorama.Move{
Parent: b.GetParentId(),
Child: b.GetNodeId(),
Meta: pilorama.Meta{Items: protoToMeta(b.GetMeta())},
@@ -328,14 +328,14 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
attr = pilorama.AttributeFilename
}
- nodes, err := s.forest.TreeGetByPath(cid, b.GetTreeId(), attr, b.GetPath(), b.GetLatestOnly())
+ nodes, err := s.forest.TreeGetByPath(ctx, cid, b.GetTreeId(), attr, b.GetPath(), b.GetLatestOnly())
if err != nil {
return nil, err
}
info := make([]*GetNodeByPathResponse_Info, 0, len(nodes))
for _, node := range nodes {
- m, parent, err := s.forest.TreeGetMeta(cid, b.GetTreeId(), node)
+ m, parent, err := s.forest.TreeGetMeta(ctx, cid, b.GetTreeId(), node)
if err != nil {
return nil, err
}
@@ -406,10 +406,10 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS
return nil
}
- return getSubTree(srv, cid, b, s.forest)
+ return getSubTree(srv.Context(), srv, cid, b, s.forest)
}
-func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
+func getSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error {
// Traverse the tree in a DFS manner. Because we need to support arbitrary depth,
// recursive implementation is not suitable here, so we maintain explicit stack.
stack := [][]uint64{{b.GetRootId()}}
@@ -425,7 +425,7 @@ func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRe
nodeID := stack[len(stack)-1][0]
stack[len(stack)-1] = stack[len(stack)-1][1:]
- m, p, err := forest.TreeGetMeta(cid, b.GetTreeId(), nodeID)
+ m, p, err := forest.TreeGetMeta(ctx, cid, b.GetTreeId(), nodeID)
if err != nil {
return err
}
@@ -442,7 +442,7 @@ func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRe
}
if b.GetDepth() == 0 || uint32(len(stack)) < b.GetDepth() {
- children, err := forest.TreeGetChildren(cid, b.GetTreeId(), nodeID)
+ children, err := forest.TreeGetChildren(ctx, cid, b.GetTreeId(), nodeID)
if err != nil {
return err
}
@@ -455,7 +455,7 @@ func getSubTree(srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRe
}
// Apply locally applies operation from the remote node to the tree.
-func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) {
+func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) {
err := verifyMessage(req)
if err != nil {
return nil, err
@@ -468,7 +468,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e
key := req.GetSignature().GetKey()
- _, pos, _, err := s.getContainerInfo(cid, key)
+ _, pos, _, err := s.getContainerInfo(ctx, cid, key)
if err != nil {
return nil, err
}
@@ -532,7 +532,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
h := b.GetHeight()
for {
- lm, err := s.forest.TreeGetOpLog(cid, b.GetTreeId(), h)
+ lm, err := s.forest.TreeGetOpLog(srv.Context(), cid, b.GetTreeId(), h)
if err != nil || lm.Time == 0 {
return err
}
@@ -587,7 +587,7 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
return resp, outErr
}
- ids, err := s.forest.TreeList(cid)
+ ids, err := s.forest.TreeList(ctx, cid)
if err != nil {
return nil, err
}
@@ -623,7 +623,7 @@ func metaToProto(arr []pilorama.KeyValue) []*KeyValue {
// getContainerInfo returns the list of container nodes, position in the container for the node
// with pub key and total amount of nodes in all replicas.
-func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
+func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) {
cntNodes, _, err := s.getContainerNodes(cid)
if err != nil {
return nil, 0, 0, err
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 91f43900f..6c4f585a6 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -85,7 +85,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
}
for _, tid := range treesToSync {
- h, err := s.forest.TreeLastSyncHeight(cid, tid)
+ h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid)
if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
@@ -94,7 +94,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error {
}
newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes)
if h < newHeight {
- if err := s.forest.TreeUpdateLastSyncHeight(cid, tid, newHeight); err != nil {
+ if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil {
s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree,
zap.Stringer("cid", cid),
zap.String("tree", tid))
@@ -232,7 +232,7 @@ func (s *Service) synchronizeSingle(ctx context.Context, cid cid.ID, treeID stri
if err := m.Meta.FromBytes(lm.Meta); err != nil {
return newHeight, err
}
- if err := s.forest.TreeApply(cid, treeID, m, true); err != nil {
+ if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil {
return newHeight, err
}
if m.Time > newHeight {
@@ -284,11 +284,13 @@ func (s *Service) syncLoop(ctx context.Context) {
case <-ctx.Done():
return
case <-s.syncChan:
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync")
s.log.Debug(logs.TreeSyncingTrees)
cnrs, err := s.cfg.cnrSource.List()
if err != nil {
s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err))
+ span.End()
continue
}
@@ -299,11 +301,15 @@ func (s *Service) syncLoop(ctx context.Context) {
s.removeContainers(ctx, newMap)
s.log.Debug(logs.TreeTreesHaveBeenSynchronized)
+ span.End()
}
}
}
func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.syncContainers")
+ defer span.End()
+
// sync new containers
var wg sync.WaitGroup
for _, cnr := range cnrs {
@@ -335,6 +341,9 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
}
func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID]struct{}) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.removeContainers")
+ defer span.End()
+
s.cnrMapMtx.Lock()
defer s.cnrMapMtx.Unlock()
From 560f73ab7ea83086f964505957967e1472ccfd1d Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Thu, 13 Apr 2023 20:06:34 +0300
Subject: [PATCH 0113/1943] [#247] node, ir: Drop reputation-related code
Signed-off-by: Pavel Karpy
---
cmd/frostfs-ir/defaults.go | 1 -
cmd/frostfs-node/config.go | 27 --
cmd/frostfs-node/container.go | 2 +-
cmd/frostfs-node/main.go | 1 -
cmd/frostfs-node/morph.go | 3 -
cmd/frostfs-node/object.go | 192 +--------
cmd/frostfs-node/reputation.go | 386 ------------------
cmd/frostfs-node/reputation/common/remote.go | 101 -----
cmd/frostfs-node/reputation/common/util.go | 53 ---
.../reputation/intermediate/calculator.go | 57 ---
.../reputation/intermediate/consumers.go | 66 ---
.../reputation/intermediate/contract.go | 147 -------
.../reputation/intermediate/daughters.go | 51 ---
.../reputation/intermediate/remote.go | 125 ------
.../reputation/intermediate/storage.go | 64 ---
.../reputation/internal/client/client.go | 101 -----
.../reputation/internal/client/doc.go | 11 -
cmd/frostfs-node/reputation/local/remote.go | 113 -----
cmd/frostfs-node/reputation/local/storage.go | 108 -----
cmd/frostfs-node/reputation/ticker/fixed.go | 90 ----
.../reputation/ticker/fixed_test.go | 118 ------
cmd/frostfs-node/timers.go | 43 --
pkg/innerring/initialization.go | 37 --
.../processors/reputation/handlers.go | 29 --
.../processors/reputation/process_put.go | 99 -----
.../processors/reputation/processor.go | 156 -------
pkg/innerring/rpc.go | 2 +-
pkg/network/cache/client.go | 2 +-
.../transport/reputation/grpc/service.go | 50 ---
pkg/services/object/head/remote.go | 2 +-
pkg/services/reputation/common/deps.go | 78 ----
pkg/services/reputation/common/managers.go | 133 ------
.../reputation/common/router/calls.go | 139 -------
pkg/services/reputation/common/router/deps.go | 28 --
pkg/services/reputation/common/router/opts.go | 28 --
.../reputation/common/router/router.go | 81 ----
pkg/services/reputation/common/router/util.go | 40 --
.../eigentrust/calculator/calculator.go | 90 ----
.../reputation/eigentrust/calculator/calls.go | 295 -------------
.../reputation/eigentrust/calculator/deps.go | 74 ----
.../reputation/eigentrust/calculator/opts.go | 30 --
.../reputation/eigentrust/controller/calls.go | 73 ----
.../eigentrust/controller/controller.go | 86 ----
.../reputation/eigentrust/controller/deps.go | 37 --
.../reputation/eigentrust/controller/opts.go | 30 --
.../reputation/eigentrust/iteration.go | 44 --
.../reputation/eigentrust/routes/builder.go | 59 ---
.../reputation/eigentrust/routes/calls.go | 33 --
.../eigentrust/storage/consumers/calls.go | 201 ---------
.../eigentrust/storage/consumers/storage.go | 40 --
.../eigentrust/storage/daughters/calls.go | 177 --------
.../eigentrust/storage/daughters/storage.go | 38 --
.../reputation/local/controller/calls.go | 193 ---------
.../reputation/local/controller/controller.go | 84 ----
.../reputation/local/controller/deps.go | 34 --
.../reputation/local/controller/opts.go | 30 --
.../reputation/local/controller/util.go | 32 --
.../reputation/local/routes/builder.go | 59 ---
pkg/services/reputation/local/routes/calls.go | 33 --
.../reputation/local/storage/calls.go | 175 --------
.../reputation/local/storage/storage.go | 41 --
pkg/services/reputation/rpc/response.go | 50 ---
pkg/services/reputation/rpc/server.go | 13 -
pkg/services/reputation/rpc/sign.go | 54 ---
pkg/services/reputation/trust.go | 102 -----
65 files changed, 15 insertions(+), 4956 deletions(-)
delete mode 100644 cmd/frostfs-node/reputation.go
delete mode 100644 cmd/frostfs-node/reputation/common/remote.go
delete mode 100644 cmd/frostfs-node/reputation/common/util.go
delete mode 100644 cmd/frostfs-node/reputation/intermediate/calculator.go
delete mode 100644 cmd/frostfs-node/reputation/intermediate/consumers.go
delete mode 100644 cmd/frostfs-node/reputation/intermediate/contract.go
delete mode 100644 cmd/frostfs-node/reputation/intermediate/daughters.go
delete mode 100644 cmd/frostfs-node/reputation/intermediate/remote.go
delete mode 100644 cmd/frostfs-node/reputation/intermediate/storage.go
delete mode 100644 cmd/frostfs-node/reputation/internal/client/client.go
delete mode 100644 cmd/frostfs-node/reputation/internal/client/doc.go
delete mode 100644 cmd/frostfs-node/reputation/local/remote.go
delete mode 100644 cmd/frostfs-node/reputation/local/storage.go
delete mode 100644 cmd/frostfs-node/reputation/ticker/fixed.go
delete mode 100644 cmd/frostfs-node/reputation/ticker/fixed_test.go
delete mode 100644 cmd/frostfs-node/timers.go
delete mode 100644 pkg/innerring/processors/reputation/handlers.go
delete mode 100644 pkg/innerring/processors/reputation/process_put.go
delete mode 100644 pkg/innerring/processors/reputation/processor.go
delete mode 100644 pkg/network/transport/reputation/grpc/service.go
delete mode 100644 pkg/services/reputation/common/deps.go
delete mode 100644 pkg/services/reputation/common/managers.go
delete mode 100644 pkg/services/reputation/common/router/calls.go
delete mode 100644 pkg/services/reputation/common/router/deps.go
delete mode 100644 pkg/services/reputation/common/router/opts.go
delete mode 100644 pkg/services/reputation/common/router/router.go
delete mode 100644 pkg/services/reputation/common/router/util.go
delete mode 100644 pkg/services/reputation/eigentrust/calculator/calculator.go
delete mode 100644 pkg/services/reputation/eigentrust/calculator/calls.go
delete mode 100644 pkg/services/reputation/eigentrust/calculator/deps.go
delete mode 100644 pkg/services/reputation/eigentrust/calculator/opts.go
delete mode 100644 pkg/services/reputation/eigentrust/controller/calls.go
delete mode 100644 pkg/services/reputation/eigentrust/controller/controller.go
delete mode 100644 pkg/services/reputation/eigentrust/controller/deps.go
delete mode 100644 pkg/services/reputation/eigentrust/controller/opts.go
delete mode 100644 pkg/services/reputation/eigentrust/iteration.go
delete mode 100644 pkg/services/reputation/eigentrust/routes/builder.go
delete mode 100644 pkg/services/reputation/eigentrust/routes/calls.go
delete mode 100644 pkg/services/reputation/eigentrust/storage/consumers/calls.go
delete mode 100644 pkg/services/reputation/eigentrust/storage/consumers/storage.go
delete mode 100644 pkg/services/reputation/eigentrust/storage/daughters/calls.go
delete mode 100644 pkg/services/reputation/eigentrust/storage/daughters/storage.go
delete mode 100644 pkg/services/reputation/local/controller/calls.go
delete mode 100644 pkg/services/reputation/local/controller/controller.go
delete mode 100644 pkg/services/reputation/local/controller/deps.go
delete mode 100644 pkg/services/reputation/local/controller/opts.go
delete mode 100644 pkg/services/reputation/local/controller/util.go
delete mode 100644 pkg/services/reputation/local/routes/builder.go
delete mode 100644 pkg/services/reputation/local/routes/calls.go
delete mode 100644 pkg/services/reputation/local/storage/calls.go
delete mode 100644 pkg/services/reputation/local/storage/storage.go
delete mode 100644 pkg/services/reputation/rpc/response.go
delete mode 100644 pkg/services/reputation/rpc/server.go
delete mode 100644 pkg/services/reputation/rpc/sign.go
delete mode 100644 pkg/services/reputation/trust.go
diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go
index bd374fcb5..57959c1cf 100644
--- a/cmd/frostfs-ir/defaults.go
+++ b/cmd/frostfs-ir/defaults.go
@@ -142,7 +142,6 @@ func setWorkersDefaults(cfg *viper.Viper) {
cfg.SetDefault("workers.frostfs", "10")
cfg.SetDefault("workers.container", "10")
cfg.SetDefault("workers.alphabet", "10")
- cfg.SetDefault("workers.reputation", "10")
cfg.SetDefault("workers.subnet", "10")
}
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index d110665f5..987d27fcd 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -55,8 +55,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone"
tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- trustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller"
- truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@@ -414,7 +412,6 @@ type cfg struct {
cfgNodeInfo cfgNodeInfo
cfgNetmap cfgNetmap
cfgControlService cfgControlService
- cfgReputation cfgReputation
cfgObject cfgObject
cfgNotifications cfgNotifications
}
@@ -452,8 +449,6 @@ type cfgMorph struct {
// TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration
- eigenTrustTicker *eigenTrustTickers // timers for EigenTrust iterations
-
proxyScriptHash neogoutil.Uint160
}
@@ -532,16 +527,6 @@ type cfgControlService struct {
server *grpc.Server
}
-type cfgReputation struct {
- workerPool util.WorkerPool // pool for EigenTrust algorithm's iterations
-
- localTrustStorage *truststorage.Storage
-
- localTrustCtrl *trustcontroller.Controller
-
- scriptHash neogoutil.Uint160
-}
-
var persistateSideChainLastBlockKey = []byte("side_chain_last_processed_block")
func initCfg(appCfg *config.Config) *cfg {
@@ -582,8 +567,6 @@ func initCfg(appCfg *config.Config) *cfg {
}
c.cfgObject = initCfgObject(appCfg)
- c.cfgReputation = initReputation(appCfg)
-
user.IDFromKey(&c.ownerIDFromKey, key.PrivateKey.PublicKey)
c.metricsCollector = metrics.NewNodeMetrics()
@@ -662,16 +645,6 @@ func initContainer(appCfg *config.Config) cfgContainer {
}
}
-func initReputation(appCfg *config.Config) cfgReputation {
- reputationWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
- fatalOnErr(err)
-
- return cfgReputation{
- scriptHash: contractsconfig.Reputation(appCfg),
- workerPool: reputationWorkerPool,
- }
-}
-
func initCfgGRPC() cfgGRPC {
maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload
maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index d5d8601e3..633d4b261 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -330,7 +330,7 @@ type remoteLoadAnnounceProvider struct {
netmapKeys netmapCore.AnnouncedKeys
clientCache interface {
- Get(client.NodeInfo) (client.Client, error)
+ Get(client.NodeInfo) (client.MultiAddressClient, error)
}
deadEndProvider loadcontroller.WriterProvider
diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go
index 786843b0b..a6ee52464 100644
--- a/cmd/frostfs-node/main.go
+++ b/cmd/frostfs-node/main.go
@@ -102,7 +102,6 @@ func initApp(ctx context.Context, c *cfg) {
initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) })
initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) })
initAndLog(c, "session", initSessionService)
- initAndLog(c, "reputation", func(c *cfg) { initReputationService(ctx, c) })
initAndLog(c, "notification", func(c *cfg) { initNotifications(ctx, c) })
initAndLog(c, "object", initObjectService)
initAndLog(c, "tree", initTreeService)
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 72378d8f3..2e086f994 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -235,8 +235,6 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
zap.String("chain", "side"),
zap.Uint32("block_index", block.Index))
}
-
- tickBlockTimers(c)
})
}
@@ -285,7 +283,6 @@ func lookupScriptHashesInNNS(c *cfg) {
{&c.cfgNetmap.scriptHash, client.NNSNetmapContractName},
{&c.cfgAccounting.scriptHash, client.NNSBalanceContractName},
{&c.cfgContainer.scriptHash, client.NNSContainerContractName},
- {&c.cfgReputation.scriptHash, client.NNSReputationContractName},
{&c.cfgMorph.proxyScriptHash, client.NNSProxyContractName},
}
)
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 08a202df9..83025a44c 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -1,7 +1,6 @@
package main
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -12,7 +11,6 @@ import (
policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -20,6 +18,7 @@ import (
morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache"
objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc"
objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl"
@@ -37,15 +36,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
- truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"go.uber.org/zap"
)
@@ -153,39 +147,12 @@ func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) {
return result, nil
}
-type coreClientConstructor reputationClientConstructor
-
-func (x *coreClientConstructor) Get(info coreclient.NodeInfo) (coreclient.MultiAddressClient, error) {
- c, err := (*reputationClientConstructor)(x).Get(info)
- if err != nil {
- return nil, err
- }
-
- return c.(coreclient.MultiAddressClient), nil
-}
-
func initObjectService(c *cfg) {
keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state)
- clientConstructor := &reputationClientConstructor{
- log: c.log,
- nmSrc: c.netMapSource,
- netState: c.cfgNetmap.state,
- trustStorage: c.cfgReputation.localTrustStorage,
- basicConstructor: c.bgClientCache,
- }
+ c.replicator = createReplicator(c, keyStorage, c.bgClientCache)
- coreConstructor := &coreClientConstructor{
- log: c.log,
- nmSrc: c.netMapSource,
- netState: c.cfgNetmap.state,
- trustStorage: c.cfgReputation.localTrustStorage,
- basicConstructor: c.clientCache,
- }
-
- c.replicator = createReplicator(c, keyStorage, clientConstructor)
-
- addPolicer(c, keyStorage, clientConstructor)
+ addPolicer(c, keyStorage, c.bgClientCache)
traverseGen := util.NewTraverserGenerator(c.netMapSource, c.cfgObject.cnrSource, c)
@@ -193,11 +160,11 @@ func initObjectService(c *cfg) {
sPutV2 := createPutSvcV2(sPut, keyStorage)
- sSearch := createSearchSvc(c, keyStorage, traverseGen, coreConstructor)
+ sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache)
sSearchV2 := createSearchSvcV2(sSearch, keyStorage)
- sGet := createGetService(c, keyStorage, traverseGen, coreConstructor)
+ sGet := createGetService(c, keyStorage, traverseGen, c.clientCache)
*c.cfgObject.getSvc = *sGet // need smth better
@@ -236,7 +203,7 @@ func initObjectService(c *cfg) {
}
}
-func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputationClientConstructor) {
+func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) {
ls := c.cfgObject.cfgLocalStorage.localStorage
pol := policer.New(
@@ -288,7 +255,7 @@ func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher {
}
}
-func createReplicator(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputationClientConstructor) *replicator.Replicator {
+func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCache) *replicator.Replicator {
ls := c.cfgObject.cfgLocalStorage.localStorage
return replicator.New(
@@ -298,7 +265,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, clientConstructor *re
),
replicator.WithLocalStorage(ls),
replicator.WithRemoteSender(
- putsvc.NewRemoteSender(keyStorage, (*coreClientConstructor)(clientConstructor)),
+ putsvc.NewRemoteSender(keyStorage, cache),
),
)
}
@@ -319,17 +286,9 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage) *putsvc.Service {
}
}
- putConstructor := &coreClientConstructor{
- log: c.log,
- nmSrc: c.netMapSource,
- netState: c.cfgNetmap.state,
- trustStorage: c.cfgReputation.localTrustStorage,
- basicConstructor: c.putClientCache,
- }
-
return putsvc.NewService(
putsvc.WithKeyStorage(keyStorage),
- putsvc.WithClientConstructor(putConstructor),
+ putsvc.WithClientConstructor(c.putClientCache),
putsvc.WithMaxSizeSource(newCachedMaxObjectSizeSource(c)),
putsvc.WithObjectStorage(os),
putsvc.WithContainerSource(c.cfgObject.cnrSource),
@@ -348,7 +307,7 @@ func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2
)
}
-func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *coreClientConstructor) *searchsvc.Service {
+func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
return searchsvc.New(
@@ -373,7 +332,7 @@ func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage)
}
func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator,
- coreConstructor *coreClientConstructor) *getsvc.Service {
+ coreConstructor *cache.ClientCache) *getsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
return getsvc.New(
@@ -480,135 +439,6 @@ func (s *morphEACLFetcher) GetEACL(cnr cid.ID) (*containercore.EACL, error) {
return eaclInfo, nil
}
-type reputationClientConstructor struct {
- log *logger.Logger
-
- nmSrc netmap.Source
-
- netState netmap.State
-
- trustStorage *truststorage.Storage
-
- basicConstructor interface {
- Get(coreclient.NodeInfo) (coreclient.Client, error)
- }
-}
-
-type reputationClient struct {
- coreclient.MultiAddressClient
-
- prm truststorage.UpdatePrm
-
- cons *reputationClientConstructor
-}
-
-func (c *reputationClient) submitResult(err error) {
- currEpoch := c.cons.netState.CurrentEpoch()
- sat := err == nil
-
- c.cons.log.Debug(
- "writing local reputation values",
- zap.Uint64("epoch", currEpoch),
- zap.Bool("satisfactory", sat),
- )
-
- prm := c.prm
- prm.SetSatisfactory(sat)
- prm.SetEpoch(currEpoch)
-
- c.cons.trustStorage.Update(prm)
-}
-
-func (c *reputationClient) ObjectPutInit(ctx context.Context, prm client.PrmObjectPutInit) (*client.ObjectWriter, error) {
- res, err := c.MultiAddressClient.ObjectPutInit(ctx, prm)
-
- // FIXME: (neofs-node#1193) here we submit only initialization errors, writing errors are not processed
- c.submitResult(err)
-
- return res, err
-}
-
-func (c *reputationClient) ObjectDelete(ctx context.Context, prm client.PrmObjectDelete) (*client.ResObjectDelete, error) {
- res, err := c.MultiAddressClient.ObjectDelete(ctx, prm)
- if err != nil {
- c.submitResult(err)
- } else {
- c.submitResult(apistatus.ErrFromStatus(res.Status()))
- }
-
- return res, err
-}
-
-func (c *reputationClient) GetObjectInit(ctx context.Context, prm client.PrmObjectGet) (*client.ObjectReader, error) {
- res, err := c.MultiAddressClient.ObjectGetInit(ctx, prm)
-
- // FIXME: (neofs-node#1193) here we submit only initialization errors, reading errors are not processed
- c.submitResult(err)
-
- return res, err
-}
-
-func (c *reputationClient) ObjectHead(ctx context.Context, prm client.PrmObjectHead) (*client.ResObjectHead, error) {
- res, err := c.MultiAddressClient.ObjectHead(ctx, prm)
-
- c.submitResult(err)
-
- return res, err
-}
-
-func (c *reputationClient) ObjectHash(ctx context.Context, prm client.PrmObjectHash) (*client.ResObjectHash, error) {
- res, err := c.MultiAddressClient.ObjectHash(ctx, prm)
-
- c.submitResult(err)
-
- return res, err
-}
-
-func (c *reputationClient) ObjectSearchInit(ctx context.Context, prm client.PrmObjectSearch) (*client.ObjectListReader, error) {
- res, err := c.MultiAddressClient.ObjectSearchInit(ctx, prm)
-
- // FIXME: (neofs-node#1193) here we submit only initialization errors, reading errors are not processed
- c.submitResult(err)
-
- return res, err
-}
-
-func (c *reputationClientConstructor) Get(info coreclient.NodeInfo) (coreclient.Client, error) {
- cl, err := c.basicConstructor.Get(info)
- if err != nil {
- return nil, err
- }
-
- nm, err := netmap.GetLatestNetworkMap(c.nmSrc)
- if err == nil {
- key := info.PublicKey()
-
- nmNodes := nm.Nodes()
- var peer apireputation.PeerID
-
- for i := range nmNodes {
- if bytes.Equal(nmNodes[i].PublicKey(), key) {
- peer.SetPublicKey(nmNodes[i].PublicKey())
-
- prm := truststorage.UpdatePrm{}
- prm.SetPeer(peer)
-
- return &reputationClient{
- MultiAddressClient: cl.(coreclient.MultiAddressClient),
- prm: prm,
- cons: c,
- }, nil
- }
- }
- } else {
- c.log.Warn(logs.FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient,
- zap.String("error", err.Error()),
- )
- }
-
- return cl, nil
-}
-
type engineWithNotifications struct {
base putsvc.ObjectStorage
nw notificationWriter
diff --git a/cmd/frostfs-node/reputation.go b/cmd/frostfs-node/reputation.go
deleted file mode 100644
index b3acf7eb0..000000000
--- a/cmd/frostfs-node/reputation.go
+++ /dev/null
@@ -1,386 +0,0 @@
-package main
-
-import (
- "context"
- "fmt"
-
- v2reputation "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
- v2reputationgrpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
- intermediatereputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/intermediate"
- localreputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/local"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
- grpcreputation "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/reputation/grpc"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- reputationrouter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common/router"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- eigentrustctrl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/controller"
- intermediateroutes "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/routes"
- consumerstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/consumers"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
- localtrustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller"
- localroutes "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/routes"
- truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage"
- reputationrpc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/rpc"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "go.uber.org/zap"
-)
-
-func initReputationService(ctx context.Context, c *cfg) {
- wrap, err := repClient.NewFromMorph(c.cfgMorph.client, c.cfgReputation.scriptHash, 0, repClient.TryNotary())
- fatalOnErr(err)
-
- localKey := c.key.PublicKey().Bytes()
-
- nmSrc := c.netMapSource
-
- // storing calculated trusts as a daughter
- c.cfgReputation.localTrustStorage = truststorage.New(
- truststorage.Prm{},
- )
-
- daughterStorage := daughters.New(daughters.Prm{})
- consumerStorage := consumerstorage.New(consumerstorage.Prm{})
-
- localTrustLogger := &logger.Logger{Logger: c.log.With(zap.String("trust_type", "local"))}
-
- managerBuilder := reputationcommon.NewManagerBuilder(
- reputationcommon.ManagersPrm{
- NetMapSource: nmSrc,
- },
- reputationcommon.WithLogger(c.log),
- )
-
- localRouteBuilder := localroutes.New(
- localroutes.Prm{
- ManagerBuilder: managerBuilder,
- Log: localTrustLogger,
- },
- )
-
- localTrustRouter := createLocalTrustRouter(c, localRouteBuilder, localTrustLogger, daughterStorage)
-
- intermediateTrustRouter := createIntermediateTrustRouter(c, consumerStorage, managerBuilder)
-
- eigenTrustController := createEigenTrustController(c, intermediateTrustRouter, localKey, wrap, daughterStorage, consumerStorage)
-
- c.cfgReputation.localTrustCtrl = createLocalTrustController(c, localTrustLogger, localKey, localTrustRouter)
-
- addReputationReportHandler(ctx, c)
-
- server := grpcreputation.New(
- reputationrpc.NewSignService(
- &c.key.PrivateKey,
- reputationrpc.NewResponseService(
- &reputationServer{
- cfg: c,
- log: c.log,
- localRouter: localTrustRouter,
- intermediateRouter: intermediateTrustRouter,
- routeBuilder: localRouteBuilder,
- },
- c.respSvc,
- ),
- ),
- )
-
- for _, srv := range c.cfgGRPC.servers {
- v2reputationgrpc.RegisterReputationServiceServer(srv, server)
- }
-
- // initialize eigen trust block timer
- newEigenTrustIterTimer(c)
-
- addEigenTrustEpochHandler(ctx, c, eigenTrustController)
-}
-
-func addReputationReportHandler(ctx context.Context, c *cfg) {
- addNewEpochAsyncNotificationHandler(
- c,
- func(ev event.Event) {
- c.log.Debug(logs.FrostFSNodeStartReportingReputationOnNewEpochEvent)
-
- var reportPrm localtrustcontroller.ReportPrm
-
- // report collected values from previous epoch
- reportPrm.SetEpoch(ev.(netmap.NewEpoch).EpochNumber() - 1)
-
- c.cfgReputation.localTrustCtrl.Report(ctx, reportPrm)
- },
- )
-}
-
-func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController *eigentrustctrl.Controller) {
- addNewEpochAsyncNotificationHandler(
- c,
- func(e event.Event) {
- epoch := e.(netmap.NewEpoch).EpochNumber()
-
- log := c.log.With(zap.Uint64("epoch", epoch))
-
- duration, err := c.cfgNetmap.wrapper.EpochDuration()
- if err != nil {
- log.Debug(logs.FrostFSNodeCouldNotFetchEpochDuration, zap.Error(err))
- return
- }
-
- iterations, err := c.cfgNetmap.wrapper.EigenTrustIterations()
- if err != nil {
- log.Debug(logs.FrostFSNodeCouldNotFetchIterationNumber, zap.Error(err))
- return
- }
-
- epochTimer, err := ticker.NewIterationsTicker(duration, iterations, func() {
- eigenTrustController.Continue(ctx,
- eigentrustctrl.ContinuePrm{
- Epoch: epoch - 1,
- },
- )
- })
- if err != nil {
- log.Debug(logs.FrostFSNodeCouldNotCreateFixedEpochTimer, zap.Error(err))
- return
- }
-
- c.cfgMorph.eigenTrustTicker.addEpochTimer(epoch, epochTimer)
- },
- )
-}
-
-func createLocalTrustRouter(c *cfg, localRouteBuilder *localroutes.Builder, localTrustLogger *logger.Logger, daughterStorage *daughters.Storage) *reputationrouter.Router {
- // storing received daughter(of current node) trusts as a manager
- daughterStorageWriterProvider := &intermediatereputation.DaughterStorageWriterProvider{
- Log: c.log,
- Storage: daughterStorage,
- }
-
- remoteLocalTrustProvider := common.NewRemoteTrustProvider(
- common.RemoteProviderPrm{
- NetmapKeys: c,
- DeadEndProvider: daughterStorageWriterProvider,
- ClientCache: c.bgClientCache,
- WriterProvider: localreputation.NewRemoteProvider(
- localreputation.RemoteProviderPrm{
- Key: &c.key.PrivateKey,
- Log: localTrustLogger,
- },
- ),
- Log: localTrustLogger,
- },
- )
-
- localTrustRouter := reputationrouter.New(
- reputationrouter.Prm{
- LocalServerInfo: c,
- RemoteWriterProvider: remoteLocalTrustProvider,
- Builder: localRouteBuilder,
- },
- reputationrouter.WithLogger(localTrustLogger))
- return localTrustRouter
-}
-
-func createIntermediateTrustRouter(c *cfg, consumerStorage *consumerstorage.Storage, managerBuilder reputationcommon.ManagerBuilder) *reputationrouter.Router {
- intermediateTrustLogger := &logger.Logger{Logger: c.log.With(zap.String("trust_type", "intermediate"))}
-
- consumerStorageWriterProvider := &intermediatereputation.ConsumerStorageWriterProvider{
- Log: c.log,
- Storage: consumerStorage,
- }
-
- remoteIntermediateTrustProvider := common.NewRemoteTrustProvider(
- common.RemoteProviderPrm{
- NetmapKeys: c,
- DeadEndProvider: consumerStorageWriterProvider,
- ClientCache: c.bgClientCache,
- WriterProvider: intermediatereputation.NewRemoteProvider(
- intermediatereputation.RemoteProviderPrm{
- Key: &c.key.PrivateKey,
- Log: intermediateTrustLogger,
- },
- ),
- Log: intermediateTrustLogger,
- },
- )
-
- intermediateRouteBuilder := intermediateroutes.New(
- intermediateroutes.Prm{
- ManagerBuilder: managerBuilder,
- Log: intermediateTrustLogger,
- },
- )
-
- intermediateTrustRouter := reputationrouter.New(
- reputationrouter.Prm{
- LocalServerInfo: c,
- RemoteWriterProvider: remoteIntermediateTrustProvider,
- Builder: intermediateRouteBuilder,
- },
- reputationrouter.WithLogger(intermediateTrustLogger),
- )
- return intermediateTrustRouter
-}
-
-func createEigenTrustController(c *cfg, intermediateTrustRouter *reputationrouter.Router, localKey []byte, wrap *repClient.Client,
- daughterStorage *daughters.Storage, consumerStorage *consumerstorage.Storage) *eigentrustctrl.Controller {
- eigenTrustCalculator := eigentrustcalc.New(
- eigentrustcalc.Prm{
- AlphaProvider: c.cfgNetmap.wrapper,
- InitialTrustSource: intermediatereputation.InitialTrustSource{
- NetMap: c.netMapSource,
- },
- IntermediateValueTarget: intermediateTrustRouter,
- WorkerPool: c.cfgReputation.workerPool,
- FinalResultTarget: intermediatereputation.NewFinalWriterProvider(
- intermediatereputation.FinalWriterProviderPrm{
- PrivatKey: &c.key.PrivateKey,
- PubKey: localKey,
- Client: wrap,
- },
- intermediatereputation.FinalWriterWithLogger(c.log),
- ),
- DaughterTrustSource: &intermediatereputation.DaughterTrustIteratorProvider{
- DaughterStorage: daughterStorage,
- ConsumerStorage: consumerStorage,
- },
- },
- eigentrustcalc.WithLogger(c.log),
- )
-
- eigenTrustController := eigentrustctrl.New(
- eigentrustctrl.Prm{
- DaughtersTrustCalculator: &intermediatereputation.DaughtersTrustCalculator{
- Calculator: eigenTrustCalculator,
- },
- IterationsProvider: c.cfgNetmap.wrapper,
- WorkerPool: c.cfgReputation.workerPool,
- },
- eigentrustctrl.WithLogger(c.log),
- )
- return eigenTrustController
-}
-
-func createLocalTrustController(c *cfg, localTrustLogger *logger.Logger, localKey []byte, localTrustRouter *reputationrouter.Router) *localtrustcontroller.Controller {
- localTrustStorage := &localreputation.TrustStorage{
- Log: localTrustLogger,
- Storage: c.cfgReputation.localTrustStorage,
- NmSrc: c.netMapSource,
- LocalKey: localKey,
- }
-
- return localtrustcontroller.New(
- localtrustcontroller.Prm{
- LocalTrustSource: localTrustStorage,
- LocalTrustTarget: localTrustRouter,
- },
- localtrustcontroller.WithLogger(c.log),
- )
-}
-
-type reputationServer struct {
- *cfg
- log *logger.Logger
- localRouter *reputationrouter.Router
- intermediateRouter *reputationrouter.Router
- routeBuilder reputationrouter.Builder
-}
-
-func (s *reputationServer) AnnounceLocalTrust(ctx context.Context, req *v2reputation.AnnounceLocalTrustRequest) (*v2reputation.AnnounceLocalTrustResponse, error) {
- passedRoute := reverseRoute(req.GetVerificationHeader())
- passedRoute = append(passedRoute, s)
-
- body := req.GetBody()
-
- ep := &common.EpochProvider{
- E: body.GetEpoch(),
- }
-
- w, err := s.localRouter.InitWriter(reputationrouter.NewRouteInfo(ep, passedRoute))
- if err != nil {
- return nil, fmt.Errorf("could not initialize local trust writer: %w", err)
- }
-
- for _, trust := range body.GetTrusts() {
- err = s.processLocalTrust(ctx, body.GetEpoch(), apiToLocalTrust(&trust, passedRoute[0].PublicKey()), passedRoute, w)
- if err != nil {
- return nil, fmt.Errorf("could not write one of local trusts: %w", err)
- }
- }
-
- resp := new(v2reputation.AnnounceLocalTrustResponse)
- resp.SetBody(new(v2reputation.AnnounceLocalTrustResponseBody))
-
- return resp, nil
-}
-
-func (s *reputationServer) AnnounceIntermediateResult(ctx context.Context, req *v2reputation.AnnounceIntermediateResultRequest) (*v2reputation.AnnounceIntermediateResultResponse, error) {
- passedRoute := reverseRoute(req.GetVerificationHeader())
- passedRoute = append(passedRoute, s)
-
- body := req.GetBody()
-
- ei := eigentrust.NewEpochIteration(body.GetEpoch(), body.GetIteration())
-
- w, err := s.intermediateRouter.InitWriter(reputationrouter.NewRouteInfo(ei, passedRoute))
- if err != nil {
- return nil, fmt.Errorf("could not initialize trust writer: %w", err)
- }
-
- v2Trust := body.GetTrust()
-
- trust := apiToLocalTrust(v2Trust.GetTrust(), v2Trust.GetTrustingPeer().GetPublicKey())
-
- err = w.Write(ctx, trust)
- if err != nil {
- return nil, fmt.Errorf("could not write trust: %w", err)
- }
-
- resp := new(v2reputation.AnnounceIntermediateResultResponse)
- resp.SetBody(new(v2reputation.AnnounceIntermediateResultResponseBody))
-
- return resp, nil
-}
-
-func (s *reputationServer) processLocalTrust(ctx context.Context, epoch uint64, t reputation.Trust,
- passedRoute []reputationcommon.ServerInfo, w reputationcommon.Writer) error {
- err := reputationrouter.CheckRoute(s.routeBuilder, epoch, t, passedRoute)
- if err != nil {
- return fmt.Errorf("wrong route of reputation trust value: %w", err)
- }
-
- return w.Write(ctx, t)
-}
-
-// apiToLocalTrust converts v2 Trust to local reputation.Trust, adding trustingPeer.
-func apiToLocalTrust(t *v2reputation.Trust, trustingPeer []byte) reputation.Trust {
- var trusted, trusting apireputation.PeerID
- trusted.SetPublicKey(t.GetPeer().GetPublicKey())
- trusting.SetPublicKey(trustingPeer)
-
- localTrust := reputation.Trust{}
-
- localTrust.SetValue(reputation.TrustValueFromFloat64(t.GetValue()))
- localTrust.SetPeer(trusted)
- localTrust.SetTrustingPeer(trusting)
-
- return localTrust
-}
-
-func reverseRoute(hdr *session.RequestVerificationHeader) (passedRoute []reputationcommon.ServerInfo) {
- for hdr != nil {
- passedRoute = append(passedRoute, &common.OnlyKeyRemoteServerInfo{
- Key: hdr.GetBodySignature().GetKey(),
- })
-
- hdr = hdr.GetOrigin()
- }
-
- return
-}
diff --git a/cmd/frostfs-node/reputation/common/remote.go b/cmd/frostfs-node/reputation/common/remote.go
deleted file mode 100644
index f1982301f..000000000
--- a/cmd/frostfs-node/reputation/common/remote.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package common
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- trustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-type clientCache interface {
- Get(client.NodeInfo) (client.Client, error)
-}
-
-// clientKeyRemoteProvider must provide a remote writer and take into account
-// that requests must be sent via the passed api client and must be signed with
-// the passed private key.
-type clientKeyRemoteProvider interface {
- WithClient(client.Client) reputationcommon.WriterProvider
-}
-
-// RemoteTrustProvider is an implementation of reputation RemoteWriterProvider interface.
-// It caches clients, checks if it is the end of the route and checks either the current
-// node is a remote target or not.
-//
-// remoteTrustProvider requires to be provided with clientKeyRemoteProvider.
-type RemoteTrustProvider struct {
- netmapKeys netmap.AnnouncedKeys
- deadEndProvider reputationcommon.WriterProvider
- clientCache clientCache
- remoteProvider clientKeyRemoteProvider
- log *logger.Logger
-}
-
-// RemoteProviderPrm groups the required parameters of the remoteTrustProvider's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type RemoteProviderPrm struct {
- NetmapKeys netmap.AnnouncedKeys
- DeadEndProvider reputationcommon.WriterProvider
- ClientCache clientCache
- WriterProvider clientKeyRemoteProvider
- Log *logger.Logger
-}
-
-func NewRemoteTrustProvider(prm RemoteProviderPrm) *RemoteTrustProvider {
- switch {
- case prm.NetmapKeys == nil:
- PanicOnPrmValue("NetmapKeys", prm.NetmapKeys)
- case prm.DeadEndProvider == nil:
- PanicOnPrmValue("DeadEndProvider", prm.DeadEndProvider)
- case prm.ClientCache == nil:
- PanicOnPrmValue("ClientCache", prm.ClientCache)
- case prm.WriterProvider == nil:
- PanicOnPrmValue("WriterProvider", prm.WriterProvider)
- case prm.Log == nil:
- PanicOnPrmValue("Logger", prm.Log)
- }
-
- return &RemoteTrustProvider{
- netmapKeys: prm.NetmapKeys,
- deadEndProvider: prm.DeadEndProvider,
- clientCache: prm.ClientCache,
- remoteProvider: prm.WriterProvider,
- log: prm.Log,
- }
-}
-
-func (rtp *RemoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) {
- rtp.log.Debug(logs.CommonInitializingRemoteWriterProvider)
-
- if srv == nil {
- rtp.log.Debug(logs.CommonRouteHasReachedDeadendProvider)
- return rtp.deadEndProvider, nil
- }
-
- if rtp.netmapKeys.IsLocalKey(srv.PublicKey()) {
- // if local => return no-op writer
- rtp.log.Debug(logs.CommonInitializingNoopWriterProvider)
- return trustcontroller.SimpleWriterProvider(new(NopReputationWriter)), nil
- }
-
- var info client.NodeInfo
-
- err := client.NodeInfoFromRawNetmapElement(&info, srv)
- if err != nil {
- return nil, fmt.Errorf("parse client node info: %w", err)
- }
-
- c, err := rtp.clientCache.Get(info)
- if err != nil {
- return nil, fmt.Errorf("could not initialize API client: %w", err)
- }
-
- return rtp.remoteProvider.WithClient(c), nil
-}
diff --git a/cmd/frostfs-node/reputation/common/util.go b/cmd/frostfs-node/reputation/common/util.go
deleted file mode 100644
index 443adb388..000000000
--- a/cmd/frostfs-node/reputation/common/util.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package common
-
-import (
- "context"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
-)
-
-type EpochProvider struct {
- E uint64
-}
-
-func (ep *EpochProvider) Epoch() uint64 {
- return ep.E
-}
-
-type NopReputationWriter struct{}
-
-func (NopReputationWriter) Write(context.Context, reputation.Trust) error {
- return nil
-}
-
-func (NopReputationWriter) Close(context.Context) error {
- return nil
-}
-
-// OnlyKeyRemoteServerInfo is an implementation of reputation.ServerInfo
-// interface but with only public key data.
-type OnlyKeyRemoteServerInfo struct {
- Key []byte
-}
-
-func (i *OnlyKeyRemoteServerInfo) PublicKey() []byte {
- return i.Key
-}
-
-func (*OnlyKeyRemoteServerInfo) IterateAddresses(func(string) bool) {
-}
-
-func (*OnlyKeyRemoteServerInfo) NumberOfAddresses() int {
- return 0
-}
-
-func (*OnlyKeyRemoteServerInfo) ExternalAddresses() []string {
- return nil
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func PanicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
diff --git a/cmd/frostfs-node/reputation/intermediate/calculator.go b/cmd/frostfs-node/reputation/intermediate/calculator.go
deleted file mode 100644
index 73dd12311..000000000
--- a/cmd/frostfs-node/reputation/intermediate/calculator.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package intermediate
-
-import (
- "context"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- eigencalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- eigentrustctrl "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/controller"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// InitialTrustSource is an implementation of the
-// reputation/eigentrust/calculator's InitialTrustSource interface.
-type InitialTrustSource struct {
- NetMap netmap.Source
-}
-
-var ErrEmptyNetMap = errors.New("empty NepMap")
-
-// InitialTrust returns `initialTrust` as an initial trust value.
-func (i InitialTrustSource) InitialTrust(apireputation.PeerID) (reputation.TrustValue, error) {
- nm, err := i.NetMap.GetNetMap(1)
- if err != nil {
- return reputation.TrustZero, fmt.Errorf("failed to get NetMap: %w", err)
- }
-
- nodeCount := reputation.TrustValueFromFloat64(float64(len(nm.Nodes())))
- if nodeCount == 0 {
- return reputation.TrustZero, ErrEmptyNetMap
- }
-
- return reputation.TrustOne.Div(nodeCount), nil
-}
-
-// DaughtersTrustCalculator wraps EigenTrust calculator and implements the
-// eigentrust/calculator's DaughtersTrustCalculator interface.
-type DaughtersTrustCalculator struct {
- Calculator *eigencalc.Calculator
-}
-
-// Calculate converts and passes values to the wrapped calculator.
-func (c *DaughtersTrustCalculator) Calculate(ctx context.Context, iterCtx eigentrustctrl.IterationContext) {
- calcPrm := eigencalc.CalculatePrm{}
- epochIteration := eigentrust.EpochIteration{}
-
- epochIteration.SetEpoch(iterCtx.Epoch())
- epochIteration.SetI(iterCtx.I())
-
- calcPrm.SetLast(iterCtx.Last())
- calcPrm.SetEpochIteration(epochIteration)
-
- c.Calculator.Calculate(ctx, calcPrm)
-}
diff --git a/cmd/frostfs-node/reputation/intermediate/consumers.go b/cmd/frostfs-node/reputation/intermediate/consumers.go
deleted file mode 100644
index 02cdb2a2b..000000000
--- a/cmd/frostfs-node/reputation/intermediate/consumers.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package intermediate
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- eigencalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- consumerstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/consumers"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-var ErrIncorrectContextPanicMsg = "could not write intermediate trust: passed context incorrect"
-
-// ConsumerStorageWriterProvider is an implementation of the reputation.WriterProvider
-// interface that provides ConsumerTrustWriter writer.
-type ConsumerStorageWriterProvider struct {
- Log *logger.Logger
- Storage *consumerstorage.Storage
-}
-
-// ConsumerTrustWriter is an implementation of the reputation.Writer interface
-// that writes passed consumer's Trust values to the Consumer storage. After writing
-// that, values can be used in eigenTrust algorithm's iterations.
-type ConsumerTrustWriter struct {
- log *logger.Logger
- storage *consumerstorage.Storage
- iterInfo eigencalc.EpochIterationInfo
-}
-
-func (w *ConsumerTrustWriter) Write(_ context.Context, t reputation.Trust) error {
- w.log.Debug(logs.IntermediateWritingReceivedConsumersTrusts,
- zap.Uint64("epoch", w.iterInfo.Epoch()),
- zap.Uint32("iteration", w.iterInfo.I()),
- zap.Stringer("trusting_peer", t.TrustingPeer()),
- zap.Stringer("trusted_peer", t.Peer()),
- )
-
- trust := eigentrust.IterationTrust{Trust: t}
-
- trust.SetEpoch(w.iterInfo.Epoch())
- trust.SetI(w.iterInfo.I())
-
- w.storage.Put(trust)
- return nil
-}
-
-func (w *ConsumerTrustWriter) Close(context.Context) error {
- return nil
-}
-
-func (s *ConsumerStorageWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
- iterInfo, ok := ep.(eigencalc.EpochIterationInfo)
- if !ok {
- panic(ErrIncorrectContextPanicMsg)
- }
-
- return &ConsumerTrustWriter{
- log: s.Log,
- storage: s.Storage,
- iterInfo: iterInfo,
- }, nil
-}
diff --git a/cmd/frostfs-node/reputation/intermediate/contract.go b/cmd/frostfs-node/reputation/intermediate/contract.go
deleted file mode 100644
index 2d83598bc..000000000
--- a/cmd/frostfs-node/reputation/intermediate/contract.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package intermediate
-
-import (
- "crypto/ecdsa"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "go.uber.org/zap"
-)
-
-// FinalWriterProviderPrm groups the required parameters of the FinalWriterProvider's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type FinalWriterProviderPrm struct {
- PrivatKey *ecdsa.PrivateKey
- PubKey []byte
- Client *repClient.Client
-}
-
-// NewFinalWriterProvider creates a new instance of the FinalWriterProvider.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created FinalWriterProvider does not require additional
-// initialization and is completely ready for work.
-func NewFinalWriterProvider(prm FinalWriterProviderPrm, opts ...FinalWriterOption) *FinalWriterProvider {
- o := defaultFinalWriterOptionsOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &FinalWriterProvider{
- prm: prm,
- opts: o,
- }
-}
-
-// FinalWriterProvider is an implementation of the reputation.eigentrust.calculator
-// IntermediateWriterProvider interface. It inits FinalWriter.
-type FinalWriterProvider struct {
- prm FinalWriterProviderPrm
- opts *finalWriterOptions
-}
-
-func (fwp FinalWriterProvider) InitIntermediateWriter(
- _ eigentrustcalc.EpochIterationInfo) (eigentrustcalc.IntermediateWriter, error) {
- return &FinalWriter{
- privatKey: fwp.prm.PrivatKey,
- pubKey: fwp.prm.PubKey,
- client: fwp.prm.Client,
- l: fwp.opts.log,
- }, nil
-}
-
-// FinalWriter is an implementation of the reputation.eigentrust.calculator IntermediateWriter
-// interface that writes GlobalTrust to contract directly.
-type FinalWriter struct {
- privatKey *ecdsa.PrivateKey
- pubKey []byte
- client *repClient.Client
-
- l *logger.Logger
-}
-
-func (fw FinalWriter) WriteIntermediateTrust(t eigentrust.IterationTrust) error {
- fw.l.Debug(logs.IntermediateStartWritingGlobalTrustsToContract)
-
- args := repClient.PutPrm{}
-
- apiTrustedPeerID := t.Peer()
-
- var apiTrust apireputation.Trust
- apiTrust.SetValue(t.Value().Float64())
- apiTrust.SetPeer(t.Peer())
-
- var managerPublicKey [33]byte
- copy(managerPublicKey[:], fw.pubKey)
-
- var apiMangerPeerID apireputation.PeerID
- apiMangerPeerID.SetPublicKey(managerPublicKey[:])
-
- var gTrust apireputation.GlobalTrust
- gTrust.SetTrust(apiTrust)
- gTrust.SetManager(apiMangerPeerID)
-
- err := gTrust.Sign(frostfsecdsa.Signer(*fw.privatKey))
- if err != nil {
- fw.l.Debug(
- "failed to sign global trust",
- zap.Error(err),
- )
- return fmt.Errorf("failed to sign global trust: %w", err)
- }
-
- args.SetEpoch(t.Epoch())
- args.SetValue(gTrust)
- args.SetPeerID(apiTrustedPeerID)
-
- err = fw.client.Put(
- args,
- )
- if err != nil {
- fw.l.Debug(
- "failed to write global trust to contract",
- zap.Error(err),
- )
- return fmt.Errorf("failed to write global trust to contract: %w", err)
- }
-
- fw.l.Debug(
- "sent global trust to contract",
- zap.Uint64("epoch", t.Epoch()),
- zap.Float64("value", t.Value().Float64()),
- zap.Stringer("peer", t.Peer()),
- )
-
- return nil
-}
-
-type finalWriterOptions struct {
- log *logger.Logger
-}
-
-type FinalWriterOption func(*finalWriterOptions)
-
-func defaultFinalWriterOptionsOpts() *finalWriterOptions {
- return &finalWriterOptions{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-func FinalWriterWithLogger(l *logger.Logger) FinalWriterOption {
- return func(o *finalWriterOptions) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/cmd/frostfs-node/reputation/intermediate/daughters.go b/cmd/frostfs-node/reputation/intermediate/daughters.go
deleted file mode 100644
index 30237537c..000000000
--- a/cmd/frostfs-node/reputation/intermediate/daughters.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package intermediate
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// DaughterStorageWriterProvider is an implementation of the reputation.WriterProvider
-// interface that provides DaughterTrustWriter writer.
-type DaughterStorageWriterProvider struct {
- Log *logger.Logger
- Storage *daughters.Storage
-}
-
-// DaughterTrustWriter is an implementation of the reputation.Writer interface
-// that writes passed daughter's Trust values to Daughter storage. After writing
-// that, values can be used in eigenTrust algorithm's iterations.
-type DaughterTrustWriter struct {
- log *logger.Logger
- storage *daughters.Storage
- ep reputationcommon.EpochProvider
-}
-
-func (w *DaughterTrustWriter) Write(_ context.Context, t reputation.Trust) error {
- w.log.Debug(logs.IntermediateWritingReceivedDaughtersTrusts,
- zap.Uint64("epoch", w.ep.Epoch()),
- zap.Stringer("trusting_peer", t.TrustingPeer()),
- zap.Stringer("trusted_peer", t.Peer()),
- )
-
- w.storage.Put(w.ep.Epoch(), t)
- return nil
-}
-
-func (w *DaughterTrustWriter) Close(context.Context) error {
- return nil
-}
-
-func (s *DaughterStorageWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
- return &DaughterTrustWriter{
- log: s.Log,
- storage: s.Storage,
- ep: ep,
- }, nil
-}
diff --git a/cmd/frostfs-node/reputation/intermediate/remote.go b/cmd/frostfs-node/reputation/intermediate/remote.go
deleted file mode 100644
index 8087463b5..000000000
--- a/cmd/frostfs-node/reputation/intermediate/remote.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package intermediate
-
-import (
- "context"
- "crypto/ecdsa"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- reputationapi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "go.uber.org/zap"
-)
-
-// RemoteProviderPrm groups the required parameters of the RemoteProvider's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type RemoteProviderPrm struct {
- Key *ecdsa.PrivateKey
- Log *logger.Logger
-}
-
-// NewRemoteProvider creates a new instance of the RemoteProvider.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created RemoteProvider does not require additional
-// initialization and is completely ready for work.
-func NewRemoteProvider(prm RemoteProviderPrm) *RemoteProvider {
- switch {
- case prm.Key == nil:
- common.PanicOnPrmValue("NetMapSource", prm.Key)
- case prm.Log == nil:
- common.PanicOnPrmValue("Logger", prm.Log)
- }
-
- return &RemoteProvider{
- key: prm.Key,
- log: prm.Log,
- }
-}
-
-// RemoteProvider is an implementation of the clientKeyRemoteProvider interface.
-type RemoteProvider struct {
- key *ecdsa.PrivateKey
- log *logger.Logger
-}
-
-func (rp RemoteProvider) WithClient(c coreclient.Client) reputationcommon.WriterProvider {
- return &TrustWriterProvider{
- client: c,
- key: rp.key,
- log: rp.log,
- }
-}
-
-type TrustWriterProvider struct {
- client coreclient.Client
- key *ecdsa.PrivateKey
- log *logger.Logger
-}
-
-func (twp *TrustWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
- iterInfo, ok := ep.(eigentrustcalc.EpochIterationInfo)
- if !ok {
- // TODO: #1164 think if this can be done without such limitation
- panic(ErrIncorrectContextPanicMsg)
- }
-
- return &RemoteTrustWriter{
- iterInfo: iterInfo,
- client: twp.client,
- key: twp.key,
- log: twp.log,
- }, nil
-}
-
-type RemoteTrustWriter struct {
- iterInfo eigentrustcalc.EpochIterationInfo
- client coreclient.Client
- key *ecdsa.PrivateKey
- log *logger.Logger
-}
-
-// Write sends a trust value to a remote node via ReputationService.AnnounceIntermediateResult RPC.
-func (rtp *RemoteTrustWriter) Write(ctx context.Context, t reputation.Trust) error {
- epoch := rtp.iterInfo.Epoch()
- i := rtp.iterInfo.I()
-
- rtp.log.Debug(logs.IntermediateAnnouncingTrust,
- zap.Uint64("epoch", epoch),
- zap.Uint32("iteration", i),
- zap.Stringer("trusting_peer", t.TrustingPeer()),
- zap.Stringer("trusted_peer", t.Peer()),
- )
-
- var apiTrust reputationapi.Trust
- apiTrust.SetValue(t.Value().Float64())
- apiTrust.SetPeer(t.Peer())
-
- var apiPeerToPeerTrust reputationapi.PeerToPeerTrust
- apiPeerToPeerTrust.SetTrustingPeer(t.TrustingPeer())
- apiPeerToPeerTrust.SetTrust(apiTrust)
-
- var p internalclient.AnnounceIntermediatePrm
-
- p.SetClient(rtp.client)
- p.SetEpoch(epoch)
- p.SetIteration(i)
- p.SetTrust(apiPeerToPeerTrust)
-
- _, err := internalclient.AnnounceIntermediate(ctx, p)
-
- return err
-}
-
-func (rtp *RemoteTrustWriter) Close(context.Context) error {
- return nil
-}
diff --git a/cmd/frostfs-node/reputation/intermediate/storage.go b/cmd/frostfs-node/reputation/intermediate/storage.go
deleted file mode 100644
index db29ff92b..000000000
--- a/cmd/frostfs-node/reputation/intermediate/storage.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package intermediate
-
-import (
- "fmt"
-
- eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- consumerstorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/consumers"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// DaughterTrustIteratorProvider is an implementation of the
-// reputation/eigentrust/calculator's DaughterTrustIteratorProvider interface.
-type DaughterTrustIteratorProvider struct {
- DaughterStorage *daughters.Storage
- ConsumerStorage *consumerstorage.Storage
-}
-
-// InitDaughterIterator returns an iterator over the received
-// local trusts for ctx.Epoch() epoch from daughter p.
-func (ip *DaughterTrustIteratorProvider) InitDaughterIterator(ctx eigentrustcalc.EpochIterationInfo,
- p apireputation.PeerID) (eigentrustcalc.TrustIterator, error) {
- epoch := ctx.Epoch()
-
- daughterIterator, ok := ip.DaughterStorage.DaughterTrusts(epoch, p)
- if !ok {
- return nil, fmt.Errorf("no data in %d epoch for daughter: %s", epoch, p)
- }
-
- return daughterIterator, nil
-}
-
-// InitAllDaughtersIterator returns an iterator over all
-// daughters of the current node(manager) and all local
-// trusts received from them for ctx.Epoch() epoch.
-func (ip *DaughterTrustIteratorProvider) InitAllDaughtersIterator(
- ctx eigentrustcalc.EpochIterationInfo) (eigentrustcalc.PeerTrustsIterator, error) {
- epoch := ctx.Epoch()
-
- iter, ok := ip.DaughterStorage.AllDaughterTrusts(epoch)
- if !ok {
- return nil, fmt.Errorf("no data in %d epoch for daughters", epoch)
- }
-
- return iter, nil
-}
-
-// InitConsumersIterator returns an iterator over all daughters
-// of the current node(manager) and all their consumers' local
-// trusts for ctx.Epoch() epoch and ctx.I() iteration.
-func (ip *DaughterTrustIteratorProvider) InitConsumersIterator(
- ctx eigentrustcalc.EpochIterationInfo) (eigentrustcalc.PeerTrustsIterator, error) {
- epoch, iter := ctx.Epoch(), ctx.I()
-
- consumerIterator, ok := ip.ConsumerStorage.Consumers(epoch, iter)
- if !ok {
- return nil, fmt.Errorf("no data for %d iteration in %d epoch for consumers's trusts",
- iter,
- epoch,
- )
- }
-
- return consumerIterator, nil
-}
diff --git a/cmd/frostfs-node/reputation/internal/client/client.go b/cmd/frostfs-node/reputation/internal/client/client.go
deleted file mode 100644
index ff5131262..000000000
--- a/cmd/frostfs-node/reputation/internal/client/client.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package internal
-
-import (
- "context"
-
- coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-type commonPrm struct {
- cli coreclient.Client
-}
-
-// SetClient sets the base client for FrostFS API communication.
-//
-// Required parameter.
-func (x *commonPrm) SetClient(cli coreclient.Client) {
- x.cli = cli
-}
-
-// AnnounceLocalPrm groups parameters of AnnounceLocal operation.
-type AnnounceLocalPrm struct {
- commonPrm
-
- cliPrm client.PrmAnnounceLocalTrust
-}
-
-// SetEpoch sets the epoch in which the trust was assessed.
-func (x *AnnounceLocalPrm) SetEpoch(epoch uint64) {
- x.cliPrm.SetEpoch(epoch)
-}
-
-// SetTrusts sets a list of local trust values.
-func (x *AnnounceLocalPrm) SetTrusts(ts []reputation.Trust) {
- x.cliPrm.SetValues(ts)
-}
-
-// AnnounceLocalRes groups the resulting values of AnnounceLocal operation.
-type AnnounceLocalRes struct{}
-
-// AnnounceLocal sends estimations of local trust to the remote node.
-//
-// Client, context and key must be set.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func AnnounceLocal(ctx context.Context, prm AnnounceLocalPrm) (res AnnounceLocalRes, err error) {
- var cliRes *client.ResAnnounceLocalTrust
-
- cliRes, err = prm.cli.AnnounceLocalTrust(ctx, prm.cliPrm)
- if err == nil {
- // pull out an error from status
- err = apistatus.ErrFromStatus(cliRes.Status())
- }
-
- return
-}
-
-// AnnounceIntermediatePrm groups parameters of AnnounceIntermediate operation.
-type AnnounceIntermediatePrm struct {
- commonPrm
-
- cliPrm client.PrmAnnounceIntermediateTrust
-}
-
-// SetEpoch sets the number of the epoch when the trust calculation's iteration was executed.
-func (x *AnnounceIntermediatePrm) SetEpoch(epoch uint64) {
- x.cliPrm.SetEpoch(epoch)
-}
-
-// SetIteration sets the number of the iteration of the trust calculation algorithm.
-func (x *AnnounceIntermediatePrm) SetIteration(iter uint32) {
- x.cliPrm.SetIteration(iter)
-}
-
-// SetTrust sets the current global trust value computed at the iteration.
-func (x *AnnounceIntermediatePrm) SetTrust(t reputation.PeerToPeerTrust) {
- x.cliPrm.SetCurrentValue(t)
-}
-
-// AnnounceIntermediateRes groups the resulting values of AnnounceIntermediate operation.
-type AnnounceIntermediateRes struct{}
-
-// AnnounceIntermediate sends the global trust value calculated at the specified iteration
-// and epoch to to the remote node.
-//
-// Client, context and key must be set.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func AnnounceIntermediate(ctx context.Context, prm AnnounceIntermediatePrm) (res AnnounceIntermediateRes, err error) {
- var cliRes *client.ResAnnounceIntermediateTrust
-
- cliRes, err = prm.cli.AnnounceIntermediateTrust(ctx, prm.cliPrm)
- if err == nil {
- // pull out an error from status
- err = apistatus.ErrFromStatus(cliRes.Status())
- }
-
- return
-}
diff --git a/cmd/frostfs-node/reputation/internal/client/doc.go b/cmd/frostfs-node/reputation/internal/client/doc.go
deleted file mode 100644
index 1dc66cee6..000000000
--- a/cmd/frostfs-node/reputation/internal/client/doc.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Package internal provides functionality for FrostFS Node Reputation system communication with FrostFS network.
-// The base client for accessing remote nodes via FrostFS API is a FrostFS SDK Go API client.
-// However, although it encapsulates a useful piece of business logic (e.g. the signature mechanism),
-// the Reputation service does not fully use the client's flexible interface.
-//
-// In this regard, this package provides functions over base API client necessary for the application.
-// This allows you to concentrate the entire spectrum of the client's use in one place (this will be convenient
-// both when updating the base client and for evaluating the UX of SDK library). So, it is expected that all
-// Reputation service packages will be limited to this package for the development of functionality requiring
-// FrostFS API communication.
-package internal
diff --git a/cmd/frostfs-node/reputation/local/remote.go b/cmd/frostfs-node/reputation/local/remote.go
deleted file mode 100644
index 6197c6d69..000000000
--- a/cmd/frostfs-node/reputation/local/remote.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package local
-
-import (
- "context"
- "crypto/ecdsa"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- reputationapi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "go.uber.org/zap"
-)
-
-// RemoteProviderPrm groups the required parameters of the RemoteProvider's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type RemoteProviderPrm struct {
- Key *ecdsa.PrivateKey
- Log *logger.Logger
-}
-
-// NewRemoteProvider creates a new instance of the RemoteProvider.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created RemoteProvider does not require additional
-// initialization and is completely ready for work.
-func NewRemoteProvider(prm RemoteProviderPrm) *RemoteProvider {
- switch {
- case prm.Key == nil:
- common.PanicOnPrmValue("NetMapSource", prm.Key)
- case prm.Log == nil:
- common.PanicOnPrmValue("Logger", prm.Log)
- }
-
- return &RemoteProvider{
- key: prm.Key,
- log: prm.Log,
- }
-}
-
-// RemoteProvider is an implementation of the clientKeyRemoteProvider interface.
-type RemoteProvider struct {
- key *ecdsa.PrivateKey
- log *logger.Logger
-}
-
-func (rp RemoteProvider) WithClient(c coreclient.Client) reputationcommon.WriterProvider {
- return &TrustWriterProvider{
- client: c,
- key: rp.key,
- log: rp.log,
- }
-}
-
-type TrustWriterProvider struct {
- client coreclient.Client
- key *ecdsa.PrivateKey
- log *logger.Logger
-}
-
-func (twp *TrustWriterProvider) InitWriter(ep reputationcommon.EpochProvider) (reputationcommon.Writer, error) {
- return &RemoteTrustWriter{
- ep: ep,
- client: twp.client,
- key: twp.key,
- log: twp.log,
- }, nil
-}
-
-type RemoteTrustWriter struct {
- ep reputationcommon.EpochProvider
- client coreclient.Client
- key *ecdsa.PrivateKey
- log *logger.Logger
-
- buf []reputationapi.Trust
-}
-
-func (rtp *RemoteTrustWriter) Write(_ context.Context, t reputation.Trust) error {
- var apiTrust reputationapi.Trust
-
- apiTrust.SetValue(t.Value().Float64())
- apiTrust.SetPeer(t.Peer())
-
- rtp.buf = append(rtp.buf, apiTrust)
-
- return nil
-}
-
-func (rtp *RemoteTrustWriter) Close(ctx context.Context) error {
- epoch := rtp.ep.Epoch()
-
- rtp.log.Debug(logs.LocalAnnouncingTrusts,
- zap.Uint64("epoch", epoch),
- )
-
- var prm internalclient.AnnounceLocalPrm
-
- prm.SetClient(rtp.client)
- prm.SetEpoch(epoch)
- prm.SetTrusts(rtp.buf)
-
- _, err := internalclient.AnnounceLocal(ctx, prm)
-
- return err
-}
diff --git a/cmd/frostfs-node/reputation/local/storage.go b/cmd/frostfs-node/reputation/local/storage.go
deleted file mode 100644
index a0dc3d4ce..000000000
--- a/cmd/frostfs-node/reputation/local/storage.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package local
-
-import (
- "bytes"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- trustcontroller "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/controller"
- truststorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/local/storage"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "go.uber.org/zap"
-)
-
-type TrustStorage struct {
- Log *logger.Logger
-
- Storage *truststorage.Storage
-
- NmSrc netmapcore.Source
-
- LocalKey []byte
-}
-
-func (s *TrustStorage) InitIterator(ep reputationcommon.EpochProvider) (trustcontroller.Iterator, error) {
- epoch := ep.Epoch()
-
- s.Log.Debug(logs.LocalInitializingIteratorOverTrusts,
- zap.Uint64("epoch", epoch),
- )
-
- epochStorage, err := s.Storage.DataForEpoch(epoch)
- if err != nil && !errors.Is(err, truststorage.ErrNoPositiveTrust) {
- return nil, err
- }
-
- return &TrustIterator{
- ep: ep,
- storage: s,
- epochStorage: epochStorage,
- }, nil
-}
-
-type TrustIterator struct {
- ep reputationcommon.EpochProvider
-
- storage *TrustStorage
-
- epochStorage *truststorage.EpochTrustValueStorage
-}
-
-func (it *TrustIterator) Iterate(h reputation.TrustHandler) error {
- if it.epochStorage != nil {
- err := it.epochStorage.Iterate(h)
- if !errors.Is(err, truststorage.ErrNoPositiveTrust) {
- return err
- }
- }
-
- nm, err := it.storage.NmSrc.GetNetMapByEpoch(it.ep.Epoch())
- if err != nil {
- return err
- }
-
- // find out if local node is presented in netmap
- localIndex := -1
-
- nmNodes := nm.Nodes()
- for i := range nmNodes {
- if bytes.Equal(nmNodes[i].PublicKey(), it.storage.LocalKey) {
- localIndex = i
- break
- }
- }
-
- ln := len(nmNodes)
- if localIndex >= 0 && ln > 0 {
- ln--
- }
-
- // calculate Pj http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Chapter 4.5.
- p := reputation.TrustOne.Div(reputation.TrustValueFromInt(ln))
-
- for i := range nmNodes {
- if i == localIndex {
- continue
- }
-
- var trusted, trusting apireputation.PeerID
-
- trusted.SetPublicKey(nmNodes[i].PublicKey())
- trusting.SetPublicKey(it.storage.LocalKey)
-
- trust := reputation.Trust{}
- trust.SetPeer(trusted)
- trust.SetValue(p)
- trust.SetTrustingPeer(trusting)
-
- if err := h(trust); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/cmd/frostfs-node/reputation/ticker/fixed.go b/cmd/frostfs-node/reputation/ticker/fixed.go
deleted file mode 100644
index 5403882d5..000000000
--- a/cmd/frostfs-node/reputation/ticker/fixed.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package ticker
-
-import (
- "fmt"
- "sync"
-)
-
-// IterationHandler is a callback of a certain block advance.
-type IterationHandler func()
-
-// IterationsTicker represents a fixed tick number block timer.
-//
-// It can tick the blocks and perform certain actions
-// on block time intervals.
-type IterationsTicker struct {
- m sync.Mutex
-
- curr uint64
- period uint64
-
- times uint64
-
- h IterationHandler
-}
-
-// NewIterationsTicker creates a new IterationsTicker.
-//
-// It guaranties that a handler would be called the
-// specified amount of times in the specified amount
-// of blocks. After the last meaningful Tick, IterationsTicker
-// becomes no-op timer.
-//
-// Returns an error only if times is greater than totalBlocks.
-func NewIterationsTicker(totalBlocks uint64, times uint64, h IterationHandler) (*IterationsTicker, error) {
- period := totalBlocks / times
-
- if period == 0 {
- return nil, fmt.Errorf("impossible to tick %d times in %d blocks",
- times, totalBlocks,
- )
- }
-
- var curr uint64
-
- // try to make handler calls as rare as possible
- if totalBlocks%times != 0 {
- extraBlocks := (period+1)*times - totalBlocks
-
- if period >= extraBlocks {
- curr = extraBlocks + (period-extraBlocks)/2
- period++
- }
- }
-
- return &IterationsTicker{
- curr: curr,
- period: period,
- times: times,
- h: h,
- }, nil
-}
-
-// Tick ticks one block in the IterationsTicker.
-//
-// Returns `false` if the timer has finished its operations
-// and there will be no more handler calls.
-// Calling Tick after the returned `false` is safe, no-op
-// and also returns `false`.
-func (ft *IterationsTicker) Tick() bool {
- ft.m.Lock()
- defer ft.m.Unlock()
-
- if ft.times == 0 {
- return false
- }
-
- ft.curr++
-
- if ft.curr%ft.period == 0 {
- ft.h()
-
- ft.times--
-
- if ft.times == 0 {
- return false
- }
- }
-
- return true
-}
diff --git a/cmd/frostfs-node/reputation/ticker/fixed_test.go b/cmd/frostfs-node/reputation/ticker/fixed_test.go
deleted file mode 100644
index 25e9bd08f..000000000
--- a/cmd/frostfs-node/reputation/ticker/fixed_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package ticker
-
-import (
- "errors"
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestFixedTimer_Tick(t *testing.T) {
- tests := [...]struct {
- duration uint64
- times uint64
- err error
- }{
- {
- duration: 20,
- times: 4,
- err: nil,
- },
- {
- duration: 6,
- times: 6,
- err: nil,
- },
- {
- duration: 10,
- times: 6,
- err: nil,
- },
- {
- duration: 5,
- times: 6,
- err: errors.New("impossible to tick 6 times in 5 blocks"),
- },
- }
-
- for _, test := range tests {
- t.Run(fmt.Sprintf("duration:%d,times:%d", test.duration, test.times), func(t *testing.T) {
- counter := uint64(0)
-
- timer, err := NewIterationsTicker(test.duration, test.times, func() {
- counter++
- })
- if test.err != nil {
- require.EqualError(t, err, test.err.Error())
- return
- }
-
- require.NoError(t, err)
-
- for i := 0; i < int(test.duration); i++ {
- if !timer.Tick() {
- break
- }
- }
-
- require.Equal(t, false, timer.Tick())
- require.Equal(t, test.times, counter)
- })
- }
-}
-
-func TestFixedTimer_RareCalls(t *testing.T) {
- tests := [...]struct {
- duration uint64
- times uint64
- firstCall uint64
- period uint64
- }{
- {
- duration: 11,
- times: 6,
- firstCall: 1,
- period: 2,
- },
- {
- duration: 11,
- times: 4,
- firstCall: 2,
- period: 3,
- },
- {
- duration: 20,
- times: 3,
- firstCall: 4,
- period: 7,
- },
- }
-
- for _, test := range tests {
- t.Run(fmt.Sprintf("duration:%d,times:%d", test.duration, test.times), func(t *testing.T) {
- var counter uint64
-
- timer, err := NewIterationsTicker(test.duration, test.times, func() {
- counter++
- })
- require.NoError(t, err)
-
- checked := false
-
- for i := 1; i <= int(test.duration); i++ {
- if !timer.Tick() {
- break
- }
-
- if !checked && counter == 1 {
- require.Equal(t, test.firstCall, uint64(i))
- checked = true
- }
- }
-
- require.Equal(t, false, timer.Tick())
- require.Equal(t, test.times, counter)
- })
- }
-}
diff --git a/cmd/frostfs-node/timers.go b/cmd/frostfs-node/timers.go
deleted file mode 100644
index 2ee2e8656..000000000
--- a/cmd/frostfs-node/timers.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package main
-
-import (
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker"
-)
-
-type eigenTrustTickers struct {
- m sync.Mutex
-
- timers map[uint64]*ticker.IterationsTicker
-}
-
-func (e *eigenTrustTickers) addEpochTimer(epoch uint64, timer *ticker.IterationsTicker) {
- e.m.Lock()
- defer e.m.Unlock()
-
- e.timers[epoch] = timer
-}
-
-func (e *eigenTrustTickers) tick() {
- e.m.Lock()
- defer e.m.Unlock()
-
- for epoch, t := range e.timers {
- if !t.Tick() {
- delete(e.timers, epoch)
- }
- }
-}
-
-func tickBlockTimers(c *cfg) {
- c.cfgMorph.eigenTrustTicker.tick()
-}
-
-func newEigenTrustIterTimer(c *cfg) {
- c.cfgMorph.eigenTrustTicker = &eigenTrustTickers{
- // it is expected to have max 2 concurrent epoch
- // in normal mode work
- timers: make(map[uint64]*ticker.IterationsTicker, 2),
- }
-}
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 2f5e89e39..5ac3154bc 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -18,7 +18,6 @@ import (
addrvalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/maddress"
statevalidation "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
subnetvalidator "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/subnet"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/reputation"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"
auditSettlement "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/audit"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
@@ -29,13 +28,11 @@ import (
frostfsClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
- repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
audittask "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit/taskmanager"
control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir"
controlsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server"
- reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
util2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
@@ -467,33 +464,6 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper, frostfsIDClient *
return bindMainnetProcessor(frostfsProcessor, s)
}
-func (s *Server) initReputationProcessor(cfg *viper.Viper, sidechainFee fixedn.Fixed8) error {
- repClient, err := repClient.NewFromMorph(s.morphClient, s.contracts.reputation, sidechainFee, repClient.TryNotary(), repClient.AsAlphabet())
- if err != nil {
- return err
- }
-
- // create reputation processor
- reputationProcessor, err := reputation.New(&reputation.Params{
- Log: s.log,
- PoolSize: cfg.GetInt("workers.reputation"),
- EpochState: s,
- AlphabetState: s,
- ReputationWrapper: repClient,
- ManagerBuilder: reputationcommon.NewManagerBuilder(
- reputationcommon.ManagersPrm{
- NetMapSource: s.netmapClient,
- },
- ),
- NotaryDisabled: s.sideNotaryConfig.disabled,
- })
- if err != nil {
- return err
- }
-
- return bindMorphProcessor(reputationProcessor, s)
-}
-
func (s *Server) initGRPCServer(cfg *viper.Viper) error {
controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
if controlSvcEndpoint == "" {
@@ -620,8 +590,6 @@ type serverProcessors struct {
func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) (*serverProcessors, error) {
result := &serverProcessors{}
- fee := s.feeConfig.SideChainFee()
-
irf := s.createIRFetcher()
s.statusIndex = newInnerRingIndexer(
@@ -681,11 +649,6 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien
return nil, err
}
- err = s.initReputationProcessor(cfg, fee)
- if err != nil {
- return nil, err
- }
-
return result, nil
}
diff --git a/pkg/innerring/processors/reputation/handlers.go b/pkg/innerring/processors/reputation/handlers.go
deleted file mode 100644
index 9b8e7f66a..000000000
--- a/pkg/innerring/processors/reputation/handlers.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package reputation
-
-import (
- "encoding/hex"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
- "go.uber.org/zap"
-)
-
-func (rp *Processor) handlePutReputation(ev event.Event) {
- put := ev.(reputationEvent.Put)
- peerID := put.PeerID()
-
- // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
- rp.log.Info(logs.Notification,
- zap.String("type", "reputation put"),
- zap.String("peer_id", hex.EncodeToString(peerID.PublicKey())))
-
- // send event to the worker pool
-
- err := rp.pool.Submit(func() { rp.processPut(&put) })
- if err != nil {
- // there system can be moved into controlled degradation stage
- rp.log.Warn(logs.ReputationReputationWorkerPoolDrained,
- zap.Int("capacity", rp.pool.Cap()))
- }
-}
diff --git a/pkg/innerring/processors/reputation/process_put.go b/pkg/innerring/processors/reputation/process_put.go
deleted file mode 100644
index f8814dd06..000000000
--- a/pkg/innerring/processors/reputation/process_put.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package reputation
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
- reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "go.uber.org/zap"
-)
-
-var errWrongManager = errors.New("got manager that is incorrect for peer")
-
-func (rp *Processor) processPut(e *reputationEvent.Put) {
- if !rp.alphabetState.IsAlphabet() {
- rp.log.Info(logs.ReputationNonAlphabetModeIgnoreReputationPutNotification)
- return
- }
-
- epoch := e.Epoch()
- id := e.PeerID()
- value := e.Value()
-
- // check if epoch is valid
- currentEpoch := rp.epochState.EpochCounter()
- if epoch >= currentEpoch {
- rp.log.Info(logs.ReputationIgnoreReputationValue,
- zap.String("reason", "invalid epoch number"),
- zap.Uint64("trust_epoch", epoch),
- zap.Uint64("local_epoch", currentEpoch))
-
- return
- }
-
- // check signature
- if !value.VerifySignature() {
- rp.log.Info(logs.ReputationIgnoreReputationValue,
- zap.String("reason", "invalid signature"),
- )
-
- return
- }
-
- // check if manager is correct
- if err := rp.checkManagers(epoch, value.Manager(), id); err != nil {
- rp.log.Info(logs.ReputationIgnoreReputationValue,
- zap.String("reason", "wrong manager"),
- zap.String("error", err.Error()))
-
- return
- }
-
- rp.approvePutReputation(e)
-}
-
-func (rp *Processor) checkManagers(e uint64, mng apireputation.PeerID, peer apireputation.PeerID) error {
- mm, err := rp.mngBuilder.BuildManagers(e, peer)
- if err != nil {
- return fmt.Errorf("could not build managers: %w", err)
- }
-
- for _, m := range mm {
- // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
- if bytes.Equal(mng.PublicKey(), m.PublicKey()) {
- return nil
- }
- }
-
- return errWrongManager
-}
-
-func (rp *Processor) approvePutReputation(e *reputationEvent.Put) {
- var (
- id = e.PeerID()
- err error
- )
-
- if nr := e.NotaryRequest(); nr != nil {
- // put event was received via Notary service
- err = rp.reputationWrp.Morph().NotarySignAndInvokeTX(nr.MainTransaction)
- } else {
- args := repClient.PutPrm{}
- args.SetEpoch(e.Epoch())
- args.SetPeerID(id)
- args.SetValue(e.Value())
-
- err = rp.reputationWrp.Put(args)
- }
- if err != nil {
- // FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
- rp.log.Warn(logs.ReputationCantSendApprovalTxForReputationValue,
- zap.String("peer_id", hex.EncodeToString(id.PublicKey())),
- zap.String("error", err.Error()))
- }
-}
diff --git a/pkg/innerring/processors/reputation/processor.go b/pkg/innerring/processors/reputation/processor.go
deleted file mode 100644
index a248fa75f..000000000
--- a/pkg/innerring/processors/reputation/processor.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package reputation
-
-import (
- "errors"
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "github.com/nspcc-dev/neo-go/pkg/core/mempoolevent"
- "github.com/panjf2000/ants/v2"
- "go.uber.org/zap"
-)
-
-type (
- // EpochState is a callback interface for inner ring global state.
- EpochState interface {
- EpochCounter() uint64
- }
-
- // AlphabetState is a callback interface for inner ring global state.
- AlphabetState interface {
- IsAlphabet() bool
- }
-
- // Processor of events produced by reputation contract.
- Processor struct {
- log *logger.Logger
- pool *ants.Pool
-
- epochState EpochState
- alphabetState AlphabetState
-
- reputationWrp *repClient.Client
-
- mngBuilder common.ManagerBuilder
-
- notaryDisabled bool
- }
-
- // Params of the processor constructor.
- Params struct {
- Log *logger.Logger
- PoolSize int
- EpochState EpochState
- AlphabetState AlphabetState
- ReputationWrapper *repClient.Client
- ManagerBuilder common.ManagerBuilder
- NotaryDisabled bool
- }
-)
-
-const (
- putReputationNotification = "reputationPut"
-)
-
-// New creates reputation contract processor instance.
-func New(p *Params) (*Processor, error) {
- switch {
- case p.Log == nil:
- return nil, errors.New("ir/reputation: logger is not set")
- case p.EpochState == nil:
- return nil, errors.New("ir/reputation: global state is not set")
- case p.AlphabetState == nil:
- return nil, errors.New("ir/reputation: global state is not set")
- case p.ReputationWrapper == nil:
- return nil, errors.New("ir/reputation: reputation contract wrapper is not set")
- case p.ManagerBuilder == nil:
- return nil, errors.New("ir/reputation: manager builder is not set")
- }
-
- p.Log.Debug(logs.ReputationReputationWorkerPool, zap.Int("size", p.PoolSize))
-
- pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
- if err != nil {
- return nil, fmt.Errorf("ir/reputation: can't create worker pool: %w", err)
- }
-
- return &Processor{
- log: p.Log,
- pool: pool,
- epochState: p.EpochState,
- alphabetState: p.AlphabetState,
- reputationWrp: p.ReputationWrapper,
- mngBuilder: p.ManagerBuilder,
- notaryDisabled: p.NotaryDisabled,
- }, nil
-}
-
-// ListenerNotificationParsers for the 'event.Listener' event producer.
-func (rp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo {
- if !rp.notaryDisabled {
- return nil
- }
-
- var parsers []event.NotificationParserInfo
-
- // put reputation event
- put := event.NotificationParserInfo{}
- put.SetType(putReputationNotification)
- put.SetScriptHash(rp.reputationWrp.ContractAddress())
- put.SetParser(reputationEvent.ParsePut)
- parsers = append(parsers, put)
-
- return parsers
-}
-
-// ListenerNotificationHandlers for the 'event.Listener' event producer.
-func (rp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo {
- if !rp.notaryDisabled {
- return nil
- }
-
- var handlers []event.NotificationHandlerInfo
-
- // put reputation handler
- put := event.NotificationHandlerInfo{}
- put.SetType(putReputationNotification)
- put.SetScriptHash(rp.reputationWrp.ContractAddress())
- put.SetHandler(rp.handlePutReputation)
- handlers = append(handlers, put)
-
- return handlers
-}
-
-// ListenerNotaryParsers for the 'event.Listener' notary event producer.
-func (rp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
- var p event.NotaryParserInfo
-
- p.SetMempoolType(mempoolevent.TransactionAdded)
- p.SetRequestType(reputationEvent.PutNotaryEvent)
- p.SetScriptHash(rp.reputationWrp.ContractAddress())
- p.SetParser(reputationEvent.ParsePutNotary)
-
- return []event.NotaryParserInfo{p}
-}
-
-// ListenerNotaryHandlers for the 'event.Listener' notary event producer.
-func (rp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
- var h event.NotaryHandlerInfo
-
- h.SetMempoolType(mempoolevent.TransactionAdded)
- h.SetRequestType(reputationEvent.PutNotaryEvent)
- h.SetScriptHash(rp.reputationWrp.ContractAddress())
- h.SetHandler(rp.handlePutReputation)
-
- return []event.NotaryHandlerInfo{h}
-}
-
-// TimersHandlers for the 'Timers' event producer.
-func (rp *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
diff --git a/pkg/innerring/rpc.go b/pkg/innerring/rpc.go
index 9a89c4fcb..27338eaa4 100644
--- a/pkg/innerring/rpc.go
+++ b/pkg/innerring/rpc.go
@@ -27,7 +27,7 @@ type (
ClientCache struct {
log *logger.Logger
cache interface {
- Get(clientcore.NodeInfo) (clientcore.Client, error)
+ Get(clientcore.NodeInfo) (clientcore.MultiAddressClient, error)
CloseAll()
}
key *ecdsa.PrivateKey
diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go
index 549e98b65..371d3c76f 100644
--- a/pkg/network/cache/client.go
+++ b/pkg/network/cache/client.go
@@ -38,7 +38,7 @@ func NewSDKClientCache(opts ClientCacheOpts) *ClientCache {
}
// Get function returns existing client or creates a new one.
-func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.Client, error) {
+func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.MultiAddressClient, error) {
netAddr := info.AddressGroup()
if c.opts.AllowExternal {
netAddr = append(netAddr, info.ExternalAddressGroup()...)
diff --git a/pkg/network/transport/reputation/grpc/service.go b/pkg/network/transport/reputation/grpc/service.go
deleted file mode 100644
index bb9074324..000000000
--- a/pkg/network/transport/reputation/grpc/service.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package grpcreputation
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
- reputation2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation/grpc"
- reputationrpc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/rpc"
-)
-
-// Server wraps FrostFS API v2 Reputation service server
-// and provides gRPC Reputation service server interface.
-type Server struct {
- srv reputationrpc.Server
-}
-
-// New creates, initializes and returns Server instance.
-func New(srv reputationrpc.Server) *Server {
- return &Server{
- srv: srv,
- }
-}
-
-func (s *Server) AnnounceLocalTrust(ctx context.Context, r *reputation2.AnnounceLocalTrustRequest) (*reputation2.AnnounceLocalTrustResponse, error) {
- req := new(reputation.AnnounceLocalTrustRequest)
- if err := req.FromGRPCMessage(r); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.AnnounceLocalTrust(ctx, req)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*reputation2.AnnounceLocalTrustResponse), nil
-}
-
-func (s *Server) AnnounceIntermediateResult(ctx context.Context, r *reputation2.AnnounceIntermediateResultRequest) (*reputation2.AnnounceIntermediateResultResponse, error) {
- req := new(reputation.AnnounceIntermediateResultRequest)
- if err := req.FromGRPCMessage(r); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.AnnounceIntermediateResult(ctx, req)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*reputation2.AnnounceIntermediateResultResponse), nil
-}
diff --git a/pkg/services/object/head/remote.go b/pkg/services/object/head/remote.go
index 85f076a76..bcba181f2 100644
--- a/pkg/services/object/head/remote.go
+++ b/pkg/services/object/head/remote.go
@@ -15,7 +15,7 @@ import (
)
type ClientConstructor interface {
- Get(clientcore.NodeInfo) (clientcore.Client, error)
+ Get(clientcore.NodeInfo) (clientcore.MultiAddressClient, error)
}
// RemoteHeader represents utility for getting
diff --git a/pkg/services/reputation/common/deps.go b/pkg/services/reputation/common/deps.go
deleted file mode 100644
index 3ea5aa88e..000000000
--- a/pkg/services/reputation/common/deps.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package common
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-type EpochProvider interface {
- // Must return epoch number to select the values.
- Epoch() uint64
-}
-
-// Writer describes the interface for storing reputation.Trust values.
-//
-// This interface is provided by both local storage
-// of values and remote (wrappers over the RPC).
-type Writer interface {
- // Write performs a write operation of reputation.Trust value
- // and returns any error encountered.
- //
- // All values after the Close call must be flushed to the
- // physical target. Implementations can cache values before
- // Close operation.
- //
- // Write must not be called after Close.
- Write(context.Context, reputation.Trust) error
-
- // Close exits with method-providing Writer.
- //
- // All cached values must be flushed before
- // the Close's return.
- //
- // Methods must not be called after Close.
- Close(context.Context) error
-}
-
-// WriterProvider is a group of methods provided
-// by entity which generates keepers of
-// reputation.Trust values.
-type WriterProvider interface {
- // InitWriter should return an initialized Writer.
- //
- // Initialization problems are reported via error.
- // If no error was returned, then the Writer must not be nil.
- //
- // Implementations can have different logic for different
- // contexts, so specific ones may document their own behavior.
- InitWriter(EpochProvider) (Writer, error)
-}
-
-// ManagerBuilder defines an interface for providing a list
-// of Managers for specific epoch. Implementation depends on trust value.
-type ManagerBuilder interface {
- // BuildManagers must compose list of managers. It depends on
- // particular epoch and PeerID of the current route point.
- BuildManagers(epoch uint64, p apireputation.PeerID) ([]ServerInfo, error)
-}
-
-// ServerInfo describes a set of
-// characteristics of a point in a route.
-type ServerInfo interface {
- // PublicKey returns public key of the node
- // from the route in a binary representation.
- PublicKey() []byte
-
- // Iterates over network addresses of the node
- // in the route. Breaks iterating on true return
- // of the handler.
- IterateAddresses(func(string) bool)
-
- // Returns number of server's network addresses.
- NumberOfAddresses() int
-
- // ExternalAddresses returns external addresses of a node.
- ExternalAddresses() []string
-}
diff --git a/pkg/services/reputation/common/managers.go b/pkg/services/reputation/common/managers.go
deleted file mode 100644
index 84201809f..000000000
--- a/pkg/services/reputation/common/managers.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package common
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- apiNetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "git.frostfs.info/TrueCloudLab/hrw"
- "go.uber.org/zap"
-)
-
-// managerBuilder is implementation of reputation ManagerBuilder interface.
-// It sorts nodes in NetMap with HRW algorithms and
-// takes the next node after the current one as the only manager.
-type managerBuilder struct {
- log *logger.Logger
- nmSrc netmapcore.Source
- opts *mngOptions
-}
-
-// ManagersPrm groups the required parameters of the managerBuilder's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type ManagersPrm struct {
- NetMapSource netmapcore.Source
-}
-
-// NewManagerBuilder creates a new instance of the managerBuilder.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created managerBuilder does not require additional
-// initialization and is completely ready for work.
-func NewManagerBuilder(prm ManagersPrm, opts ...MngOption) ManagerBuilder {
- switch {
- case prm.NetMapSource == nil:
- panic(fmt.Sprintf("invalid NetMapSource (%T):%v", prm.NetMapSource, prm.NetMapSource))
- }
-
- o := defaultMngOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &managerBuilder{
- log: o.log,
- nmSrc: prm.NetMapSource,
- opts: o,
- }
-}
-
-// implements Server on apiNetmap.NodeInfo.
-type nodeServer apiNetmap.NodeInfo
-
-func (x nodeServer) PublicKey() []byte {
- return (apiNetmap.NodeInfo)(x).PublicKey()
-}
-
-func (x nodeServer) IterateAddresses(f func(string) bool) {
- (apiNetmap.NodeInfo)(x).IterateNetworkEndpoints(f)
-}
-
-func (x nodeServer) NumberOfAddresses() int {
- return (apiNetmap.NodeInfo)(x).NumberOfNetworkEndpoints()
-}
-
-func (x nodeServer) ExternalAddresses() []string {
- return (apiNetmap.NodeInfo)(x).ExternalAddresses()
-}
-
-// BuildManagers sorts nodes in NetMap with HRW algorithms and
-// takes the next node after the current one as the only manager.
-func (mb *managerBuilder) BuildManagers(epoch uint64, p apireputation.PeerID) ([]ServerInfo, error) {
- mb.log.Debug(logs.CommonStartBuildingManagers,
- zap.Uint64("epoch", epoch),
- zap.Stringer("peer", p),
- )
-
- nm, err := mb.nmSrc.GetNetMapByEpoch(epoch)
- if err != nil {
- return nil, err
- }
-
- nmNodes := nm.Nodes()
-
- // make a copy to keep order consistency of the origin netmap after sorting
- nodes := make([]apiNetmap.NodeInfo, len(nmNodes))
-
- copy(nodes, nmNodes)
-
- hrw.SortHasherSliceByValue(nodes, epoch)
-
- for i := range nodes {
- if apireputation.ComparePeerKey(p, nodes[i].PublicKey()) {
- managerIndex := i + 1
-
- if managerIndex == len(nodes) {
- managerIndex = 0
- }
-
- return []ServerInfo{nodeServer(nodes[managerIndex])}, nil
- }
- }
-
- return nil, nil
-}
-
-type mngOptions struct {
- log *logger.Logger
-}
-
-type MngOption func(*mngOptions)
-
-func defaultMngOpts() *mngOptions {
- return &mngOptions{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns MngOption to specify logging component.
-func WithLogger(l *logger.Logger) MngOption {
- return func(o *mngOptions) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/reputation/common/router/calls.go b/pkg/services/reputation/common/router/calls.go
deleted file mode 100644
index 4ed293beb..000000000
--- a/pkg/services/reputation/common/router/calls.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package router
-
-import (
- "context"
- "encoding/hex"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "go.uber.org/zap"
-)
-
-// RouteInfo wraps epoch provider with additional passed
-// route data. It is only used inside Router and is
-// not passed in any external methods.
-type RouteInfo struct {
- common.EpochProvider
-
- passedRoute []common.ServerInfo
-}
-
-// NewRouteInfo wraps the main context of value passing with its traversal route and epoch.
-func NewRouteInfo(ep common.EpochProvider, passed []common.ServerInfo) *RouteInfo {
- return &RouteInfo{
- EpochProvider: ep,
- passedRoute: passed,
- }
-}
-
-type trustWriter struct {
- router *Router
-
- routeInfo *RouteInfo
-
- routeMtx sync.RWMutex
- mServers map[string]common.Writer
-}
-
-// InitWriter initializes and returns Writer that sends each value to its next route point.
-//
-// If ep was created by NewRouteInfo, then the traversed route is taken into account,
-// and the value will be sent to its continuation. Otherwise, the route will be laid
-// from scratch and the value will be sent to its primary point.
-//
-// After building a list of remote points of the next leg of the route, the value is sent
-// sequentially to all of them. If any transmissions (even all) fail, an error will not
-// be returned.
-//
-// Close of the composed Writer calls Close method on each internal Writer generated in
-// runtime and never returns an error.
-//
-// Always returns nil error.
-func (r *Router) InitWriter(ep common.EpochProvider) (common.Writer, error) {
- var (
- routeInfo *RouteInfo
- ok bool
- )
-
- if routeInfo, ok = ep.(*RouteInfo); !ok {
- routeInfo = &RouteInfo{
- EpochProvider: ep,
- passedRoute: []common.ServerInfo{r.localSrvInfo},
- }
- }
-
- return &trustWriter{
- router: r,
- routeInfo: routeInfo,
- mServers: make(map[string]common.Writer),
- }, nil
-}
-
-func (w *trustWriter) Write(ctx context.Context, t reputation.Trust) error {
- w.routeMtx.Lock()
- defer w.routeMtx.Unlock()
-
- route, err := w.router.routeBuilder.NextStage(w.routeInfo.Epoch(), t, w.routeInfo.passedRoute)
- if err != nil {
- return err
- } else if len(route) == 0 {
- route = []common.ServerInfo{nil}
- }
-
- for _, remoteInfo := range route {
- var key string
-
- if remoteInfo != nil {
- key = hex.EncodeToString(remoteInfo.PublicKey())
- }
-
- remoteWriter, ok := w.mServers[key]
- if !ok {
- provider, err := w.router.remoteProvider.InitRemote(remoteInfo)
- if err != nil {
- w.router.log.Debug(logs.RouterCouldNotInitializeWriterProvider,
- zap.String("error", err.Error()),
- )
-
- continue
- }
-
- // init writer with original context wrapped in routeContext
- remoteWriter, err = provider.InitWriter(w.routeInfo.EpochProvider)
- if err != nil {
- w.router.log.Debug(logs.RouterCouldNotInitializeWriter,
- zap.String("error", err.Error()),
- )
-
- continue
- }
-
- w.mServers[key] = remoteWriter
- }
-
- err := remoteWriter.Write(ctx, t)
- if err != nil {
- w.router.log.Debug(logs.RouterCouldNotWriteTheValue,
- zap.String("error", err.Error()),
- )
- }
- }
-
- return nil
-}
-
-func (w *trustWriter) Close(ctx context.Context) error {
- for key, wRemote := range w.mServers {
- err := wRemote.Close(ctx)
- if err != nil {
- w.router.log.Debug(logs.RouterCouldNotCloseRemoteServerWriter,
- zap.String("key", key),
- zap.String("error", err.Error()),
- )
- }
- }
-
- return nil
-}
diff --git a/pkg/services/reputation/common/router/deps.go b/pkg/services/reputation/common/router/deps.go
deleted file mode 100644
index 36aecb59f..000000000
--- a/pkg/services/reputation/common/router/deps.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package router
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
-)
-
-// Builder groups methods to route values in the network.
-type Builder interface {
- // NextStage must return next group of route points
- // for passed epoch and trust values.
- // Implementation must take into account already passed route points.
- //
- // Empty passed list means being at the starting point of the route.
- //
- // Must return empty list and no error if the endpoint of the route is reached.
- NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error)
-}
-
-// RemoteWriterProvider describes the component
-// for sending values to a fixed route point.
-type RemoteWriterProvider interface {
- // InitRemote must return WriterProvider to the route point
- // corresponding to info.
- //
- // Nil info matches the end of the route.
- InitRemote(info common.ServerInfo) (common.WriterProvider, error)
-}
diff --git a/pkg/services/reputation/common/router/opts.go b/pkg/services/reputation/common/router/opts.go
deleted file mode 100644
index 1b3454412..000000000
--- a/pkg/services/reputation/common/router/opts.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package router
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Router.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns Option to specify logging component.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/reputation/common/router/router.go b/pkg/services/reputation/common/router/router.go
deleted file mode 100644
index b80f6ce52..000000000
--- a/pkg/services/reputation/common/router/router.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package router
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// Prm groups the required parameters of the Router's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Characteristics of the local node's server.
- //
- // Must not be nil.
- LocalServerInfo common.ServerInfo
-
- // Component for sending values to a fixed route point.
- //
- // Must not be nil.
- RemoteWriterProvider RemoteWriterProvider
-
- // Route planner.
- //
- // Must not be nil.
- Builder Builder
-}
-
-// Router represents component responsible for routing
-// local trust values over the network.
-//
-// For each fixed pair (node peer, epoch) there is a
-// single value route on the network. Router provides the
-// interface for writing values to the next point of the route.
-//
-// For correct operation, Router must be created using
-// the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Router is immediately ready to work through API.
-type Router struct {
- log *logger.Logger
-
- remoteProvider RemoteWriterProvider
-
- routeBuilder Builder
-
- localSrvInfo common.ServerInfo
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-func New(prm Prm, opts ...Option) *Router {
- switch {
- case prm.RemoteWriterProvider == nil:
- panicOnPrmValue("RemoteWriterProvider", prm.RemoteWriterProvider)
- case prm.Builder == nil:
- panicOnPrmValue("Builder", prm.Builder)
- case prm.LocalServerInfo == nil:
- panicOnPrmValue("LocalServerInfo", prm.LocalServerInfo)
- }
-
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- return &Router{
- log: o.log,
- remoteProvider: prm.RemoteWriterProvider,
- routeBuilder: prm.Builder,
- localSrvInfo: prm.LocalServerInfo,
- }
-}
diff --git a/pkg/services/reputation/common/router/util.go b/pkg/services/reputation/common/router/util.go
deleted file mode 100644
index aa3190d2b..000000000
--- a/pkg/services/reputation/common/router/util.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package router
-
-import (
- "bytes"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
-)
-
-var errWrongRoute = errors.New("wrong route")
-
-// CheckRoute checks if the route is a route correctly constructed by the builder for value a.
-//
-// Returns nil if route is correct, otherwise an error clarifying the inconsistency.
-func CheckRoute(builder Builder, epoch uint64, t reputation.Trust, route []common.ServerInfo) error {
- for i := 1; i < len(route); i++ {
- servers, err := builder.NextStage(epoch, t, route[:i])
- if err != nil {
- return err
- } else if len(servers) == 0 {
- break
- }
-
- found := false
-
- for j := range servers {
- if bytes.Equal(servers[j].PublicKey(), route[i].PublicKey()) {
- found = true
- break
- }
- }
-
- if !found {
- return errWrongRoute
- }
- }
-
- return nil
-}
diff --git a/pkg/services/reputation/eigentrust/calculator/calculator.go b/pkg/services/reputation/eigentrust/calculator/calculator.go
deleted file mode 100644
index bfa274fea..000000000
--- a/pkg/services/reputation/eigentrust/calculator/calculator.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package eigentrustcalc
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
-)
-
-// Prm groups the required parameters of the Calculator's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Alpha parameter from origin EigenTrust algorithm
- // http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Ch.5.1.
- //
- // Must be in range (0, 1).
- AlphaProvider AlphaProvider
-
- // Source of initial node trust values
- //
- // Must not be nil.
- InitialTrustSource InitialTrustSource
-
- DaughterTrustSource DaughterTrustIteratorProvider
-
- IntermediateValueTarget common.WriterProvider
-
- FinalResultTarget IntermediateWriterProvider
-
- WorkerPool util.WorkerPool
-}
-
-// Calculator is a processor of a single iteration of EigenTrust algorithm.
-//
-// For correct operation, the Calculator must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Calculator is immediately ready to work through
-// API of external control of calculations and data transfer.
-type Calculator struct {
- alpha, beta reputation.TrustValue // beta = 1 - alpha
-
- prm Prm
-
- opts *options
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Calculator.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Calculator does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Calculator {
- switch {
- case prm.AlphaProvider == nil:
- panicOnPrmValue("AlphaProvider", prm.AlphaProvider)
- case prm.InitialTrustSource == nil:
- panicOnPrmValue("InitialTrustSource", prm.InitialTrustSource)
- case prm.DaughterTrustSource == nil:
- panicOnPrmValue("DaughterTrustSource", prm.DaughterTrustSource)
- case prm.IntermediateValueTarget == nil:
- panicOnPrmValue("IntermediateValueTarget", prm.IntermediateValueTarget)
- case prm.FinalResultTarget == nil:
- panicOnPrmValue("FinalResultTarget", prm.FinalResultTarget)
- case prm.WorkerPool == nil:
- panicOnPrmValue("WorkerPool", prm.WorkerPool)
- }
-
- o := defaultOpts()
-
- for _, opt := range opts {
- opt(o)
- }
-
- return &Calculator{
- prm: prm,
- opts: o,
- }
-}
diff --git a/pkg/services/reputation/eigentrust/calculator/calls.go b/pkg/services/reputation/eigentrust/calculator/calls.go
deleted file mode 100644
index 5e2e900ae..000000000
--- a/pkg/services/reputation/eigentrust/calculator/calls.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package eigentrustcalc
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "go.uber.org/zap"
-)
-
-type CalculatePrm struct {
- last bool
-
- ei eigentrust.EpochIteration
-}
-
-func (p *CalculatePrm) SetLast(last bool) {
- p.last = last
-}
-
-func (p *CalculatePrm) SetEpochIteration(ei eigentrust.EpochIteration) {
- p.ei = ei
-}
-
-func (c *Calculator) Calculate(ctx context.Context, prm CalculatePrm) {
- alpha, err := c.prm.AlphaProvider.EigenTrustAlpha()
- if err != nil {
- c.opts.log.Debug(
- logs.CalculatorFailedToGetAlphaParam,
- zap.Error(err),
- )
- return
- }
-
- c.alpha = reputation.TrustValueFromFloat64(alpha)
- c.beta = reputation.TrustValueFromFloat64(1 - alpha)
-
- epochIteration := prm.ei
-
- iter := epochIteration.I()
-
- log := c.opts.log.With(
- zap.Uint64("epoch", epochIteration.Epoch()),
- zap.Uint32("iteration", iter),
- )
-
- if iter == 0 {
- c.sendInitialValues(ctx, epochIteration)
- return
- }
-
- // decrement iteration number to select the values collected
- // on the previous stage
- epochIteration.SetI(iter - 1)
-
- consumersIter, err := c.prm.DaughterTrustSource.InitConsumersIterator(epochIteration)
- if err != nil {
- log.Debug(logs.CalculatorConsumersTrustIteratorsInitFailure,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // continue with initial iteration number
- epochIteration.SetI(iter)
-
- err = consumersIter.Iterate(func(daughter apireputation.PeerID, iter TrustIterator) error {
- err := c.prm.WorkerPool.Submit(func() {
- c.iterateDaughter(ctx, iterDaughterPrm{
- lastIter: prm.last,
- ei: epochIteration,
- id: daughter,
- consumersIter: iter,
- })
- })
- if err != nil {
- log.Debug(logs.CalculatorWorkerPoolSubmitFailure,
- zap.String("error", err.Error()),
- )
- }
-
- // don't stop trying
- return nil
- })
- if err != nil {
- log.Debug(logs.CalculatorIterateDaughtersConsumersFailed,
- zap.String("error", err.Error()),
- )
- }
-}
-
-type iterDaughterPrm struct {
- lastIter bool
-
- ei EpochIterationInfo
-
- id apireputation.PeerID
-
- consumersIter TrustIterator
-}
-
-func (c *Calculator) iterateDaughter(ctx context.Context, p iterDaughterPrm) {
- initTrust, err := c.prm.InitialTrustSource.InitialTrust(p.id)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorGetInitialTrustFailure,
- zap.Stringer("daughter", p.id),
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- daughterIter, err := c.prm.DaughterTrustSource.InitDaughterIterator(p.ei, p.id)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorDaughterTrustIteratorsInitFailure,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- sum := reputation.TrustZero
-
- err = p.consumersIter.Iterate(func(trust reputation.Trust) error {
- if !p.lastIter {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- }
-
- sum.Add(trust.Value())
- return nil
- })
- if err != nil {
- c.opts.log.Debug(logs.CalculatorIterateOverDaughtersTrustsFailure,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // Alpha * Pd
- initTrust.Mul(c.alpha)
-
- sum.Mul(c.beta)
- sum.Add(initTrust)
-
- var intermediateTrust eigentrust.IterationTrust
-
- intermediateTrust.SetEpoch(p.ei.Epoch())
- intermediateTrust.SetPeer(p.id)
- intermediateTrust.SetI(p.ei.I())
-
- if p.lastIter {
- c.processLastIteration(p, intermediateTrust, sum)
- } else {
- c.processIntermediateIteration(ctx, p, daughterIter, sum)
- }
-}
-
-func (c *Calculator) processLastIteration(p iterDaughterPrm, intermediateTrust eigentrust.IterationTrust, sum reputation.TrustValue) {
- finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ei)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorInitWriterFailure,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- intermediateTrust.SetValue(sum)
-
- err = finalWriter.WriteIntermediateTrust(intermediateTrust)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorWriteFinalResultFailure,
- zap.String("error", err.Error()),
- )
-
- return
- }
-}
-
-func (c *Calculator) processIntermediateIteration(ctx context.Context, p iterDaughterPrm, daughterIter TrustIterator, sum reputation.TrustValue) {
- intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ei)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorInitWriterFailure,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- err = daughterIter.Iterate(func(trust reputation.Trust) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- val := trust.Value()
- val.Mul(sum)
-
- trust.SetValue(val)
-
- err := intermediateWriter.Write(ctx, trust)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorWriteValueFailure,
- zap.String("error", err.Error()),
- )
- }
-
- return nil
- })
- if err != nil {
- c.opts.log.Debug(logs.CalculatorIterateDaughterTrustsFailure,
- zap.String("error", err.Error()),
- )
- }
-
- err = intermediateWriter.Close(ctx)
- if err != nil {
- c.opts.log.Error(
- "could not close writer",
- zap.String("error", err.Error()),
- )
- }
-}
-
-func (c *Calculator) sendInitialValues(ctx context.Context, epochInfo EpochIterationInfo) {
- daughterIter, err := c.prm.DaughterTrustSource.InitAllDaughtersIterator(epochInfo)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorAllDaughtersTrustIteratorsInitFailure,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(epochInfo)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorInitWriterFailure,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- err = daughterIter.Iterate(func(daughter apireputation.PeerID, iterator TrustIterator) error {
- return iterator.Iterate(func(trust reputation.Trust) error {
- trusted := trust.Peer()
-
- initTrust, err := c.prm.InitialTrustSource.InitialTrust(trusted)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorGetInitialTrustFailure,
- zap.Stringer("peer", trusted),
- zap.String("error", err.Error()),
- )
-
- // don't stop on single failure
- return nil
- }
-
- initTrust.Mul(trust.Value())
- trust.SetValue(initTrust)
-
- err = intermediateWriter.Write(ctx, trust)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorWriteValueFailure,
- zap.String("error", err.Error()),
- )
-
- // don't stop on single failure
- }
-
- return nil
- })
- })
- if err != nil {
- c.opts.log.Debug(logs.CalculatorIterateOverAllDaughtersFailure,
- zap.String("error", err.Error()),
- )
- }
-
- err = intermediateWriter.Close(ctx)
- if err != nil {
- c.opts.log.Debug(logs.CalculatorCouldNotCloseWriter,
- zap.String("error", err.Error()),
- )
- }
-}
diff --git a/pkg/services/reputation/eigentrust/calculator/deps.go b/pkg/services/reputation/eigentrust/calculator/deps.go
deleted file mode 100644
index a22d1df76..000000000
--- a/pkg/services/reputation/eigentrust/calculator/deps.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package eigentrustcalc
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-type EpochIterationInfo interface {
- // Must return epoch number to select the values
- // for global trust calculation.
- Epoch() uint64
-
- // Must return the sequence number of the iteration.
- I() uint32
-}
-
-// InitialTrustSource must provide initial(non-calculated)
-// trusts to current node's daughter. Realization may depends
-// on daughter.
-type InitialTrustSource interface {
- InitialTrust(apireputation.PeerID) (reputation.TrustValue, error)
-}
-
-// TrustIterator must iterate over all retrieved(or calculated) trusts
-// and call passed TrustHandler on them.
-type TrustIterator interface {
- Iterate(reputation.TrustHandler) error
-}
-
-type PeerTrustsHandler func(apireputation.PeerID, TrustIterator) error
-
-// PeerTrustsIterator must iterate over all nodes(PeerIDs) and provide
-// TrustIterator for iteration over node's Trusts to others peers.
-type PeerTrustsIterator interface {
- Iterate(PeerTrustsHandler) error
-}
-
-type DaughterTrustIteratorProvider interface {
- // InitDaughterIterator must init TrustIterator
- // that iterates over received local trusts from
- // daughter p for epochInfo.Epoch() epoch.
- InitDaughterIterator(epochInfo EpochIterationInfo, p apireputation.PeerID) (TrustIterator, error)
- // InitAllDaughtersIterator must init PeerTrustsIterator
- // that must iterate over all daughters of the current
- // node(manager) and all trusts received from them for
- // epochInfo.Epoch() epoch.
- InitAllDaughtersIterator(epochInfo EpochIterationInfo) (PeerTrustsIterator, error)
- // InitConsumersIterator must init PeerTrustsIterator
- // that must iterate over all daughters of the current
- // node(manager) and their consumers' trusts received
- // from other managers for epochInfo.Epoch() epoch and
- // epochInfo.I() iteration.
- InitConsumersIterator(EpochIterationInfo) (PeerTrustsIterator, error)
-}
-
-// IntermediateWriter must write intermediate result to contract.
-// It may depends on realization either trust is sent directly to contract
-// or via redirecting to other node.
-type IntermediateWriter interface {
- WriteIntermediateTrust(eigentrust.IterationTrust) error
-}
-
-// IntermediateWriterProvider must provide ready-to-work
-// IntermediateWriter.
-type IntermediateWriterProvider interface {
- InitIntermediateWriter(EpochIterationInfo) (IntermediateWriter, error)
-}
-
-// AlphaProvider must provide information about required
-// alpha parameter for eigen trust algorithm.
-type AlphaProvider interface {
- EigenTrustAlpha() (float64, error)
-}
diff --git a/pkg/services/reputation/eigentrust/calculator/opts.go b/pkg/services/reputation/eigentrust/calculator/opts.go
deleted file mode 100644
index e1e572361..000000000
--- a/pkg/services/reputation/eigentrust/calculator/opts.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package eigentrustcalc
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Controller.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns option to specify logging component.
-//
-// Ignores nil values.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/reputation/eigentrust/controller/calls.go b/pkg/services/reputation/eigentrust/controller/calls.go
deleted file mode 100644
index 886daf9be..000000000
--- a/pkg/services/reputation/eigentrust/controller/calls.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package eigentrustctrl
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- "go.uber.org/zap"
-)
-
-// ContinuePrm groups the required parameters of Continue operation.
-type ContinuePrm struct {
- Epoch uint64
-}
-
-type iterContext struct {
- eigentrust.EpochIteration
-
- iterationNumber uint32
- last bool
-}
-
-func (x iterContext) Last() bool {
- return x.last
-}
-
-// Continue moves the global reputation calculator to the next iteration.
-func (c *Controller) Continue(ctx context.Context, prm ContinuePrm) {
- c.mtx.Lock()
-
- {
- iterCtx, ok := c.mCtx[prm.Epoch]
- if !ok {
- iterCtx = new(iterContext)
- c.mCtx[prm.Epoch] = iterCtx
-
- iterCtx.EpochIteration.SetEpoch(prm.Epoch)
-
- iterations, err := c.prm.IterationsProvider.EigenTrustIterations()
- if err != nil {
- c.opts.log.Error(logs.ControllerCouldNotGetEigenTrustIterationNumber,
- zap.Error(err),
- )
- } else {
- iterCtx.iterationNumber = uint32(iterations)
- }
- }
-
- iterCtx.last = iterCtx.I() == iterCtx.iterationNumber-1
-
- err := c.prm.WorkerPool.Submit(func() {
- c.prm.DaughtersTrustCalculator.Calculate(ctx, iterCtx)
-
- // iteration++
- iterCtx.Increment()
- })
- if err != nil {
- c.opts.log.Debug(logs.ControllerIterationSubmitFailure,
- zap.String("error", err.Error()),
- )
- }
-
- if iterCtx.last {
- // will only live while the application is alive.
- // during normal operation of the system. Also, such information
- // number as already processed, but in any case it grows up
- // In this case and worker pool failure we can mark epoch
- delete(c.mCtx, prm.Epoch)
- }
- }
-
- c.mtx.Unlock()
-}
diff --git a/pkg/services/reputation/eigentrust/controller/controller.go b/pkg/services/reputation/eigentrust/controller/controller.go
deleted file mode 100644
index a6d0d4a82..000000000
--- a/pkg/services/reputation/eigentrust/controller/controller.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package eigentrustctrl
-
-import (
- "fmt"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
-)
-
-// Prm groups the required parameters of the Controller's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Component of computing iteration of EigenTrust algorithm.
- //
- // Must not be nil.
- DaughtersTrustCalculator DaughtersTrustCalculator
-
- // IterationsProvider provides information about numbers
- // of iterations for algorithm.
- IterationsProvider IterationsProvider
-
- // Routine execution pool for single epoch iteration.
- WorkerPool util.WorkerPool
-}
-
-// Controller represents EigenTrust algorithm transient controller.
-//
-// Controller's main goal is to separate the two main stages of
-// the calculation:
-// 1. reporting local values to manager nodes
-// 2. calculating global trusts of child nodes
-//
-// Calculation stages are controlled based on external signals
-// that come from the application through the Controller's API.
-//
-// For correct operation, the controller must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the constructor is immediately ready to work through
-// API of external control of calculations and data transfer.
-type Controller struct {
- prm Prm
-
- opts *options
-
- mtx sync.Mutex
- mCtx map[uint64]*iterContext
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Controller.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Controller does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Controller {
- switch {
- case prm.IterationsProvider == nil:
- panicOnPrmValue("IterationNumber", prm.IterationsProvider)
- case prm.WorkerPool == nil:
- panicOnPrmValue("WorkerPool", prm.WorkerPool)
- case prm.DaughtersTrustCalculator == nil:
- panicOnPrmValue("DaughtersTrustCalculator", prm.DaughtersTrustCalculator)
- }
-
- o := defaultOpts()
-
- for _, opt := range opts {
- opt(o)
- }
-
- return &Controller{
- prm: prm,
- opts: o,
- mCtx: make(map[uint64]*iterContext),
- }
-}
diff --git a/pkg/services/reputation/eigentrust/controller/deps.go b/pkg/services/reputation/eigentrust/controller/deps.go
deleted file mode 100644
index c068f7cc4..000000000
--- a/pkg/services/reputation/eigentrust/controller/deps.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package eigentrustctrl
-
-import "context"
-
-// IterationContext is a context of the i-th
-// stage of iterative EigenTrust algorithm.
-type IterationContext interface {
- // Must return epoch number to select the values
- // for global trust calculation.
- Epoch() uint64
-
- // Must return the sequence number of the iteration.
- I() uint32
-
- // Must return true if I() is the last iteration.
- Last() bool
-}
-
-// DaughtersTrustCalculator is an interface of entity
-// responsible for calculating the global trust of
-// daughter nodes in terms of EigenTrust algorithm.
-type DaughtersTrustCalculator interface {
- // Must perform the iteration step of the loop
- // for computing the global trust of all daughter
- // nodes and sending intermediate values
- // according to EigenTrust description
- // http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Ch.5.1.
- //
- // Execution should be interrupted if ctx.Last().
- Calculate(ctx context.Context, iter IterationContext)
-}
-
-// IterationsProvider must provide information about numbers
-// of iterations for algorithm.
-type IterationsProvider interface {
- EigenTrustIterations() (uint64, error)
-}
diff --git a/pkg/services/reputation/eigentrust/controller/opts.go b/pkg/services/reputation/eigentrust/controller/opts.go
deleted file mode 100644
index 16bc61c2f..000000000
--- a/pkg/services/reputation/eigentrust/controller/opts.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package eigentrustctrl
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Controller.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns option to specify logging component.
-//
-// Ignores nil values.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/reputation/eigentrust/iteration.go b/pkg/services/reputation/eigentrust/iteration.go
deleted file mode 100644
index e4793f044..000000000
--- a/pkg/services/reputation/eigentrust/iteration.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package eigentrust
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
-)
-
-type EpochIteration struct {
- e uint64
- i uint32
-}
-
-func (x EpochIteration) Epoch() uint64 {
- return x.e
-}
-
-func (x *EpochIteration) SetEpoch(e uint64) {
- x.e = e
-}
-
-func (x EpochIteration) I() uint32 {
- return x.i
-}
-
-func (x *EpochIteration) SetI(i uint32) {
- x.i = i
-}
-
-func (x *EpochIteration) Increment() {
- x.i++
-}
-
-type IterationTrust struct {
- EpochIteration
- reputation.Trust
-}
-
-func NewEpochIteration(epoch uint64, iter uint32) *EpochIteration {
- ei := EpochIteration{}
-
- ei.SetI(iter)
- ei.SetEpoch(epoch)
-
- return &ei
-}
diff --git a/pkg/services/reputation/eigentrust/routes/builder.go b/pkg/services/reputation/eigentrust/routes/builder.go
deleted file mode 100644
index ddd5a2ae0..000000000
--- a/pkg/services/reputation/eigentrust/routes/builder.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package routes
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// Prm groups the required parameters of the Builder's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Manager builder for current node.
- //
- // Must not be nil.
- ManagerBuilder common.ManagerBuilder
-
- Log *logger.Logger
-}
-
-// Builder represents component that routes node to its managers.
-//
-// For correct operation, Builder must be created using
-// the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Builder is immediately ready to work through API.
-type Builder struct {
- managerBuilder common.ManagerBuilder
- log *logger.Logger
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Builder.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Builder does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm) *Builder {
- switch {
- case prm.ManagerBuilder == nil:
- panicOnPrmValue("ManagerBuilder", prm.ManagerBuilder)
- case prm.Log == nil:
- panicOnPrmValue("Logger", prm.Log)
- }
-
- return &Builder{
- managerBuilder: prm.ManagerBuilder,
- log: prm.Log,
- }
-}
diff --git a/pkg/services/reputation/eigentrust/routes/calls.go b/pkg/services/reputation/eigentrust/routes/calls.go
deleted file mode 100644
index ccb2fe8ea..000000000
--- a/pkg/services/reputation/eigentrust/routes/calls.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package routes
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "go.uber.org/zap"
-)
-
-// NextStage builds Manager list for trusted node and returns it directly.
-//
-// If passed route has more than one point, then endpoint of the route is reached.
-func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) {
- passedLen := len(passed)
-
- b.log.Debug(logs.RoutesBuildingNextStageForTrustRoute,
- zap.Uint64("epoch", epoch),
- zap.Int("passed_length", passedLen),
- )
-
- if passedLen > 1 {
- return nil, nil
- }
-
- route, err := b.managerBuilder.BuildManagers(epoch, t.Peer())
- if err != nil {
- return nil, fmt.Errorf("could not build managers for epoch: %d: %w", epoch, err)
- }
-
- return route, nil
-}
diff --git a/pkg/services/reputation/eigentrust/storage/consumers/calls.go b/pkg/services/reputation/eigentrust/storage/consumers/calls.go
deleted file mode 100644
index 55a4d6f3d..000000000
--- a/pkg/services/reputation/eigentrust/storage/consumers/calls.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package consumerstorage
-
-import (
- "fmt"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
- eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// Put saves intermediate trust of the consumer to daughter peer.
-func (x *Storage) Put(trust eigentrust.IterationTrust) {
- var s *iterationConsumersStorage
-
- x.mtx.Lock()
-
- {
- epoch := trust.Epoch()
-
- s = x.mItems[epoch]
- if s == nil {
- s = &iterationConsumersStorage{
- mItems: make(map[uint32]*ConsumersStorage, 1),
- }
-
- x.mItems[epoch] = s
- }
- }
-
- x.mtx.Unlock()
-
- s.put(trust)
-}
-
-// Consumers returns the storage of trusts of the consumers of the daughter peers
-// for particular iteration of EigenTrust calculation for particular epoch.
-//
-// Returns false if there is no data for the epoch and iter.
-func (x *Storage) Consumers(epoch uint64, iter uint32) (*ConsumersStorage, bool) {
- var (
- s *iterationConsumersStorage
- ok bool
- )
-
- x.mtx.Lock()
-
- {
- s, ok = x.mItems[epoch]
- }
-
- x.mtx.Unlock()
-
- if !ok {
- return nil, false
- }
-
- return s.consumers(iter)
-}
-
-// maps iteration numbers of EigenTrust algorithm to repositories
-// of the trusts of the consumers of the daughter peers.
-type iterationConsumersStorage struct {
- mtx sync.RWMutex
-
- mItems map[uint32]*ConsumersStorage
-}
-
-func (x *iterationConsumersStorage) put(trust eigentrust.IterationTrust) {
- var s *ConsumersStorage
-
- x.mtx.Lock()
-
- {
- iter := trust.I()
-
- s = x.mItems[iter]
- if s == nil {
- s = &ConsumersStorage{
- mItems: make(map[string]*ConsumersTrusts, 1),
- }
-
- x.mItems[iter] = s
- }
- }
-
- x.mtx.Unlock()
-
- s.put(trust)
-}
-
-func (x *iterationConsumersStorage) consumers(iter uint32) (s *ConsumersStorage, ok bool) {
- x.mtx.Lock()
-
- {
- s, ok = x.mItems[iter]
- }
-
- x.mtx.Unlock()
-
- return
-}
-
-// ConsumersStorage represents in-memory storage of intermediate trusts
-// of the peer consumers.
-//
-// Maps daughter peers to repositories of the trusts of their consumers.
-type ConsumersStorage struct {
- mtx sync.RWMutex
-
- mItems map[string]*ConsumersTrusts
-}
-
-func (x *ConsumersStorage) put(trust eigentrust.IterationTrust) {
- var s *ConsumersTrusts
-
- x.mtx.Lock()
-
- {
- daughter := trust.Peer().EncodeToString()
-
- s = x.mItems[daughter]
- if s == nil {
- s = &ConsumersTrusts{
- mItems: make(map[string]reputation.Trust, 1),
- }
-
- x.mItems[daughter] = s
- }
- }
-
- x.mtx.Unlock()
-
- s.put(trust)
-}
-
-// Iterate passes IDs of the daughter peers with the trusts of their consumers to h.
-//
-// Returns errors from h directly.
-func (x *ConsumersStorage) Iterate(h eigentrustcalc.PeerTrustsHandler) (err error) {
- x.mtx.RLock()
-
- {
- for strTrusted, trusts := range x.mItems {
- var trusted apireputation.PeerID
-
- if strTrusted != "" {
- err = trusted.DecodeString(strTrusted)
- if err != nil {
- panic(fmt.Sprintf("decode peer ID string %s: %v", strTrusted, err))
- }
- }
-
- if err = h(trusted, trusts); err != nil {
- break
- }
- }
- }
-
- x.mtx.RUnlock()
-
- return
-}
-
-// ConsumersTrusts represents in-memory storage of the trusts
-// of the consumer peers to some other peer.
-type ConsumersTrusts struct {
- mtx sync.RWMutex
-
- mItems map[string]reputation.Trust
-}
-
-func (x *ConsumersTrusts) put(trust eigentrust.IterationTrust) {
- x.mtx.Lock()
-
- {
- x.mItems[trust.TrustingPeer().EncodeToString()] = trust.Trust
- }
-
- x.mtx.Unlock()
-}
-
-// Iterate passes all stored trusts to h.
-//
-// Returns errors from h directly.
-func (x *ConsumersTrusts) Iterate(h reputation.TrustHandler) (err error) {
- x.mtx.RLock()
-
- {
- for _, trust := range x.mItems {
- if err = h(trust); err != nil {
- break
- }
- }
- }
-
- x.mtx.RUnlock()
-
- return
-}
diff --git a/pkg/services/reputation/eigentrust/storage/consumers/storage.go b/pkg/services/reputation/eigentrust/storage/consumers/storage.go
deleted file mode 100644
index ee811d84b..000000000
--- a/pkg/services/reputation/eigentrust/storage/consumers/storage.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package consumerstorage
-
-import (
- "sync"
-)
-
-// Prm groups the required parameters of the Storage's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-//
-// The component is not parameterizable at the moment.
-type Prm struct{}
-
-// Storage represents in-memory storage of the trusts
-// of the consumer peers.
-//
-// It maps epoch numbers to the repositories of intermediate
-// trusts of the consumers of the daughter peers.
-//
-// For correct operation, Storage must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// Storage is immediately ready to work through API.
-type Storage struct {
- mtx sync.RWMutex
-
- mItems map[uint64]*iterationConsumersStorage
-}
-
-// New creates a new instance of the Storage.
-//
-// The created Storage does not require additional
-// initialization and is completely ready for work.
-func New(_ Prm) *Storage {
- return &Storage{
- mItems: make(map[uint64]*iterationConsumersStorage),
- }
-}
diff --git a/pkg/services/reputation/eigentrust/storage/daughters/calls.go b/pkg/services/reputation/eigentrust/storage/daughters/calls.go
deleted file mode 100644
index eb229365e..000000000
--- a/pkg/services/reputation/eigentrust/storage/daughters/calls.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package daughters
-
-import (
- "fmt"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// Put saves daughter peer's trust to its provider for the epoch.
-func (x *Storage) Put(epoch uint64, trust reputation.Trust) {
- var s *DaughterStorage
-
- x.mtx.Lock()
-
- {
- s = x.mItems[epoch]
- if s == nil {
- s = &DaughterStorage{
- mItems: make(map[string]*DaughterTrusts, 1),
- }
-
- x.mItems[epoch] = s
- }
- }
-
- x.mtx.Unlock()
-
- s.put(trust)
-}
-
-// DaughterTrusts returns daughter trusts for the epoch.
-//
-// Returns false if there is no data for the epoch and daughter.
-func (x *Storage) DaughterTrusts(epoch uint64, daughter apireputation.PeerID) (*DaughterTrusts, bool) {
- var (
- s *DaughterStorage
- ok bool
- )
-
- x.mtx.RLock()
-
- {
- s, ok = x.mItems[epoch]
- }
-
- x.mtx.RUnlock()
-
- if !ok {
- return nil, false
- }
-
- return s.daughterTrusts(daughter)
-}
-
-// AllDaughterTrusts returns daughter iterator for the epoch.
-//
-// Returns false if there is no data for the epoch and daughter.
-func (x *Storage) AllDaughterTrusts(epoch uint64) (*DaughterStorage, bool) {
- x.mtx.RLock()
- defer x.mtx.RUnlock()
-
- s, ok := x.mItems[epoch]
-
- return s, ok
-}
-
-// DaughterStorage maps IDs of daughter peers to repositories of the local trusts to their providers.
-type DaughterStorage struct {
- mtx sync.RWMutex
-
- mItems map[string]*DaughterTrusts
-}
-
-// Iterate passes IDs of the daughter peers with their trusts to h.
-//
-// Returns errors from h directly.
-func (x *DaughterStorage) Iterate(h eigentrustcalc.PeerTrustsHandler) (err error) {
- x.mtx.RLock()
-
- {
- for strDaughter, daughterTrusts := range x.mItems {
- var daughter apireputation.PeerID
-
- if strDaughter != "" {
- err = daughter.DecodeString(strDaughter)
- if err != nil {
- panic(fmt.Sprintf("decode peer ID string %s: %v", strDaughter, err))
- }
- }
-
- if err = h(daughter, daughterTrusts); err != nil {
- break
- }
- }
- }
-
- x.mtx.RUnlock()
-
- return
-}
-
-func (x *DaughterStorage) put(trust reputation.Trust) {
- var dt *DaughterTrusts
-
- x.mtx.Lock()
-
- {
- trusting := trust.TrustingPeer().EncodeToString()
-
- dt = x.mItems[trusting]
- if dt == nil {
- dt = &DaughterTrusts{
- mItems: make(map[string]reputation.Trust, 1),
- }
-
- x.mItems[trusting] = dt
- }
- }
-
- x.mtx.Unlock()
-
- dt.put(trust)
-}
-
-func (x *DaughterStorage) daughterTrusts(id apireputation.PeerID) (dt *DaughterTrusts, ok bool) {
- x.mtx.RLock()
-
- {
- dt, ok = x.mItems[id.EncodeToString()]
- }
-
- x.mtx.RUnlock()
-
- return
-}
-
-// DaughterTrusts represents in-memory storage of local trusts
-// of the daughter peer to its providers.
-//
-// Maps IDs of daughter's providers to the local trusts to them.
-type DaughterTrusts struct {
- mtx sync.RWMutex
-
- mItems map[string]reputation.Trust
-}
-
-func (x *DaughterTrusts) put(trust reputation.Trust) {
- x.mtx.Lock()
-
- {
- x.mItems[trust.Peer().EncodeToString()] = trust
- }
-
- x.mtx.Unlock()
-}
-
-// Iterate passes all stored trusts to h.
-//
-// Returns errors from h directly.
-func (x *DaughterTrusts) Iterate(h reputation.TrustHandler) (err error) {
- x.mtx.RLock()
-
- {
- for _, trust := range x.mItems {
- if err = h(trust); err != nil {
- break
- }
- }
- }
-
- x.mtx.RUnlock()
-
- return
-}
diff --git a/pkg/services/reputation/eigentrust/storage/daughters/storage.go b/pkg/services/reputation/eigentrust/storage/daughters/storage.go
deleted file mode 100644
index 26399fce4..000000000
--- a/pkg/services/reputation/eigentrust/storage/daughters/storage.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package daughters
-
-import "sync"
-
-// Prm groups the required parameters of the Storage's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-//
-// The component is not parameterizable at the moment.
-type Prm struct{}
-
-// Storage represents in-memory storage of local trust
-// values of the daughter peers.
-//
-// It maps epoch numbers to the repositories of local trusts
-// of the daughter peers.
-//
-// For correct operation, Storage must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// Storage is immediately ready to work through API.
-type Storage struct {
- mtx sync.RWMutex
-
- mItems map[uint64]*DaughterStorage
-}
-
-// New creates a new instance of the Storage.
-//
-// The created Storage does not require additional
-// initialization and is completely ready for work.
-func New(_ Prm) *Storage {
- return &Storage{
- mItems: make(map[uint64]*DaughterStorage),
- }
-}
diff --git a/pkg/services/reputation/local/controller/calls.go b/pkg/services/reputation/local/controller/calls.go
deleted file mode 100644
index 1cad09313..000000000
--- a/pkg/services/reputation/local/controller/calls.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package trustcontroller
-
-import (
- "context"
- "errors"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// ReportPrm groups the required parameters of the Controller.Report method.
-type ReportPrm struct {
- epoch uint64
-}
-
-// SetEpoch sets epoch number to select reputation values.
-func (p *ReportPrm) SetEpoch(e uint64) {
- p.epoch = e
-}
-
-// Report reports local reputation values.
-//
-// Single Report operation overtakes all data from LocalTrustSource
-// to LocalTrustTarget (Controller's parameters).
-//
-// Each call acquires a report context for an Epoch parameter.
-// At the very end of the operation, the context is released.
-func (c *Controller) Report(ctx context.Context, prm ReportPrm) {
- // acquire report
- rCtx, reporter := c.acquireReporter(ctx, prm.epoch)
- if reporter == nil {
- return
- }
-
- // report local trust values
- reporter.report(rCtx)
-
- // finally stop and free the report
- c.freeReport(prm.epoch, reporter.log)
-}
-
-type reporter struct {
- epoch uint64
-
- ctrl *Controller
-
- log *logger.Logger
-
- ep common.EpochProvider
-}
-
-type epochProvider struct {
- epoch uint64
-}
-
-func (c epochProvider) Epoch() uint64 {
- return c.epoch
-}
-
-func (c *Controller) acquireReporter(ctx context.Context, epoch uint64) (context.Context, *reporter) {
- started := true
-
- c.mtx.Lock()
- {
- if cancel := c.mCtx[epoch]; cancel == nil {
- ctx, cancel = context.WithCancel(ctx)
- c.mCtx[epoch] = cancel
- started = false
- }
- }
- c.mtx.Unlock()
-
- log := &logger.Logger{Logger: c.opts.log.With(
- zap.Uint64("epoch", epoch),
- )}
-
- if started {
- log.Debug(logs.ControllerReportIsAlreadyStarted)
- return ctx, nil
- }
-
- return ctx, &reporter{
- epoch: epoch,
- ctrl: c,
- log: log,
- ep: &epochProvider{
- epoch: epoch,
- },
- }
-}
-
-func (c *reporter) report(ctx context.Context) {
- c.log.Debug(logs.ControllerStartingToReportLocalTrustValues)
-
- // initialize iterator over locally collected values
- iterator, err := c.ctrl.prm.LocalTrustSource.InitIterator(c.ep)
- if err != nil {
- c.log.Debug(logs.ControllerCouldNotInitializeIteratorOverLocalTrustValues,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // initialize target of local trust values
- targetWriter, err := c.ctrl.prm.LocalTrustTarget.InitWriter(c.ep)
- if err != nil {
- c.log.Debug(logs.ControllerCouldNotInitializeLocalTrustTarget,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // iterate over all values and write them to the target
- err = iterator.Iterate(
- func(t reputation.Trust) error {
- // check if context is done
- if err := ctx.Err(); err != nil {
- return err
- }
-
- return targetWriter.Write(ctx, t)
- },
- )
- if err != nil && !errors.Is(err, context.Canceled) {
- c.log.Debug(logs.ControllerIteratorOverLocalTrustFailed,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- // finish writing
- err = targetWriter.Close(ctx)
- if err != nil {
- c.log.Debug(logs.ControllerCouldNotFinishWritingLocalTrustValues,
- zap.String("error", err.Error()),
- )
-
- return
- }
-
- c.log.Debug(logs.ControllerReportingSuccessfullyFinished)
-}
-
-func (c *Controller) freeReport(epoch uint64, log *logger.Logger) {
- var stopped bool
-
- c.mtx.Lock()
-
- {
- var cancel context.CancelFunc
-
- cancel, stopped = c.mCtx[epoch]
-
- if stopped {
- cancel()
- delete(c.mCtx, epoch)
- }
- }
-
- c.mtx.Unlock()
-
- if stopped {
- log.Debug(logs.ControllerReportingSuccessfullyInterrupted)
- } else {
- log.Debug(logs.ControllerReportingIsNotStartedOrAlreadyInterrupted)
- }
-}
-
-// StopPrm groups the required parameters of the Controller.Stop method.
-type StopPrm struct {
- epoch uint64
-}
-
-// SetEpoch sets epoch number the processing of the values of which must be interrupted.
-func (p *StopPrm) SetEpoch(e uint64) {
- p.epoch = e
-}
-
-// Stop interrupts the processing of local trust values.
-//
-// Releases acquired report context.
-func (c *Controller) Stop(prm StopPrm) {
- c.freeReport(
- prm.epoch,
- &logger.Logger{Logger: c.opts.log.With(zap.Uint64("epoch", prm.epoch))},
- )
-}
diff --git a/pkg/services/reputation/local/controller/controller.go b/pkg/services/reputation/local/controller/controller.go
deleted file mode 100644
index 373df36db..000000000
--- a/pkg/services/reputation/local/controller/controller.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package trustcontroller
-
-import (
- "context"
- "fmt"
- "sync"
-
- reputationrouter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common/router"
-)
-
-// Prm groups the required parameters of the Controller's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Iterator over the reputation values
- // collected by the node locally.
- //
- // Must not be nil.
- LocalTrustSource IteratorProvider
-
- // Place of recording the local values of
- // trust to other nodes.
- //
- // Must not be nil.
- LocalTrustTarget *reputationrouter.Router
-}
-
-// Controller represents main handler for starting
-// and interrupting the reporting local trust values.
-//
-// It binds the interfaces of the local value stores
-// to the target storage points. Controller is abstracted
-// from the internal storage device and the network location
-// of the connecting components. At its core, it is a
-// high-level start-stop trigger for reporting.
-//
-// For correct operation, the controller must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the constructor is immediately ready to work through
-// API of external control of calculations and data transfer.
-type Controller struct {
- prm Prm
-
- opts *options
-
- mtx sync.Mutex
- mCtx map[uint64]context.CancelFunc
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Controller.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Controller does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm, opts ...Option) *Controller {
- switch {
- case prm.LocalTrustSource == nil:
- panicOnPrmValue("LocalTrustSource", prm.LocalTrustSource)
- case prm.LocalTrustTarget == nil:
- panicOnPrmValue("LocalTrustTarget", prm.LocalTrustTarget)
- }
-
- o := defaultOpts()
-
- for _, opt := range opts {
- opt(o)
- }
-
- return &Controller{
- prm: prm,
- opts: o,
- mCtx: make(map[uint64]context.CancelFunc),
- }
-}
diff --git a/pkg/services/reputation/local/controller/deps.go b/pkg/services/reputation/local/controller/deps.go
deleted file mode 100644
index 6f4a29c99..000000000
--- a/pkg/services/reputation/local/controller/deps.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package trustcontroller
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
-)
-
-// Iterator is a group of methods provided by entity
-// which can iterate over a group of reputation.Trust values.
-type Iterator interface {
- // Iterate must start an iterator over all trust values.
- // For each value should call a handler, the error
- // of which should be directly returned from the method.
- //
- // Internal failures of the iterator are also signaled via
- // an error. After a successful call to the last value
- // handler, nil should be returned.
- Iterate(reputation.TrustHandler) error
-}
-
-// IteratorProvider is a group of methods provided
-// by entity which generates iterators over
-// reputation.Trust values.
-type IteratorProvider interface {
- // InitIterator should return an initialized Iterator
- // that iterates over values from IteratorContext.Epoch() epoch.
- //
- // Initialization problems are reported via error.
- // If no error was returned, then the Iterator must not be nil.
- //
- // Implementations can have different logic for different
- // contexts, so specific ones may document their own behavior.
- InitIterator(common.EpochProvider) (Iterator, error)
-}
diff --git a/pkg/services/reputation/local/controller/opts.go b/pkg/services/reputation/local/controller/opts.go
deleted file mode 100644
index 385a4243b..000000000
--- a/pkg/services/reputation/local/controller/opts.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package trustcontroller
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
-)
-
-// Option sets an optional parameter of Controller.
-type Option func(*options)
-
-type options struct {
- log *logger.Logger
-}
-
-func defaultOpts() *options {
- return &options{
- log: &logger.Logger{Logger: zap.L()},
- }
-}
-
-// WithLogger returns option to specify logging component.
-//
-// Ignores nil values.
-func WithLogger(l *logger.Logger) Option {
- return func(o *options) {
- if l != nil {
- o.log = l
- }
- }
-}
diff --git a/pkg/services/reputation/local/controller/util.go b/pkg/services/reputation/local/controller/util.go
deleted file mode 100644
index 122550498..000000000
--- a/pkg/services/reputation/local/controller/util.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package trustcontroller
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
-
-type storageWrapper struct {
- w common.Writer
- i Iterator
-}
-
-func (s storageWrapper) InitIterator(common.EpochProvider) (Iterator, error) {
- return s.i, nil
-}
-
-func (s storageWrapper) InitWriter(common.EpochProvider) (common.Writer, error) {
- return s.w, nil
-}
-
-// SimpleIteratorProvider returns IteratorProvider that provides
-// static context-independent Iterator.
-func SimpleIteratorProvider(i Iterator) IteratorProvider {
- return &storageWrapper{
- i: i,
- }
-}
-
-// SimpleWriterProvider returns WriterProvider that provides
-// static context-independent Writer.
-func SimpleWriterProvider(w common.Writer) common.WriterProvider {
- return &storageWrapper{
- w: w,
- }
-}
diff --git a/pkg/services/reputation/local/routes/builder.go b/pkg/services/reputation/local/routes/builder.go
deleted file mode 100644
index ddd5a2ae0..000000000
--- a/pkg/services/reputation/local/routes/builder.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package routes
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
-)
-
-// Prm groups the required parameters of the Builder's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct {
- // Manager builder for current node.
- //
- // Must not be nil.
- ManagerBuilder common.ManagerBuilder
-
- Log *logger.Logger
-}
-
-// Builder represents component that routes node to its managers.
-//
-// For correct operation, Builder must be created using
-// the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// the Builder is immediately ready to work through API.
-type Builder struct {
- managerBuilder common.ManagerBuilder
- log *logger.Logger
-}
-
-const invalidPrmValFmt = "invalid parameter %s (%T):%v"
-
-func panicOnPrmValue(n string, v any) {
- panic(fmt.Sprintf(invalidPrmValFmt, n, v, v))
-}
-
-// New creates a new instance of the Builder.
-//
-// Panics if at least one value of the parameters is invalid.
-//
-// The created Builder does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm) *Builder {
- switch {
- case prm.ManagerBuilder == nil:
- panicOnPrmValue("ManagerBuilder", prm.ManagerBuilder)
- case prm.Log == nil:
- panicOnPrmValue("Logger", prm.Log)
- }
-
- return &Builder{
- managerBuilder: prm.ManagerBuilder,
- log: prm.Log,
- }
-}
diff --git a/pkg/services/reputation/local/routes/calls.go b/pkg/services/reputation/local/routes/calls.go
deleted file mode 100644
index 2f99f0e10..000000000
--- a/pkg/services/reputation/local/routes/calls.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package routes
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
- "go.uber.org/zap"
-)
-
-// NextStage builds Manager list for trusting node and returns it directly.
-//
-// If passed route has more than one point, then endpoint of the route is reached.
-func (b *Builder) NextStage(epoch uint64, t reputation.Trust, passed []common.ServerInfo) ([]common.ServerInfo, error) {
- passedLen := len(passed)
-
- b.log.Debug(logs.RoutesBuildingNextStageForLocalTrustRoute,
- zap.Uint64("epoch", epoch),
- zap.Int("passed_length", passedLen),
- )
-
- if passedLen > 1 {
- return nil, nil
- }
-
- route, err := b.managerBuilder.BuildManagers(epoch, t.TrustingPeer())
- if err != nil {
- return nil, fmt.Errorf("could not build managers for epoch: %d: %w", epoch, err)
- }
-
- return route, nil
-}
diff --git a/pkg/services/reputation/local/storage/calls.go b/pkg/services/reputation/local/storage/calls.go
deleted file mode 100644
index 14acbb64f..000000000
--- a/pkg/services/reputation/local/storage/calls.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package truststorage
-
-import (
- "errors"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
- apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// UpdatePrm groups the parameters of Storage's Update operation.
-type UpdatePrm struct {
- sat bool
-
- epoch uint64
-
- peer apireputation.PeerID
-}
-
-// SetEpoch sets number of the epoch
-// when the interaction happened.
-func (p *UpdatePrm) SetEpoch(e uint64) {
- p.epoch = e
-}
-
-// SetPeer sets identifier of the peer
-// with which the local node interacted.
-func (p *UpdatePrm) SetPeer(id apireputation.PeerID) {
- p.peer = id
-}
-
-// SetSatisfactory sets successful completion status.
-func (p *UpdatePrm) SetSatisfactory(sat bool) {
- p.sat = sat
-}
-
-type trustValue struct {
- sat, all int
-}
-
-// EpochTrustValueStorage represents storage of
-// the trust values by particular epoch.
-type EpochTrustValueStorage struct {
- mtx sync.RWMutex
-
- mItems map[string]*trustValue
-}
-
-func newTrustValueStorage() *EpochTrustValueStorage {
- return &EpochTrustValueStorage{
- mItems: make(map[string]*trustValue, 1),
- }
-}
-
-func stringifyPeerID(id apireputation.PeerID) string {
- return string(id.PublicKey())
-}
-
-func peerIDFromString(str string) (res apireputation.PeerID) {
- res.SetPublicKey([]byte(str))
- return
-}
-
-func (s *EpochTrustValueStorage) update(prm UpdatePrm) {
- s.mtx.Lock()
-
- {
- strID := stringifyPeerID(prm.peer)
-
- val, ok := s.mItems[strID]
- if !ok {
- val = new(trustValue)
- s.mItems[strID] = val
- }
-
- if prm.sat {
- val.sat++
- }
-
- val.all++
- }
-
- s.mtx.Unlock()
-}
-
-// Update updates the number of satisfactory transactions with peer.
-func (s *Storage) Update(prm UpdatePrm) {
- var trustStorage *EpochTrustValueStorage
-
- s.mtx.Lock()
-
- {
- var (
- ok bool
- epoch = prm.epoch
- )
-
- trustStorage, ok = s.mItems[epoch]
- if !ok {
- trustStorage = newTrustValueStorage()
- s.mItems[epoch] = trustStorage
- }
- }
-
- s.mtx.Unlock()
-
- trustStorage.update(prm)
-}
-
-// ErrNoPositiveTrust is returned by iterator when
-// there is no positive number of successful transactions.
-var ErrNoPositiveTrust = errors.New("no positive trust")
-
-// DataForEpoch returns EpochValueStorage for epoch.
-//
-// If there is no data for the epoch, ErrNoPositiveTrust returns.
-func (s *Storage) DataForEpoch(epoch uint64) (*EpochTrustValueStorage, error) {
- s.mtx.RLock()
- trustStorage, ok := s.mItems[epoch]
- s.mtx.RUnlock()
-
- if !ok {
- return nil, ErrNoPositiveTrust
- }
-
- return trustStorage, nil
-}
-
-// Iterate iterates over normalized trust values and passes them to parameterized handler.
-//
-// Values are normalized according to http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Chapter 4.5.
-// If divisor in formula is zero, ErrNoPositiveTrust returns.
-func (s *EpochTrustValueStorage) Iterate(h reputation.TrustHandler) (err error) {
- s.mtx.RLock()
-
- {
- var (
- sum reputation.TrustValue
- mVals = make(map[string]reputation.TrustValue, len(s.mItems))
- )
-
- // iterate first time to calculate normalizing divisor
- for strID, val := range s.mItems {
- if val.all > 0 {
- num := reputation.TrustValueFromInt(val.sat)
- denom := reputation.TrustValueFromInt(val.all)
-
- v := num.Div(denom)
-
- mVals[strID] = v
-
- sum.Add(v)
- }
- }
-
- err = ErrNoPositiveTrust
-
- if !sum.IsZero() {
- for strID, val := range mVals {
- t := reputation.Trust{}
-
- t.SetPeer(peerIDFromString(strID))
- t.SetValue(val.Div(sum))
-
- if err = h(t); err != nil {
- break
- }
- }
- }
- }
-
- s.mtx.RUnlock()
-
- return
-}
diff --git a/pkg/services/reputation/local/storage/storage.go b/pkg/services/reputation/local/storage/storage.go
deleted file mode 100644
index d7e54a3fc..000000000
--- a/pkg/services/reputation/local/storage/storage.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package truststorage
-
-import (
- "sync"
-)
-
-// Prm groups the required parameters of the Storage's constructor.
-//
-// All values must comply with the requirements imposed on them.
-// Passing incorrect parameter values will result in constructor
-// failure (error or panic depending on the implementation).
-type Prm struct{}
-
-// Storage represents in-memory storage of
-// local reputation values.
-//
-// Storage provides access to normalized local trust
-// values through iterator interface.
-//
-// For correct operation, Storage must be created
-// using the constructor (New) based on the required parameters
-// and optional components. After successful creation,
-// Storage is immediately ready to work through API.
-type Storage struct {
- prm Prm
-
- mtx sync.RWMutex
-
- mItems map[uint64]*EpochTrustValueStorage
-}
-
-// New creates a new instance of the Storage.
-//
-// The created Storage does not require additional
-// initialization and is completely ready for work.
-func New(prm Prm) *Storage {
- return &Storage{
- prm: prm,
- mItems: make(map[uint64]*EpochTrustValueStorage),
- }
-}
diff --git a/pkg/services/reputation/rpc/response.go b/pkg/services/reputation/rpc/response.go
deleted file mode 100644
index 808a0a476..000000000
--- a/pkg/services/reputation/rpc/response.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package reputationrpc
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response"
-)
-
-type responseService struct {
- respSvc *response.Service
-
- svc Server
-}
-
-// NewResponseService returns reputation service server instance that passes
-// internal service call to response service.
-func NewResponseService(cnrSvc Server, respSvc *response.Service) Server {
- return &responseService{
- respSvc: respSvc,
- svc: cnrSvc,
- }
-}
-
-func (s *responseService) AnnounceLocalTrust(ctx context.Context, req *reputation.AnnounceLocalTrustRequest) (*reputation.AnnounceLocalTrustResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceLocalTrust(ctx, req.(*reputation.AnnounceLocalTrustRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*reputation.AnnounceLocalTrustResponse), nil
-}
-
-func (s *responseService) AnnounceIntermediateResult(ctx context.Context, req *reputation.AnnounceIntermediateResultRequest) (*reputation.AnnounceIntermediateResultResponse, error) {
- resp, err := s.respSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceIntermediateResult(ctx, req.(*reputation.AnnounceIntermediateResultRequest))
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*reputation.AnnounceIntermediateResultResponse), nil
-}
diff --git a/pkg/services/reputation/rpc/server.go b/pkg/services/reputation/rpc/server.go
deleted file mode 100644
index 78af30ea7..000000000
--- a/pkg/services/reputation/rpc/server.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package reputationrpc
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
-)
-
-// Server is an interface of the FrostFS API v2 Reputation service server.
-type Server interface {
- AnnounceLocalTrust(context.Context, *reputation.AnnounceLocalTrustRequest) (*reputation.AnnounceLocalTrustResponse, error)
- AnnounceIntermediateResult(context.Context, *reputation.AnnounceIntermediateResultRequest) (*reputation.AnnounceIntermediateResultResponse, error)
-}
diff --git a/pkg/services/reputation/rpc/sign.go b/pkg/services/reputation/rpc/sign.go
deleted file mode 100644
index 9db06ff1e..000000000
--- a/pkg/services/reputation/rpc/sign.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package reputationrpc
-
-import (
- "context"
- "crypto/ecdsa"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/reputation"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util"
-)
-
-type signService struct {
- sigSvc *util.SignService
-
- svc Server
-}
-
-func NewSignService(key *ecdsa.PrivateKey, svc Server) Server {
- return &signService{
- sigSvc: util.NewUnarySignService(key),
- svc: svc,
- }
-}
-
-func (s *signService) AnnounceLocalTrust(ctx context.Context, req *reputation.AnnounceLocalTrustRequest) (*reputation.AnnounceLocalTrustResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceLocalTrust(ctx, req.(*reputation.AnnounceLocalTrustRequest))
- },
- func() util.ResponseMessage {
- return new(reputation.AnnounceLocalTrustResponse)
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*reputation.AnnounceLocalTrustResponse), nil
-}
-
-func (s *signService) AnnounceIntermediateResult(ctx context.Context, req *reputation.AnnounceIntermediateResultRequest) (*reputation.AnnounceIntermediateResultResponse, error) {
- resp, err := s.sigSvc.HandleUnaryRequest(ctx, req,
- func(ctx context.Context, req any) (util.ResponseMessage, error) {
- return s.svc.AnnounceIntermediateResult(ctx, req.(*reputation.AnnounceIntermediateResultRequest))
- },
- func() util.ResponseMessage {
- return new(reputation.AnnounceIntermediateResultResponse)
- },
- )
- if err != nil {
- return nil, err
- }
-
- return resp.(*reputation.AnnounceIntermediateResultResponse), nil
-}
diff --git a/pkg/services/reputation/trust.go b/pkg/services/reputation/trust.go
deleted file mode 100644
index 8c5d9091a..000000000
--- a/pkg/services/reputation/trust.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package reputation
-
-import (
- "strconv"
-
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-// TrustValue represents the numeric value of the node's trust.
-type TrustValue float64
-
-const (
- // TrustOne is a trust value equal to one.
- TrustOne = TrustValue(1)
-
- // TrustZero is a trust value equal to zero.
- TrustZero = TrustValue(0)
-)
-
-// TrustValueFromFloat64 converts float64 to TrustValue.
-func TrustValueFromFloat64(v float64) TrustValue {
- return TrustValue(v)
-}
-
-// TrustValueFromInt converts int to TrustValue.
-func TrustValueFromInt(v int) TrustValue {
- return TrustValue(v)
-}
-
-func (v TrustValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// Float64 converts TrustValue to float64.
-func (v TrustValue) Float64() float64 {
- return float64(v)
-}
-
-// Add adds v2 to v.
-func (v *TrustValue) Add(v2 TrustValue) {
- *v = *v + v2
-}
-
-// Div returns the result of dividing v by v2.
-func (v TrustValue) Div(v2 TrustValue) TrustValue {
- return v / v2
-}
-
-// Mul multiplies v by v2.
-func (v *TrustValue) Mul(v2 TrustValue) {
- *v *= v2
-}
-
-// IsZero returns true if v equal to zero.
-func (v TrustValue) IsZero() bool {
- return v == 0
-}
-
-// Trust represents peer's trust (reputation).
-type Trust struct {
- trusting, peer reputation.PeerID
-
- val TrustValue
-}
-
-// TrustHandler describes the signature of the reputation.Trust
-// value handling function.
-//
-// Termination of processing without failures is usually signaled
-// with a zero error, while a specific value may describe the reason
-// for failure.
-type TrustHandler func(Trust) error
-
-// Value returns peer's trust value.
-func (t Trust) Value() TrustValue {
- return t.val
-}
-
-// SetValue sets peer's trust value.
-func (t *Trust) SetValue(val TrustValue) {
- t.val = val
-}
-
-// Peer returns trusted peer ID.
-func (t Trust) Peer() reputation.PeerID {
- return t.peer
-}
-
-// SetPeer sets trusted peer ID.
-func (t *Trust) SetPeer(id reputation.PeerID) {
- t.peer = id
-}
-
-// TrustingPeer returns trusting peer ID.
-func (t Trust) TrustingPeer() reputation.PeerID {
- return t.trusting
-}
-
-// SetTrustingPeer sets trusting peer ID.
-func (t *Trust) SetTrustingPeer(id reputation.PeerID) {
- t.trusting = id
-}
From 960e3c219e99254288d73b60404015b1bbe778a8 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Fri, 14 Apr 2023 13:41:40 +0300
Subject: [PATCH 0114/1943] [#247] config, doc: Drop reputation references
Signed-off-by: Pavel Karpy
---
cmd/frostfs-adm/docs/deploy.md | 1 -
.../internal/modules/morph/initialize_deploy.go | 5 -----
cmd/frostfs-ir/defaults.go | 1 -
cmd/frostfs-node/config/contracts/config.go | 9 ---------
cmd/frostfs-node/config/contracts/config_test.go | 6 ------
config/example/ir.env | 2 --
config/example/ir.yaml | 2 --
config/example/node.env | 1 -
config/example/node.json | 1 -
config/example/node.yaml | 1 -
config/mainnet/config.yml | 1 -
config/testnet/config.yml | 1 -
docs/storage-node-configuration.md | 2 --
pkg/innerring/contracts.go | 2 --
14 files changed, 35 deletions(-)
diff --git a/cmd/frostfs-adm/docs/deploy.md b/cmd/frostfs-adm/docs/deploy.md
index a1923bb2c..aead65fe0 100644
--- a/cmd/frostfs-adm/docs/deploy.md
+++ b/cmd/frostfs-adm/docs/deploy.md
@@ -147,7 +147,6 @@ NNS: Set container.frostfs -> cae60bdd689d185901e495352d0247752ce50846
NNS: Set frostfsid.frostfs -> c421fb60a3895865a8f24d197d6a80ef686041d2
NNS: Set netmap.frostfs -> 894eb854632f50fb124412ce7951ebc00763525e
NNS: Set proxy.frostfs -> ac6e6fe4b373d0ca0ca4969d1e58fa0988724e7d
-NNS: Set reputation.frostfs -> 6eda57c9d93d990573646762d1fea327ce41191f
Waiting for transactions to persist...
```
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
index 9e473463b..156b1f788 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
@@ -47,7 +47,6 @@ const (
frostfsIDContract = "frostfsid"
netmapContract = "netmap"
proxyContract = "proxy"
- reputationContract = "reputation"
subnetContract = "subnet"
)
@@ -64,7 +63,6 @@ var (
frostfsIDContract,
netmapContract,
proxyContract,
- reputationContract,
subnetContract,
}
@@ -81,8 +79,6 @@ var (
netmap.AuditFeeConfig,
netmap.ContainerFeeConfig,
netmap.ContainerAliasFeeConfig,
- netmap.EtIterationsConfig,
- netmap.EtAlphaConfig,
netmap.BasicIncomeRateConfig,
netmap.IrCandidateFeeConfig,
netmap.WithdrawFeeConfig,
@@ -602,7 +598,6 @@ func (c *initializeContext) getContractDeployData(ctrName string, keysParam []an
configParam)
case proxyContract:
items = nil
- case reputationContract:
case subnetContract:
default:
panic(fmt.Sprintf("invalid contract name: %s", ctrName))
diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go
index 57959c1cf..837c1f170 100644
--- a/cmd/frostfs-ir/defaults.go
+++ b/cmd/frostfs-ir/defaults.go
@@ -163,7 +163,6 @@ func setContractsDefaults(cfg *viper.Viper) {
cfg.SetDefault("contracts.audit", "")
cfg.SetDefault("contracts.proxy", "")
cfg.SetDefault("contracts.processing", "")
- cfg.SetDefault("contracts.reputation", "")
cfg.SetDefault("contracts.subnet", "")
cfg.SetDefault("contracts.proxy", "")
}
diff --git a/cmd/frostfs-node/config/contracts/config.go b/cmd/frostfs-node/config/contracts/config.go
index 0450d0d57..c5f14f3ca 100644
--- a/cmd/frostfs-node/config/contracts/config.go
+++ b/cmd/frostfs-node/config/contracts/config.go
@@ -38,15 +38,6 @@ func Container(c *config.Config) util.Uint160 {
return contractAddress(c, "container")
}
-// Reputation returnsthe value of "reputation" config parameter
-// from "contracts" section.
-//
-// Returns zero filled script hash if the value is not set.
-// Throws panic if the value is not a 20-byte LE hex-encoded string.
-func Reputation(c *config.Config) util.Uint160 {
- return contractAddress(c, "reputation")
-}
-
// Proxy returnsthe value of "proxy" config parameter
// from "contracts" section.
//
diff --git a/cmd/frostfs-node/config/contracts/config_test.go b/cmd/frostfs-node/config/contracts/config_test.go
index 93956d6bb..d816ea1e4 100644
--- a/cmd/frostfs-node/config/contracts/config_test.go
+++ b/cmd/frostfs-node/config/contracts/config_test.go
@@ -18,7 +18,6 @@ func TestContractsSection(t *testing.T) {
require.Equal(t, emptyHash, contractsconfig.Balance(empty))
require.Equal(t, emptyHash, contractsconfig.Container(empty))
require.Equal(t, emptyHash, contractsconfig.Netmap(empty))
- require.Equal(t, emptyHash, contractsconfig.Reputation(empty))
require.Equal(t, emptyHash, contractsconfig.Proxy(empty))
})
@@ -33,9 +32,6 @@ func TestContractsSection(t *testing.T) {
expNetmap, err := util.Uint160DecodeStringLE("0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca")
require.NoError(t, err)
- expReputation, err := util.Uint160DecodeStringLE("441995f631c1da2b133462b71859494a5cd45e90")
- require.NoError(t, err)
-
expProxy, err := util.Uint160DecodeStringLE("ad7c6b55b737b696e5c82c85445040964a03e97f")
require.NoError(t, err)
@@ -43,13 +39,11 @@ func TestContractsSection(t *testing.T) {
balance := contractsconfig.Balance(c)
container := contractsconfig.Container(c)
netmap := contractsconfig.Netmap(c)
- reputation := contractsconfig.Reputation(c)
proxy := contractsconfig.Proxy(c)
require.Equal(t, expBalance, balance)
require.Equal(t, expConatiner, container)
require.Equal(t, expNetmap, netmap)
- require.Equal(t, expReputation, reputation)
require.Equal(t, expProxy, proxy)
}
diff --git a/config/example/ir.env b/config/example/ir.env
index 2ec821932..b7816dd77 100644
--- a/config/example/ir.env
+++ b/config/example/ir.env
@@ -49,7 +49,6 @@ FROSTFS_IR_WORKERS_BALANCE=10
FROSTFS_IR_WORKERS_CONTAINER=10
FROSTFS_IR_WORKERS_NEOFS=10
FROSTFS_IR_WORKERS_NETMAP=10
-FROSTFS_IR_WORKERS_REPUTATION=10
FROSTFS_IR_WORKERS_SUBNET=10
FROSTFS_IR_AUDIT_TIMEOUT_GET=5s
@@ -75,7 +74,6 @@ FROSTFS_IR_CONTRACTS_CONTAINER=ed4a7a66fe3f9bfe50f214b49be8f215a3c886b6
FROSTFS_IR_CONTRACTS_NEOFSID=9f5866decbc751a099e74c7c7bc89f609201755a
FROSTFS_IR_CONTRACTS_NETMAP=83c600c81d47a1b1b7cf58eb49ae7ee7240dc742
FROSTFS_IR_CONTRACTS_PROXY=abc8794bb40a21f2db5f21ae62741eb46c8cad1c
-FROSTFS_IR_CONTRACTS_REPUTATION=d793b842ff0c103fe89e385069e82a27602135ff
FROSTFS_IR_CONTRACTS_SUBNET=e9266864d3c562c6e17f2bb9cb1392aaa293d93a
FROSTFS_IR_CONTRACTS_ALPHABET_AMOUNT=7
FROSTFS_IR_CONTRACTS_ALPHABET_AZ=c1d211fceeb4b1dc76b8e4054d11fdf887e418ea
diff --git a/config/example/ir.yaml b/config/example/ir.yaml
index 3dca0017a..0e1b12bf6 100644
--- a/config/example/ir.yaml
+++ b/config/example/ir.yaml
@@ -83,7 +83,6 @@ workers:
container: 10 # Number of workers to process events from container contract in parallel
frostfs: 10 # Number of workers to process events from frostfs contracts in parallel
netmap: 10 # Number of workers to process events from netmap contract in parallel
- reputation: 10 # Number of workers to process events from reputation contract in parallel
subnet: 10 # Number of workers to process events from subnet contract in parallel
audit:
@@ -117,7 +116,6 @@ contracts:
frostfsid: 9f5866decbc751a099e74c7c7bc89f609201755a # Optional: override address of frostfsid contract in sidechain
netmap: 83c600c81d47a1b1b7cf58eb49ae7ee7240dc742 # Optional: override address of netmap contract in sidechain
proxy: abc8794bb40a21f2db5f21ae62741eb46c8cad1c # Optional: override address of proxy contract in sidechain; ignore if notary is disabled in sidechain
- reputation: d793b842ff0c103fe89e385069e82a27602135ff # Optional: override address of reputation contract in sidechain
subnet: e9266864d3c562c6e17f2bb9cb1392aaa293d93a # Optional: override address of subnet contract in sidechain
alphabet:
amount: 7 # Optional: override amount of alphabet contracts
diff --git a/config/example/node.env b/config/example/node.env
index a4088f75a..b3fb4f9f9 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -58,7 +58,6 @@ FROSTFS_CONTROL_GRPC_ENDPOINT=localhost:8090
FROSTFS_CONTRACTS_BALANCE=5263abba1abedbf79bb57f3e40b50b4425d2d6cd
FROSTFS_CONTRACTS_CONTAINER=5d084790d7aa36cea7b53fe897380dab11d2cd3c
FROSTFS_CONTRACTS_NETMAP=0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca
-FROSTFS_CONTRACTS_REPUTATION=441995f631c1da2b133462b71859494a5cd45e90
FROSTFS_CONTRACTS_PROXY=ad7c6b55b737b696e5c82c85445040964a03e97f
# Morph chain section
diff --git a/config/example/node.json b/config/example/node.json
index 8cfb5bb69..9e2a0a487 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -96,7 +96,6 @@
"balance": "5263abba1abedbf79bb57f3e40b50b4425d2d6cd",
"container": "5d084790d7aa36cea7b53fe897380dab11d2cd3c",
"netmap": "0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca",
- "reputation": "441995f631c1da2b133462b71859494a5cd45e90",
"proxy": "ad7c6b55b737b696e5c82c85445040964a03e97f"
},
"morph": {
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 0d71f0fd2..54d774174 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -78,7 +78,6 @@ contracts: # side chain NEOFS contract script hashes; optional, override values
balance: 5263abba1abedbf79bb57f3e40b50b4425d2d6cd
container: 5d084790d7aa36cea7b53fe897380dab11d2cd3c
netmap: 0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca
- reputation: 441995f631c1da2b133462b71859494a5cd45e90
proxy: ad7c6b55b737b696e5c82c85445040964a03e97f
morph:
diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml
index 2b240e0b2..7db476e55 100644
--- a/config/mainnet/config.yml
+++ b/config/mainnet/config.yml
@@ -68,4 +68,3 @@ contracts:
balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55
container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5
netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1
- reputation: 7ad824fd1eeb1565be2cee3889214b9aa605d2fc
diff --git a/config/testnet/config.yml b/config/testnet/config.yml
index 416ba35e4..76b36cdf6 100644
--- a/config/testnet/config.yml
+++ b/config/testnet/config.yml
@@ -16,7 +16,6 @@ contracts:
balance: e0420c216003747626670d1424569c17c79015bf
container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0
netmap: d4b331639799e2958d4bc5b711b469d79de94e01
- reputation: 376c23a2ae1fad088c82046abb59984e3c4519d9
node:
key: /node.key
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 6ecd15907..306577307 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -113,7 +113,6 @@ contracts:
balance: 5263abba1abedbf79bb57f3e40b50b4425d2d6cd
container: 5d084790d7aa36cea7b53fe897380dab11d2cd3c
netmap: 0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca
- reputation: 441995f631c1da2b133462b71859494a5cd45e90
proxy: ad7c6b55b737b696e5c82c85445040964a03e97f
```
@@ -123,7 +122,6 @@ contracts:
| `balance` | `hash160` | | Balance contract hash. |
| `container` | `hash160` | | Container contract hash. |
| `netmap` | `hash160` | | Netmap contract hash. |
-| `reputation` | `hash160` | | Reputation contract hash. |
| `subnet` | `hash160` | | Subnet contract hash. |
# `morph` section
diff --git a/pkg/innerring/contracts.go b/pkg/innerring/contracts.go
index c280eb4bf..f723d3507 100644
--- a/pkg/innerring/contracts.go
+++ b/pkg/innerring/contracts.go
@@ -18,7 +18,6 @@ type contracts struct {
audit util.Uint160 // in morph
proxy util.Uint160 // in morph
processing util.Uint160 // in mainnet
- reputation util.Uint160 // in morph
subnet util.Uint160 // in morph
frostfsID util.Uint160 // in morph
@@ -61,7 +60,6 @@ func parseContracts(cfg *viper.Viper, morph *client.Client, withoutMainNet, with
{"contracts.balance", client.NNSBalanceContractName, &result.balance},
{"contracts.container", client.NNSContainerContractName, &result.container},
{"contracts.audit", client.NNSAuditContractName, &result.audit},
- {"contracts.reputation", client.NNSReputationContractName, &result.reputation},
{"contracts.subnet", client.NNSSubnetworkContractName, &result.subnet},
{"contracts.frostfsid", client.NNSFrostFSIDContractName, &result.frostfsID},
}
From 8799138fcb67f21d2cc41749463ad6146bbedacd Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Fri, 14 Apr 2023 14:05:31 +0300
Subject: [PATCH 0115/1943] [#247] morph: Drop reputation contract
Signed-off-by: Pavel Karpy
---
pkg/morph/client/nns.go | 2 -
pkg/morph/client/reputation/client.go | 83 -----------------
pkg/morph/client/reputation/get.go | 108 -----------------------
pkg/morph/client/reputation/list.go | 55 ------------
pkg/morph/client/reputation/put.go | 47 ----------
pkg/morph/event/reputation/put.go | 99 ---------------------
pkg/morph/event/reputation/put_notary.go | 74 ----------------
pkg/morph/event/reputation/put_test.go | 92 -------------------
8 files changed, 560 deletions(-)
delete mode 100644 pkg/morph/client/reputation/client.go
delete mode 100644 pkg/morph/client/reputation/get.go
delete mode 100644 pkg/morph/client/reputation/list.go
delete mode 100644 pkg/morph/client/reputation/put.go
delete mode 100644 pkg/morph/event/reputation/put.go
delete mode 100644 pkg/morph/event/reputation/put_notary.go
delete mode 100644 pkg/morph/event/reputation/put_test.go
diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go
index 473b3500b..236cf1ba0 100644
--- a/pkg/morph/client/nns.go
+++ b/pkg/morph/client/nns.go
@@ -32,8 +32,6 @@ const (
NNSNetmapContractName = "netmap.frostfs"
// NNSProxyContractName is a name of the proxy contract in NNS.
NNSProxyContractName = "proxy.frostfs"
- // NNSReputationContractName is a name of the reputation contract in NNS.
- NNSReputationContractName = "reputation.frostfs"
// NNSSubnetworkContractName is a name of the subnet contract in NNS.
NNSSubnetworkContractName = "subnet.frostfs"
// NNSGroupKeyName is a name for the FrostFS group key record in NNS.
diff --git a/pkg/morph/client/reputation/client.go b/pkg/morph/client/reputation/client.go
deleted file mode 100644
index cdaf191ad..000000000
--- a/pkg/morph/client/reputation/client.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
- "github.com/nspcc-dev/neo-go/pkg/util"
-)
-
-// Client is a wrapper over StaticClient
-// which makes calls with the names and arguments
-// of the FrostFS reputation contract.
-//
-// Working client must be created via constructor New.
-// Using the Client that has been created with new(Client)
-// expression (or just declaring a Client variable) is unsafe
-// and can lead to panic.
-type Client struct {
- client *client.StaticClient // static reputation contract client
-}
-
-const (
- putMethod = "put"
- getMethod = "get"
- getByIDMethod = "getByID"
- listByEpochMethod = "listByEpoch"
-)
-
-// NewFromMorph returns the wrapper instance from the raw morph client.
-func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, opts ...Option) (*Client, error) {
- o := defaultOpts()
-
- for i := range opts {
- opts[i](o)
- }
-
- sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...)
- if err != nil {
- return nil, fmt.Errorf("could not create static client of reputation contract: %w", err)
- }
-
- return &Client{client: sc}, nil
-}
-
-// Morph returns raw morph client.
-func (c Client) Morph() *client.Client {
- return c.client.Morph()
-}
-
-// ContractAddress returns the address of the associated contract.
-func (c Client) ContractAddress() util.Uint160 {
- return c.client.ContractAddress()
-}
-
-// Option allows to set an optional
-// parameter of ClientWrapper.
-type Option func(*opts)
-
-type opts []client.StaticClientOption
-
-func defaultOpts() *opts {
- return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
- return func(o *opts) {
- *o = append(*o, client.TryNotary())
- }
-}
-
-// AsAlphabet returns option to sign main TX
-// of notary requests with client's private
-// key.
-//
-// Considered to be used by IR nodes only.
-func AsAlphabet() Option {
- return func(o *opts) {
- *o = append(*o, client.AsAlphabet())
- }
-}
diff --git a/pkg/morph/client/reputation/get.go b/pkg/morph/client/reputation/get.go
deleted file mode 100644
index 8f1d24176..000000000
--- a/pkg/morph/client/reputation/get.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
-)
-
-type (
- // GetPrm groups the arguments of "get reputation value" test invocation.
- GetPrm struct {
- epoch uint64
- peerID reputation.PeerID
- }
-
- // GetByIDPrm groups the arguments of "get reputation value by
- // reputation id" test invocation.
- GetByIDPrm struct {
- id ID
- }
-)
-
-// SetEpoch sets epoch of expected reputation value.
-func (g *GetPrm) SetEpoch(v uint64) {
- g.epoch = v
-}
-
-// SetPeerID sets peer id of expected reputation value.
-func (g *GetPrm) SetPeerID(v reputation.PeerID) {
- g.peerID = v
-}
-
-// SetID sets id of expected reputation value in reputation contract.
-func (g *GetByIDPrm) SetID(v ID) {
- g.id = v
-}
-
-// Get invokes the call of "get reputation value" method of reputation contract.
-func (c *Client) Get(p GetPrm) ([]reputation.GlobalTrust, error) {
- invokePrm := client.TestInvokePrm{}
- invokePrm.SetMethod(getMethod)
- invokePrm.SetArgs(p.epoch, p.peerID.PublicKey())
-
- res, err := c.client.TestInvoke(invokePrm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err)
- }
-
- return parseReputations(res, getMethod)
-}
-
-// GetByID invokes the call of "get reputation value by reputation id" method
-// of reputation contract.
-func (c *Client) GetByID(p GetByIDPrm) ([]reputation.GlobalTrust, error) {
- invokePrm := client.TestInvokePrm{}
- invokePrm.SetMethod(getByIDMethod)
- invokePrm.SetArgs([]byte(p.id))
-
- prms, err := c.client.TestInvoke(invokePrm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", getByIDMethod, err)
- }
-
- return parseReputations(prms, getByIDMethod)
-}
-
-func parseGetResult(rawReputations [][]byte, method string) ([]reputation.GlobalTrust, error) {
- reputations := make([]reputation.GlobalTrust, 0, len(rawReputations))
-
- for i := range rawReputations {
- r := reputation.GlobalTrust{}
-
- err := r.Unmarshal(rawReputations[i])
- if err != nil {
- return nil, fmt.Errorf("can't unmarshal global trust value (%s): %w", method, err)
- }
-
- reputations = append(reputations, r)
- }
-
- return reputations, nil
-}
-
-func parseReputations(items []stackitem.Item, method string) ([]reputation.GlobalTrust, error) {
- if ln := len(items); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", method, ln)
- }
-
- items, err := client.ArrayFromStackItem(items[0])
- if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err)
- }
-
- res := make([][]byte, 0, len(items))
-
- for i := range items {
- rawReputation, err := client.BytesFromStackItem(items[i])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", method, err)
- }
-
- res = append(res, rawReputation)
- }
-
- return parseGetResult(res, method)
-}
diff --git a/pkg/morph/client/reputation/list.go b/pkg/morph/client/reputation/list.go
deleted file mode 100644
index 0090efb10..000000000
--- a/pkg/morph/client/reputation/list.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
-)
-
-type (
- // ID is an ID of the reputation record in reputation contract.
- ID []byte
-
- // ListByEpochArgs groups the arguments of
- // "list reputation ids by epoch" test invoke call.
- ListByEpochArgs struct {
- epoch uint64
- }
-)
-
-// SetEpoch sets epoch of expected reputation ids.
-func (l *ListByEpochArgs) SetEpoch(v uint64) {
- l.epoch = v
-}
-
-// ListByEpoch invokes the call of "list reputation ids by epoch" method of
-// reputation contract.
-func (c *Client) ListByEpoch(p ListByEpochArgs) ([]ID, error) {
- invokePrm := client.TestInvokePrm{}
- invokePrm.SetMethod(listByEpochMethod)
- invokePrm.SetArgs(p.epoch)
-
- prms, err := c.client.TestInvoke(invokePrm)
- if err != nil {
- return nil, fmt.Errorf("could not perform test invocation (%s): %w", listByEpochMethod, err)
- } else if ln := len(prms); ln != 1 {
- return nil, fmt.Errorf("unexpected stack item count (%s): %d", listByEpochMethod, ln)
- }
-
- items, err := client.ArrayFromStackItem(prms[0])
- if err != nil {
- return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listByEpochMethod, err)
- }
-
- result := make([]ID, 0, len(items))
- for i := range items {
- rawReputation, err := client.BytesFromStackItem(items[i])
- if err != nil {
- return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listByEpochMethod, err)
- }
-
- result = append(result, rawReputation)
- }
-
- return result, nil
-}
diff --git a/pkg/morph/client/reputation/put.go b/pkg/morph/client/reputation/put.go
deleted file mode 100644
index 02b47defe..000000000
--- a/pkg/morph/client/reputation/put.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
-)
-
-type (
- // PutPrm groups the arguments of "put reputation value" invocation call.
- PutPrm struct {
- epoch uint64
- peerID reputation.PeerID
- value reputation.GlobalTrust
- }
-)
-
-// SetEpoch sets epoch of reputation value.
-func (p *PutPrm) SetEpoch(v uint64) {
- p.epoch = v
-}
-
-// SetPeerID sets peer id of reputation value.
-func (p *PutPrm) SetPeerID(v reputation.PeerID) {
- p.peerID = v
-}
-
-// SetValue sets reputation value.
-func (p *PutPrm) SetValue(v reputation.GlobalTrust) {
- p.value = v
-}
-
-// Put invokes direct call of "put reputation value" method of reputation contract.
-//
-// If TryNotary is provided, calls notary contract.
-func (c *Client) Put(p PutPrm) error {
- prm := client.InvokePrm{}
- prm.SetMethod(putMethod)
- prm.SetArgs(p.epoch, p.peerID.PublicKey(), p.value.Marshal())
-
- err := c.client.Invoke(prm)
- if err != nil {
- return fmt.Errorf("could not invoke method (%s): %w", putMethod, err)
- }
- return nil
-}
diff --git a/pkg/morph/event/reputation/put.go b/pkg/morph/event/reputation/put.go
deleted file mode 100644
index a182bf26c..000000000
--- a/pkg/morph/event/reputation/put.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/network/payload"
-)
-
-// Put structure of reputation.reputationPut notification from
-// morph chain.
-type Put struct {
- epoch uint64
- peerID reputation.PeerID
- value reputation.GlobalTrust
-
- // For notary notifications only.
- // Contains raw transactions of notary request.
- notaryRequest *payload.P2PNotaryRequest
-}
-
-const peerIDLength = 33 // compressed public key
-
-// MorphEvent implements Neo:Morph Event interface.
-func (Put) MorphEvent() {}
-
-// Epoch returns epoch value of reputation data.
-func (p Put) Epoch() uint64 {
- return p.epoch
-}
-
-// PeerID returns peer id of reputation data.
-func (p Put) PeerID() reputation.PeerID {
- return p.peerID
-}
-
-// Value returns reputation structure.
-func (p Put) Value() reputation.GlobalTrust {
- return p.value
-}
-
-// NotaryRequest returns raw notary request if notification
-// was received via notary service. Otherwise, returns nil.
-func (p Put) NotaryRequest() *payload.P2PNotaryRequest {
- return p.notaryRequest
-}
-
-// ParsePut from notification into reputation event structure.
-func ParsePut(e *state.ContainedNotificationEvent) (event.Event, error) {
- var (
- ev Put
- err error
- )
-
- params, err := event.ParseStackArray(e)
- if err != nil {
- return nil, fmt.Errorf("could not parse stack items from notify event: %w", err)
- }
-
- if ln := len(params); ln != 3 {
- return nil, event.WrongNumberOfParameters(3, ln)
- }
-
- // parse epoch number
- epoch, err := client.IntFromStackItem(params[0])
- if err != nil {
- return nil, fmt.Errorf("could not get integer epoch number: %w", err)
- }
-
- ev.epoch = uint64(epoch)
-
- // parse peer ID value
- peerID, err := client.BytesFromStackItem(params[1])
- if err != nil {
- return nil, fmt.Errorf("could not get peer ID value: %w", err)
- }
-
- if ln := len(peerID); ln != peerIDLength {
- return nil, fmt.Errorf("peer ID is %d byte long, expected %d", ln, peerIDLength)
- }
-
- ev.peerID.SetPublicKey(peerID)
-
- // parse global trust value
- rawValue, err := client.BytesFromStackItem(params[2])
- if err != nil {
- return nil, fmt.Errorf("could not get global trust value: %w", err)
- }
-
- err = ev.value.Unmarshal(rawValue)
- if err != nil {
- return nil, fmt.Errorf("could not parse global trust value: %w", err)
- }
-
- return ev, nil
-}
diff --git a/pkg/morph/event/reputation/put_notary.go b/pkg/morph/event/reputation/put_notary.go
deleted file mode 100644
index f3cd749fc..000000000
--- a/pkg/morph/event/reputation/put_notary.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package reputation
-
-import (
- "fmt"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
-)
-
-func (p *Put) setEpoch(v uint64) {
- p.epoch = v
-}
-
-func (p *Put) setPeerID(v []byte) error {
- if ln := len(v); ln != peerIDLength {
- return fmt.Errorf("peer ID is %d byte long, expected %d", ln, peerIDLength)
- }
-
- p.peerID.SetPublicKey(v)
-
- return nil
-}
-
-func (p *Put) setValue(v []byte) error {
- return p.value.Unmarshal(v)
-}
-
-var fieldSetters = []func(*Put, []byte) error{
- // order on stack is reversed
- (*Put).setValue,
- (*Put).setPeerID,
-}
-
-const (
- // PutNotaryEvent is method name for reputation put operations
- // in `Reputation` contract. Is used as identifier for notary
- // put reputation requests.
- PutNotaryEvent = "put"
-)
-
-// ParsePutNotary from NotaryEvent into reputation event structure.
-func ParsePutNotary(ne event.NotaryEvent) (event.Event, error) {
- var ev Put
-
- fieldNum := 0
-
- for _, op := range ne.Params() {
- switch fieldNum {
- case 0, 1:
- data, err := event.BytesFromOpcode(op)
- if err != nil {
- return nil, err
- }
-
- err = fieldSetters[fieldNum](&ev, data)
- if err != nil {
- return nil, fmt.Errorf("can't parse field num %d: %w", fieldNum, err)
- }
- case 2:
- n, err := event.IntFromOpcode(op)
- if err != nil {
- return nil, err
- }
-
- ev.setEpoch(uint64(n))
- default:
- return nil, event.UnexpectedArgNumErr(PutNotaryEvent)
- }
- fieldNum++
- }
-
- ev.notaryRequest = ne.Raw()
-
- return ev, nil
-}
diff --git a/pkg/morph/event/reputation/put_test.go b/pkg/morph/event/reputation/put_test.go
deleted file mode 100644
index 46356b317..000000000
--- a/pkg/morph/event/reputation/put_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package reputation
-
-import (
- "math/big"
- "testing"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"
- reputationtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation/test"
- "github.com/nspcc-dev/neo-go/pkg/core/state"
- "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
- "github.com/stretchr/testify/require"
-)
-
-func TestParsePut(t *testing.T) {
- var (
- peerID = reputationtest.PeerID()
-
- value reputation.GlobalTrust
- trust reputation.Trust
- trustValue float64 = 0.64
-
- epoch uint64 = 42
- )
-
- trust.SetValue(trustValue)
- trust.SetPeer(peerID)
-
- value.SetTrust(trust)
-
- rawValue := value.Marshal()
-
- t.Run("wrong number of parameters", func(t *testing.T) {
- prms := []stackitem.Item{
- stackitem.NewMap(),
- stackitem.NewMap(),
- }
-
- _, err := ParsePut(createNotifyEventFromItems(prms))
- require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error())
- })
-
- t.Run("wrong epoch parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong peerID parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewBigInteger(new(big.Int).SetUint64(epoch)),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("wrong value parameter", func(t *testing.T) {
- _, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewBigInteger(new(big.Int).SetUint64(epoch)),
- stackitem.NewByteArray(peerID.PublicKey()),
- stackitem.NewMap(),
- }))
-
- require.Error(t, err)
- })
-
- t.Run("correct behavior", func(t *testing.T) {
- ev, err := ParsePut(createNotifyEventFromItems([]stackitem.Item{
- stackitem.NewBigInteger(new(big.Int).SetUint64(epoch)),
- stackitem.NewByteArray(peerID.PublicKey()),
- stackitem.NewByteArray(rawValue),
- }))
- require.NoError(t, err)
-
- require.Equal(t, Put{
- epoch: epoch,
- peerID: peerID,
- value: value,
- }, ev)
- })
-}
-
-func createNotifyEventFromItems(items []stackitem.Item) *state.ContainedNotificationEvent {
- return &state.ContainedNotificationEvent{
- NotificationEvent: state.NotificationEvent{
- Item: stackitem.NewArray(items),
- },
- }
-}
From b453bb754c3bbeed9694ae8f128e4bcd59ee7677 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Fri, 14 Apr 2023 14:12:39 +0300
Subject: [PATCH 0116/1943] [#247] logs: Drop reputation log messages
Signed-off-by: Pavel Karpy
---
internal/logs/logs.go | 56 +------------------------------------------
1 file changed, 1 insertion(+), 55 deletions(-)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 062538747..a6488dcc9 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -95,38 +95,7 @@ const (
PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage" // Error in ../node/pkg/services/session/storage/persistent/storage.go
PersistentCouldNotDeleteSToken = "could not delete token" // Error in ../node/pkg/services/session/storage/persistent/storage.go
PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens" // Error in ../node/pkg/services/session/storage/persistent/storage.go
- CommonStartBuildingManagers = "start building managers" // Debug in ../node/pkg/services/reputation/common/managers.go
- ControllerReportIsAlreadyStarted = "report is already started" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
- ControllerStartingToReportLocalTrustValues = "starting to report local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
- ControllerCouldNotInitializeIteratorOverLocalTrustValues = "could not initialize iterator over local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
- ControllerCouldNotInitializeLocalTrustTarget = "could not initialize local trust target" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
- ControllerIteratorOverLocalTrustFailed = "iterator over local trust failed" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
- ControllerCouldNotFinishWritingLocalTrustValues = "could not finish writing local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
- ControllerReportingSuccessfullyFinished = "reporting successfully finished" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
- ControllerReportingSuccessfullyInterrupted = "reporting successfully interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
- ControllerReportingIsNotStartedOrAlreadyInterrupted = "reporting is not started or already interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
- RoutesBuildingNextStageForLocalTrustRoute = "building next stage for local trust route" // Debug in ../node/pkg/services/reputation/local/routes/calls.go
- CalculatorFailedToGetAlphaParam = "failed to get alpha param" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorConsumersTrustIteratorsInitFailure = "consumers trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorWorkerPoolSubmitFailure = "worker pool submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorIterateDaughtersConsumersFailed = "iterate daughter's consumers failed" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorGetInitialTrustFailure = "get initial trust failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorDaughterTrustIteratorsInitFailure = "daughter trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorIterateOverDaughtersTrustsFailure = "iterate over daughter's trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorInitWriterFailure = "init writer failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorWriteFinalResultFailure = "write final result failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorWriteValueFailure = "write value failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorIterateDaughterTrustsFailure = "iterate daughter trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorCouldNotCloseWriter = "could not close writer" // Error in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorAllDaughtersTrustIteratorsInitFailure = "all daughters trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- CalculatorIterateOverAllDaughtersFailure = "iterate over all daughters failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
- ControllerCouldNotGetEigenTrustIterationNumber = "could not get EigenTrust iteration number" // Error in ../node/pkg/services/reputation/eigentrust/controller/calls.go
- ControllerIterationSubmitFailure = "iteration submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/controller/calls.go
- RoutesBuildingNextStageForTrustRoute = "building next stage for trust route" // Debug in ../node/pkg/services/reputation/eigentrust/routes/calls.go
- RouterCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/reputation/common/router/calls.go
- RouterCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go
- RouterCouldNotWriteTheValue = "could not write the value" // Debug in ../node/pkg/services/reputation/common/router/calls.go
- RouterCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go
+ ControllerReportIsAlreadyStarted = "report is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
TombstoneCouldNotParseTombstoneExpirationEpoch = "tombstone getter: could not parse tombstone expiration epoch" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
DeleteRequestIsNotRolledOverToTheContainer = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go
@@ -495,11 +464,6 @@ const (
NetmapCouldNotIterateOverSubnetworksOfTheNode = "could not iterate over subnetworks of the node" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotInvokeNetmapUpdateState = "could not invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
NetmapCouldNotInvokeNetmapAddPeer = "could not invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
- ReputationReputationWorkerPoolDrained = "reputation worker pool drained" // Warn in ../node/pkg/innerring/processors/reputation/handlers.go
- ReputationNonAlphabetModeIgnoreReputationPutNotification = "non alphabet mode, ignore reputation put notification" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
- ReputationIgnoreReputationValue = "ignore reputation value" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
- ReputationCantSendApprovalTxForReputationValue = "can't send approval tx for reputation value" // Warn in ../node/pkg/innerring/processors/reputation/process_put.go
- ReputationReputationWorkerPool = "reputation worker pool" // Debug in ../node/pkg/innerring/processors/reputation/processor.go
SettlementNonAlphabetModeIgnoreAuditPayments = "non alphabet mode, ignore audit payments" // Info in ../node/pkg/innerring/processors/settlement/calls.go
SettlementNewAuditSettlementEvent = "new audit settlement event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
SettlementIgnoreGenesisEpoch = "ignore genesis epoch" // Debug in ../node/pkg/innerring/processors/settlement/calls.go
@@ -556,10 +520,6 @@ const (
FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go
FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go
- FrostFSNodeStartReportingReputationOnNewEpochEvent = "start reporting reputation on new epoch event" // Debug in ../node/cmd/frostfs-node/reputation.go
- FrostFSNodeCouldNotFetchEpochDuration = "could not fetch epoch duration" // Debug in ../node/cmd/frostfs-node/reputation.go
- FrostFSNodeCouldNotFetchIterationNumber = "could not fetch iteration number" // Debug in ../node/cmd/frostfs-node/reputation.go
- FrostFSNodeCouldNotCreateFixedEpochTimer = "could not create fixed epoch timer" // Debug in ../node/cmd/frostfs-node/reputation.go
FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go
@@ -593,8 +553,6 @@ const (
FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go
- FrostFSNodeWritingLocalReputationValues = "writing local reputation values" // Debug in ../node/cmd/frostfs-node/object.go
- FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient = "could not get latest network map to overload the client" // Warn in ../node/cmd/frostfs-node/object.go
FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go
FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go
@@ -617,16 +575,4 @@ const (
FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go
FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go
CommonApplicationStarted = "application started" // Info in ../node/cmd/frostfs-ir/main.go
- CommonInitializingRemoteWriterProvider = "initializing remote writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
- CommonRouteHasReachedDeadendProvider = "route has reached dead-end provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
- CommonInitializingNoopWriterProvider = "initializing no-op writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
- IntermediateWritingReceivedConsumersTrusts = "writing received consumer's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/consumers.go
- IntermediateStartWritingGlobalTrustsToContract = "start writing global trusts to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
- IntermediateFailedToSignGlobalTrust = "failed to sign global trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
- IntermediateFailedToWriteGlobalTrustToContract = "failed to write global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
- IntermediateSentGlobalTrustToContract = "sent global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
- IntermediateWritingReceivedDaughtersTrusts = "writing received daughter's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/daughters.go
- IntermediateAnnouncingTrust = "announcing trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/remote.go
- LocalAnnouncingTrusts = "announcing trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/remote.go
- LocalInitializingIteratorOverTrusts = "initializing iterator over trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/storage.go
)
From beabed788c72fe473cafe54b5afac110f7f82532 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Fri, 14 Apr 2023 14:51:13 +0300
Subject: [PATCH 0117/1943] [#247] network_config: Drop reputation
Drop the code that was expected to work with global reputation network
parameters.
Signed-off-by: Pavel Karpy
---
.../internal/modules/morph/config.go | 13 -------
.../modules/morph/initialize_deploy.go | 5 ---
.../internal/modules/morph/netmap_util.go | 2 --
cmd/frostfs-cli/modules/netmap/netinfo.go | 2 --
cmd/frostfs-node/netmap.go | 2 --
pkg/morph/client/netmap/config.go | 36 -------------------
6 files changed, 60 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/config.go b/cmd/frostfs-adm/internal/modules/morph/config.go
index 3a60e7197..09d071b53 100644
--- a/cmd/frostfs-adm/internal/modules/morph/config.go
+++ b/cmd/frostfs-adm/internal/modules/morph/config.go
@@ -56,15 +56,12 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
switch k {
case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig,
netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
- netmap.EtIterationsConfig,
netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
nbuf := make([]byte, 8)
copy(nbuf[:], v)
n := binary.LittleEndian.Uint64(nbuf)
_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
- case netmap.EtAlphaConfig:
- _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (str)\n", k, v)))
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
if len(v) == 0 || len(v) > 1 {
return invalidConfigValueErr(k)
@@ -139,22 +136,12 @@ func parseConfigPair(kvStr string, force bool) (key string, val any, err error)
switch key {
case netmap.AuditFeeConfig, netmap.BasicIncomeRateConfig,
netmap.ContainerFeeConfig, netmap.ContainerAliasFeeConfig,
- netmap.EtIterationsConfig,
netmap.EpochDurationConfig, netmap.IrCandidateFeeConfig,
netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig:
val, err = strconv.ParseInt(valRaw, 10, 64)
if err != nil {
err = fmt.Errorf("could not parse %s's value '%s' as int: %w", key, valRaw, err)
}
- case netmap.EtAlphaConfig:
- // just check that it could
- // be parsed correctly
- _, err = strconv.ParseFloat(v, 64)
- if err != nil {
- err = fmt.Errorf("could not parse %s's value '%s' as float: %w", key, valRaw, err)
- }
-
- val = valRaw
case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
val, err = strconv.ParseBool(valRaw)
if err != nil {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
index 156b1f788..9a15b69ba 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize_deploy.go
@@ -50,11 +50,6 @@ const (
subnetContract = "subnet"
)
-const (
- defaultEigenTrustIterations = 4
- defaultEigenTrustAlpha = "0.1"
-)
-
var (
contractList = []string{
auditContract,
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap_util.go b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
index 4b6de5bd2..23cfd120c 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap_util.go
@@ -15,8 +15,6 @@ func getDefaultNetmapContractConfigMap() map[string]any {
m[netmap.AuditFeeConfig] = viper.GetInt64(auditFeeInitFlag)
m[netmap.ContainerFeeConfig] = viper.GetInt64(containerFeeInitFlag)
m[netmap.ContainerAliasFeeConfig] = viper.GetInt64(containerAliasFeeInitFlag)
- m[netmap.EtIterationsConfig] = int64(defaultEigenTrustIterations)
- m[netmap.EtAlphaConfig] = defaultEigenTrustAlpha
m[netmap.BasicIncomeRateConfig] = viper.GetInt64(incomeRateInitFlag)
m[netmap.IrCandidateFeeConfig] = viper.GetInt64(candidateFeeInitFlag)
m[netmap.WithdrawFeeConfig] = viper.GetInt64(withdrawFeeInitFlag)
diff --git a/cmd/frostfs-cli/modules/netmap/netinfo.go b/cmd/frostfs-cli/modules/netmap/netinfo.go
index f34456c71..17acfd59c 100644
--- a/cmd/frostfs-cli/modules/netmap/netinfo.go
+++ b/cmd/frostfs-cli/modules/netmap/netinfo.go
@@ -41,8 +41,6 @@ var netInfoCmd = &cobra.Command{
cmd.Printf(format, "Audit fee", netInfo.AuditFee())
cmd.Printf(format, "Storage price", netInfo.StoragePrice())
cmd.Printf(format, "Container fee", netInfo.ContainerFee())
- cmd.Printf(format, "EigenTrust alpha", netInfo.EigenTrustAlpha())
- cmd.Printf(format, "Number of EigenTrust iterations", netInfo.NumberOfEigenTrustIterations())
cmd.Printf(format, "Epoch duration", netInfo.EpochDuration())
cmd.Printf(format, "Inner Ring candidate fee", netInfo.IRCandidateFee())
cmd.Printf(format, "Maximum object size", netInfo.MaxObjectSize())
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index 76cceeb6d..b8a25cb8c 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -451,8 +451,6 @@ func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
ni.SetEpochDuration(netInfoMorph.EpochDuration)
ni.SetContainerFee(netInfoMorph.ContainerFee)
ni.SetNamedContainerFee(netInfoMorph.ContainerAliasFee)
- ni.SetNumberOfEigenTrustIterations(netInfoMorph.EigenTrustIterations)
- ni.SetEigenTrustAlpha(netInfoMorph.EigenTrustAlpha)
ni.SetIRCandidateFee(netInfoMorph.IRCandidateFee)
ni.SetWithdrawalFee(netInfoMorph.WithdrawalFee)
diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go
index 3011bd541..71ffa2a7a 100644
--- a/pkg/morph/client/netmap/config.go
+++ b/pkg/morph/client/netmap/config.go
@@ -3,7 +3,6 @@ package netmap
import (
"errors"
"fmt"
- "strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
"github.com/nspcc-dev/neo-go/pkg/encoding/bigint"
@@ -17,8 +16,6 @@ const (
EpochDurationConfig = "EpochDuration"
ContainerFeeConfig = "ContainerFee"
ContainerAliasFeeConfig = "ContainerAliasFee"
- EtIterationsConfig = "EigenTrustIterations"
- EtAlphaConfig = "EigenTrustAlpha"
IrCandidateFeeConfig = "InnerRingCandidateFee"
WithdrawFeeConfig = "WithdrawFee"
HomomorphicHashingDisabledKey = "HomomorphicHashingDisabled"
@@ -90,28 +87,6 @@ func (c *Client) ContainerAliasFee() (uint64, error) {
return fee, nil
}
-// EigenTrustIterations returns global configuration value of iteration cycles
-// for EigenTrust algorithm per epoch.
-func (c *Client) EigenTrustIterations() (uint64, error) {
- iterations, err := c.readUInt64Config(EtIterationsConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get eigen trust iterations: %w", c, err)
- }
-
- return iterations, nil
-}
-
-// EigenTrustAlpha returns global configuration value of alpha parameter.
-// It receives the alpha as a string and tries to convert it to float.
-func (c *Client) EigenTrustAlpha() (float64, error) {
- strAlpha, err := c.readStringConfig(EtAlphaConfig)
- if err != nil {
- return 0, fmt.Errorf("(%T) could not get eigen trust alpha: %w", c, err)
- }
-
- return strconv.ParseFloat(strAlpha, 64)
-}
-
// HomomorphicHashDisabled returns global configuration value of homomorphic hashing
// settings.
//
@@ -246,10 +221,6 @@ type NetworkConfiguration struct {
ContainerAliasFee uint64
- EigenTrustIterations uint64
-
- EigenTrustAlpha float64
-
IRCandidateFee uint64
WithdrawalFee uint64
@@ -311,13 +282,6 @@ func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) {
res.ContainerFee = bytesToUint64(value)
case ContainerAliasFeeConfig:
res.ContainerAliasFee = bytesToUint64(value)
- case EtIterationsConfig:
- res.EigenTrustIterations = bytesToUint64(value)
- case EtAlphaConfig:
- res.EigenTrustAlpha, err = strconv.ParseFloat(string(value), 64)
- if err != nil {
- return fmt.Errorf("invalid prm %s: %v", EtAlphaConfig, err)
- }
case IrCandidateFeeConfig:
res.IRCandidateFee = bytesToUint64(value)
case WithdrawFeeConfig:
From 070154d506238f6712e0a327918b1e797106d134 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Fri, 14 Apr 2023 14:54:28 +0300
Subject: [PATCH 0118/1943] [#247] client: Drop reputation related RPCs
Signed-off-by: Pavel Karpy
---
pkg/core/client/client.go | 2 --
pkg/network/cache/multi.go | 18 ------------------
2 files changed, 20 deletions(-)
diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go
index d74adddcc..98e9d6367 100644
--- a/pkg/core/client/client.go
+++ b/pkg/core/client/client.go
@@ -19,8 +19,6 @@ type Client interface {
ObjectSearchInit(context.Context, client.PrmObjectSearch) (*client.ObjectListReader, error)
ObjectRangeInit(context.Context, client.PrmObjectRange) (*client.ObjectRangeReader, error)
ObjectHash(context.Context, client.PrmObjectHash) (*client.ResObjectHash, error)
- AnnounceLocalTrust(context.Context, client.PrmAnnounceLocalTrust) (*client.ResAnnounceLocalTrust, error)
- AnnounceIntermediateTrust(context.Context, client.PrmAnnounceIntermediateTrust) (*client.ResAnnounceIntermediateTrust, error)
ExecRaw(f func(client *rawclient.Client) error) error
Close() error
}
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index 39c191b78..2f698912c 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -277,24 +277,6 @@ func (x *multiClient) ObjectSearchInit(ctx context.Context, p client.PrmObjectSe
return
}
-func (x *multiClient) AnnounceLocalTrust(ctx context.Context, prm client.PrmAnnounceLocalTrust) (res *client.ResAnnounceLocalTrust, err error) {
- err = x.iterateClients(ctx, func(c clientcore.Client) error {
- res, err = c.AnnounceLocalTrust(ctx, prm)
- return err
- })
-
- return
-}
-
-func (x *multiClient) AnnounceIntermediateTrust(ctx context.Context, prm client.PrmAnnounceIntermediateTrust) (res *client.ResAnnounceIntermediateTrust, err error) {
- err = x.iterateClients(ctx, func(c clientcore.Client) error {
- res, err = c.AnnounceIntermediateTrust(ctx, prm)
- return err
- })
-
- return
-}
-
func (x *multiClient) ExecRaw(f func(client *rawclient.Client) error) error {
panic("multiClient.ExecRaw() must not be called")
}
From 8466894fdff20a6ac1b8b9b9b972f8c0cb436862 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 14 Apr 2023 09:38:29 +0300
Subject: [PATCH 0119/1943] [#250] control: remove `DumpShard` and
`RestoreShard` RPC
We have `Evacuate` with a cleaner interface.
Also, remove them from CLI and engine.
Signed-off-by: Evgenii Stratonikov
---
.../modules/control/evacuate_shard.go | 6 +-
cmd/frostfs-cli/modules/control/shards.go | 4 -
.../modules/control/shards_dump.go | 66 -
.../modules/control/shards_restore.go | 66 -
pkg/local_object_storage/engine/dump.go | 19 -
pkg/local_object_storage/engine/evacuate.go | 5 +-
.../engine/evacuate_test.go | 2 +-
pkg/local_object_storage/engine/restore.go | 32 -
pkg/local_object_storage/shard/dump.go | 129 --
pkg/local_object_storage/shard/dump_test.go | 412 ------
pkg/local_object_storage/shard/restore.go | 145 --
pkg/services/control/convert.go | 36 -
pkg/services/control/rpc.go | 28 -
pkg/services/control/server/dump.go | 37 -
pkg/services/control/server/restore.go | 37 -
pkg/services/control/service.go | 58 -
pkg/services/control/service.pb.go | 1246 +++++------------
pkg/services/control/service.proto | 75 -
pkg/services/control/service_frostfs.pb.go | 310 ----
pkg/services/control/service_grpc.pb.go | 78 --
20 files changed, 325 insertions(+), 2466 deletions(-)
delete mode 100644 cmd/frostfs-cli/modules/control/shards_dump.go
delete mode 100644 cmd/frostfs-cli/modules/control/shards_restore.go
delete mode 100644 pkg/local_object_storage/engine/dump.go
delete mode 100644 pkg/local_object_storage/engine/restore.go
delete mode 100644 pkg/local_object_storage/shard/dump.go
delete mode 100644 pkg/local_object_storage/shard/dump_test.go
delete mode 100644 pkg/local_object_storage/shard/restore.go
delete mode 100644 pkg/services/control/server/dump.go
delete mode 100644 pkg/services/control/server/restore.go
diff --git a/cmd/frostfs-cli/modules/control/evacuate_shard.go b/cmd/frostfs-cli/modules/control/evacuate_shard.go
index 02ee88ce0..b72ff6301 100644
--- a/cmd/frostfs-cli/modules/control/evacuate_shard.go
+++ b/cmd/frostfs-cli/modules/control/evacuate_shard.go
@@ -8,6 +8,8 @@ import (
"github.com/spf13/cobra"
)
+const ignoreErrorsFlag = "no-errors"
+
var evacuateShardCmd = &cobra.Command{
Use: "evacuate",
Short: "Evacuate objects from shard",
@@ -20,7 +22,7 @@ func evacuateShard(cmd *cobra.Command, _ []string) {
req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)}
req.Body.Shard_ID = getShardIDList(cmd)
- req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(dumpIgnoreErrorsFlag)
+ req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag)
signRequest(cmd, pk, req)
@@ -47,7 +49,7 @@ func initControlEvacuateShardCmd() {
flags := evacuateShardCmd.Flags()
flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
flags.Bool(shardAllFlag, false, "Process all shards")
- flags.Bool(dumpIgnoreErrorsFlag, false, "Skip invalid/unreadable objects")
+ flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects")
evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go
index 9d3eb5c01..8e7ecff8c 100644
--- a/cmd/frostfs-cli/modules/control/shards.go
+++ b/cmd/frostfs-cli/modules/control/shards.go
@@ -13,16 +13,12 @@ var shardsCmd = &cobra.Command{
func initControlShardsCmd() {
shardsCmd.AddCommand(listShardsCmd)
shardsCmd.AddCommand(setShardModeCmd)
- shardsCmd.AddCommand(dumpShardCmd)
- shardsCmd.AddCommand(restoreShardCmd)
shardsCmd.AddCommand(evacuateShardCmd)
shardsCmd.AddCommand(flushCacheCmd)
shardsCmd.AddCommand(doctorCmd)
initControlShardsListCmd()
initControlSetShardModeCmd()
- initControlDumpShardCmd()
- initControlRestoreShardCmd()
initControlEvacuateShardCmd()
initControlFlushCacheCmd()
initControlDoctorCmd()
diff --git a/cmd/frostfs-cli/modules/control/shards_dump.go b/cmd/frostfs-cli/modules/control/shards_dump.go
deleted file mode 100644
index c0d0aca95..000000000
--- a/cmd/frostfs-cli/modules/control/shards_dump.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package control
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "github.com/spf13/cobra"
-)
-
-const (
- dumpFilepathFlag = "path"
- dumpIgnoreErrorsFlag = "no-errors"
-)
-
-var dumpShardCmd = &cobra.Command{
- Use: "dump",
- Short: "Dump objects from shard",
- Long: "Dump objects from shard to a file",
- Run: dumpShard,
-}
-
-func dumpShard(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- body := new(control.DumpShardRequest_Body)
- body.SetShardID(getShardID(cmd))
-
- p, _ := cmd.Flags().GetString(dumpFilepathFlag)
- body.SetFilepath(p)
-
- ignore, _ := cmd.Flags().GetBool(dumpIgnoreErrorsFlag)
- body.SetIgnoreErrors(ignore)
-
- req := new(control.DumpShardRequest)
- req.SetBody(body)
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.DumpShardResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.DumpShard(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- cmd.Println("Shard has been dumped successfully.")
-}
-
-func initControlDumpShardCmd() {
- initControlFlags(dumpShardCmd)
-
- flags := dumpShardCmd.Flags()
- flags.String(shardIDFlag, "", "Shard ID in base58 encoding")
- flags.String(dumpFilepathFlag, "", "File to write objects to")
- flags.Bool(dumpIgnoreErrorsFlag, false, "Skip invalid/unreadable objects")
-
- _ = dumpShardCmd.MarkFlagRequired(shardIDFlag)
- _ = dumpShardCmd.MarkFlagRequired(dumpFilepathFlag)
- _ = dumpShardCmd.MarkFlagRequired(controlRPC)
-}
diff --git a/cmd/frostfs-cli/modules/control/shards_restore.go b/cmd/frostfs-cli/modules/control/shards_restore.go
deleted file mode 100644
index edf97a731..000000000
--- a/cmd/frostfs-cli/modules/control/shards_restore.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package control
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "github.com/spf13/cobra"
-)
-
-const (
- restoreFilepathFlag = "path"
- restoreIgnoreErrorsFlag = "no-errors"
-)
-
-var restoreShardCmd = &cobra.Command{
- Use: "restore",
- Short: "Restore objects from shard",
- Long: "Restore objects from shard to a file",
- Run: restoreShard,
-}
-
-func restoreShard(cmd *cobra.Command, _ []string) {
- pk := key.Get(cmd)
-
- body := new(control.RestoreShardRequest_Body)
- body.SetShardID(getShardID(cmd))
-
- p, _ := cmd.Flags().GetString(restoreFilepathFlag)
- body.SetFilepath(p)
-
- ignore, _ := cmd.Flags().GetBool(restoreIgnoreErrorsFlag)
- body.SetIgnoreErrors(ignore)
-
- req := new(control.RestoreShardRequest)
- req.SetBody(body)
-
- signRequest(cmd, pk, req)
-
- cli := getClient(cmd, pk)
-
- var resp *control.RestoreShardResponse
- var err error
- err = cli.ExecRaw(func(client *client.Client) error {
- resp, err = control.RestoreShard(client, req)
- return err
- })
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-
- cmd.Println("Shard has been restored successfully.")
-}
-
-func initControlRestoreShardCmd() {
- initControlFlags(restoreShardCmd)
-
- flags := restoreShardCmd.Flags()
- flags.String(shardIDFlag, "", "Shard ID in base58 encoding")
- flags.String(restoreFilepathFlag, "", "File to read objects from")
- flags.Bool(restoreIgnoreErrorsFlag, false, "Skip invalid/unreadable objects")
-
- _ = restoreShardCmd.MarkFlagRequired(shardIDFlag)
- _ = restoreShardCmd.MarkFlagRequired(restoreFilepathFlag)
- _ = restoreShardCmd.MarkFlagRequired(controlRPC)
-}
diff --git a/pkg/local_object_storage/engine/dump.go b/pkg/local_object_storage/engine/dump.go
deleted file mode 100644
index f5cf8c32e..000000000
--- a/pkg/local_object_storage/engine/dump.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package engine
-
-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-
-// DumpShard dumps objects from the shard with provided identifier.
-//
-// Returns an error if shard is not read-only.
-func (e *StorageEngine) DumpShard(id *shard.ID, prm shard.DumpPrm) error {
- e.mtx.RLock()
- defer e.mtx.RUnlock()
-
- sh, ok := e.shards[id.String()]
- if !ok {
- return errShardNotFound
- }
-
- _, err := sh.Dump(prm)
- return err
-}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 2ec2c2b35..e212784a3 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -16,6 +17,8 @@ import (
"go.uber.org/zap"
)
+var ErrMustBeReadOnly = logicerr.New("shard must be in read-only mode")
+
// EvacuateShardPrm represents parameters for the EvacuateShard operation.
type EvacuateShardPrm struct {
shardID []*shard.ID
@@ -135,7 +138,7 @@ func (e *StorageEngine) getActualShards(shardIDs []string, handlerDefined bool)
}
if !sh.GetMode().ReadOnly() {
- return nil, nil, shard.ErrMustBeReadOnly
+ return nil, nil, ErrMustBeReadOnly
}
}
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 291bc2b78..fc9da5e3f 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -103,7 +103,7 @@ func TestEvacuateShard(t *testing.T) {
t.Run("must be read-only", func(t *testing.T) {
res, err := e.Evacuate(context.Background(), prm)
- require.ErrorIs(t, err, shard.ErrMustBeReadOnly)
+ require.ErrorIs(t, err, ErrMustBeReadOnly)
require.Equal(t, 0, res.Count())
})
diff --git a/pkg/local_object_storage/engine/restore.go b/pkg/local_object_storage/engine/restore.go
deleted file mode 100644
index 7cc2eaf6c..000000000
--- a/pkg/local_object_storage/engine/restore.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package engine
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// RestoreShard restores objects from dump to the shard with provided identifier.
-//
-// Returns an error if shard is not read-only.
-func (e *StorageEngine) RestoreShard(ctx context.Context, id *shard.ID, prm shard.RestorePrm) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.RestoreShard",
- trace.WithAttributes(
- attribute.String("shard_id", id.String()),
- ))
- defer span.End()
-
- e.mtx.RLock()
- defer e.mtx.RUnlock()
-
- sh, ok := e.shards[id.String()]
- if !ok {
- return errShardNotFound
- }
-
- _, err := sh.Restore(ctx, prm)
- return err
-}
diff --git a/pkg/local_object_storage/shard/dump.go b/pkg/local_object_storage/shard/dump.go
deleted file mode 100644
index 8d9fe0f71..000000000
--- a/pkg/local_object_storage/shard/dump.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package shard
-
-import (
- "encoding/binary"
- "io"
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
-)
-
-var dumpMagic = []byte("NEOF")
-
-// DumpPrm groups the parameters of Dump operation.
-type DumpPrm struct {
- path string
- stream io.Writer
- ignoreErrors bool
-}
-
-// WithPath is an Dump option to set the destination path.
-func (p *DumpPrm) WithPath(path string) {
- p.path = path
-}
-
-// WithStream is an Dump option to set the destination stream.
-// It takes priority over `path` option.
-func (p *DumpPrm) WithStream(r io.Writer) {
- p.stream = r
-}
-
-// WithIgnoreErrors is an Dump option to allow ignore all errors during iteration.
-// This includes invalid blobovniczas as well as corrupted objects.
-func (p *DumpPrm) WithIgnoreErrors(ignore bool) {
- p.ignoreErrors = ignore
-}
-
-// DumpRes groups the result fields of Dump operation.
-type DumpRes struct {
- count int
-}
-
-// Count return amount of object written.
-func (r DumpRes) Count() int {
- return r.count
-}
-
-var ErrMustBeReadOnly = logicerr.New("shard must be in read-only mode")
-
-// Dump dumps all objects from the shard to a file or stream.
-//
-// Returns any error encountered.
-func (s *Shard) Dump(prm DumpPrm) (DumpRes, error) {
- s.m.RLock()
- defer s.m.RUnlock()
-
- if !s.info.Mode.ReadOnly() {
- return DumpRes{}, ErrMustBeReadOnly
- }
-
- w := prm.stream
- if w == nil {
- f, err := os.OpenFile(prm.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0640)
- if err != nil {
- return DumpRes{}, err
- }
- defer f.Close()
-
- w = f
- }
-
- _, err := w.Write(dumpMagic)
- if err != nil {
- return DumpRes{}, err
- }
-
- var count int
-
- if s.hasWriteCache() {
- var iterPrm writecache.IterationPrm
-
- iterPrm.WithIgnoreErrors(prm.ignoreErrors)
- iterPrm.WithHandler(func(data []byte) error {
- var size [4]byte
- binary.LittleEndian.PutUint32(size[:], uint32(len(data)))
- if _, err := w.Write(size[:]); err != nil {
- return err
- }
-
- if _, err := w.Write(data); err != nil {
- return err
- }
-
- count++
- return nil
- })
-
- err := s.writeCache.Iterate(iterPrm)
- if err != nil {
- return DumpRes{}, err
- }
- }
-
- var pi common.IteratePrm
- pi.IgnoreErrors = prm.ignoreErrors
- pi.Handler = func(elem common.IterationElement) error {
- data := elem.ObjectData
-
- var size [4]byte
- binary.LittleEndian.PutUint32(size[:], uint32(len(data)))
- if _, err := w.Write(size[:]); err != nil {
- return err
- }
-
- if _, err := w.Write(data); err != nil {
- return err
- }
-
- count++
- return nil
- }
-
- if _, err := s.blobStor.Iterate(pi); err != nil {
- return DumpRes{}, err
- }
-
- return DumpRes{count: count}, nil
-}
diff --git a/pkg/local_object_storage/shard/dump_test.go b/pkg/local_object_storage/shard/dump_test.go
deleted file mode 100644
index 921717204..000000000
--- a/pkg/local_object_storage/shard/dump_test.go
+++ /dev/null
@@ -1,412 +0,0 @@
-package shard_test
-
-import (
- "bytes"
- "context"
- "io"
- "math/rand"
- "os"
- "path/filepath"
- "testing"
- "time"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
- "github.com/klauspost/compress/zstd"
- "github.com/stretchr/testify/require"
- "go.uber.org/zap/zaptest"
-)
-
-func TestDump(t *testing.T) {
- t.Run("without write-cache", func(t *testing.T) {
- testDump(t, 10, false)
- })
- t.Run("with write-cache", func(t *testing.T) {
- // Put a bit more objects to write-cache to facilitate race-conditions.
- testDump(t, 100, true)
- })
-}
-
-func testDump(t *testing.T, objCount int, hasWriteCache bool) {
- const (
- wcSmallObjectSize = 1024 // 1 KiB, goes to write-cache memory
- wcBigObjectSize = 4 * 1024 // 4 KiB, goes to write-cache FSTree
- bsSmallObjectSize = 10 * 1024 // 10 KiB, goes to blobovnicza DB
- bsBigObjectSize = 1024*1024 + 1 // > 1 MiB, goes to blobovnicza FSTree
- )
-
- var sh *shard.Shard
- if !hasWriteCache {
- sh = newShard(t, false)
- } else {
- sh = newCustomShard(t, t.TempDir(), true,
- []writecache.Option{
- writecache.WithSmallObjectSize(wcSmallObjectSize),
- writecache.WithMaxObjectSize(wcBigObjectSize),
- writecache.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
- },
- nil)
- }
- defer releaseShard(sh, t)
-
- out := filepath.Join(t.TempDir(), "dump")
- var prm shard.DumpPrm
- prm.WithPath(out)
-
- t.Run("must be read-only", func(t *testing.T) {
- _, err := sh.Dump(prm)
- require.ErrorIs(t, err, shard.ErrMustBeReadOnly)
- })
-
- require.NoError(t, sh.SetMode(mode.ReadOnly))
- outEmpty := out + ".empty"
- var dumpPrm shard.DumpPrm
- dumpPrm.WithPath(outEmpty)
-
- res, err := sh.Dump(dumpPrm)
- require.NoError(t, err)
- require.Equal(t, 0, res.Count())
- require.NoError(t, sh.SetMode(mode.ReadWrite))
-
- // Approximate object header size.
- const headerSize = 400
-
- objects := make([]*objectSDK.Object, objCount)
- for i := 0; i < objCount; i++ {
- cnr := cidtest.ID()
- var size int
- switch i % 6 {
- case 0, 1:
- size = wcSmallObjectSize - headerSize
- case 2, 3:
- size = bsSmallObjectSize - headerSize
- case 4:
- size = wcBigObjectSize - headerSize
- default:
- size = bsBigObjectSize - headerSize
- }
- data := make([]byte, size)
- rand.Read(data)
- obj := testutil.GenerateObjectWithCIDWithPayload(cnr, data)
- objects[i] = obj
-
- var prm shard.PutPrm
- prm.SetObject(objects[i])
- _, err := sh.Put(context.Background(), prm)
- require.NoError(t, err)
- }
-
- require.NoError(t, sh.SetMode(mode.ReadOnly))
-
- t.Run("invalid path", func(t *testing.T) {
- var dumpPrm shard.DumpPrm
- dumpPrm.WithPath("\x00")
-
- _, err := sh.Dump(dumpPrm)
- require.Error(t, err)
- })
-
- res, err = sh.Dump(prm)
- require.NoError(t, err)
- require.Equal(t, objCount, res.Count())
-
- t.Run("restore", func(t *testing.T) {
- sh := newShard(t, false)
- defer releaseShard(sh, t)
-
- t.Run("empty dump", func(t *testing.T) {
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(outEmpty)
- res, err := sh.Restore(context.Background(), restorePrm)
- require.NoError(t, err)
- require.Equal(t, 0, res.Count())
- })
-
- t.Run("invalid path", func(t *testing.T) {
- _, err := sh.Restore(context.Background(), *new(shard.RestorePrm))
- require.ErrorIs(t, err, os.ErrNotExist)
- })
-
- t.Run("invalid file", func(t *testing.T) {
- t.Run("invalid magic", func(t *testing.T) {
- out := out + ".wrongmagic"
- require.NoError(t, os.WriteFile(out, []byte{0, 0, 0, 0}, os.ModePerm))
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
-
- _, err := sh.Restore(context.Background(), restorePrm)
- require.ErrorIs(t, err, shard.ErrInvalidMagic)
- })
-
- fileData, err := os.ReadFile(out)
- require.NoError(t, err)
-
- t.Run("incomplete size", func(t *testing.T) {
- out := out + ".wrongsize"
- fileData := append(fileData, 1)
- require.NoError(t, os.WriteFile(out, fileData, os.ModePerm))
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
-
- _, err := sh.Restore(context.Background(), restorePrm)
- require.ErrorIs(t, err, io.ErrUnexpectedEOF)
- })
- t.Run("incomplete object data", func(t *testing.T) {
- out := out + ".wrongsize"
- fileData := append(fileData, 1, 0, 0, 0)
- require.NoError(t, os.WriteFile(out, fileData, os.ModePerm))
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
-
- _, err := sh.Restore(context.Background(), restorePrm)
- require.ErrorIs(t, err, io.EOF)
- })
- t.Run("invalid object", func(t *testing.T) {
- out := out + ".wrongobj"
- fileData := append(fileData, 1, 0, 0, 0, 0xFF, 4, 0, 0, 0, 1, 2, 3, 4)
- require.NoError(t, os.WriteFile(out, fileData, os.ModePerm))
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
-
- _, err := sh.Restore(context.Background(), restorePrm)
- require.Error(t, err)
-
- t.Run("skip errors", func(t *testing.T) {
- sh := newCustomShard(t, filepath.Join(t.TempDir(), "ignore"), false, nil, nil)
- t.Cleanup(func() { require.NoError(t, sh.Close()) })
-
- var restorePrm shard.RestorePrm
- restorePrm.WithPath(out)
- restorePrm.WithIgnoreErrors(true)
-
- res, err := sh.Restore(context.Background(), restorePrm)
- require.NoError(t, err)
- require.Equal(t, objCount, res.Count())
- require.Equal(t, 2, res.FailCount())
- })
- })
- })
-
- var prm shard.RestorePrm
- prm.WithPath(out)
- t.Run("must allow write", func(t *testing.T) {
- require.NoError(t, sh.SetMode(mode.ReadOnly))
-
- _, err := sh.Restore(context.Background(), prm)
- require.ErrorIs(t, err, shard.ErrReadOnlyMode)
- })
-
- require.NoError(t, sh.SetMode(mode.ReadWrite))
-
- checkRestore(t, sh, prm, objects)
- })
-}
-
-func TestStream(t *testing.T) {
- sh1 := newCustomShard(t, filepath.Join(t.TempDir(), "shard1"), false, nil, nil)
- defer releaseShard(sh1, t)
-
- sh2 := newCustomShard(t, filepath.Join(t.TempDir(), "shard2"), false, nil, nil)
- defer releaseShard(sh2, t)
-
- const objCount = 5
- objects := make([]*objectSDK.Object, objCount)
- for i := 0; i < objCount; i++ {
- cnr := cidtest.ID()
- obj := testutil.GenerateObjectWithCID(cnr)
- objects[i] = obj
-
- var prm shard.PutPrm
- prm.SetObject(objects[i])
- _, err := sh1.Put(context.Background(), prm)
- require.NoError(t, err)
- }
-
- require.NoError(t, sh1.SetMode(mode.ReadOnly))
-
- r, w := io.Pipe()
- finish := make(chan struct{})
-
- go func() {
- var dumpPrm shard.DumpPrm
- dumpPrm.WithStream(w)
-
- res, err := sh1.Dump(dumpPrm)
- require.NoError(t, err)
- require.Equal(t, objCount, res.Count())
- require.NoError(t, w.Close())
- close(finish)
- }()
-
- var restorePrm shard.RestorePrm
- restorePrm.WithStream(r)
-
- checkRestore(t, sh2, restorePrm, objects)
- require.Eventually(t, func() bool {
- select {
- case <-finish:
- return true
- default:
- return false
- }
- }, time.Second, time.Millisecond)
-}
-
-func checkRestore(t *testing.T, sh *shard.Shard, prm shard.RestorePrm, objects []*objectSDK.Object) {
- res, err := sh.Restore(context.Background(), prm)
- require.NoError(t, err)
- require.Equal(t, len(objects), res.Count())
-
- var getPrm shard.GetPrm
-
- for i := range objects {
- getPrm.SetAddress(object.AddressOf(objects[i]))
- res, err := sh.Get(context.Background(), getPrm)
- require.NoError(t, err)
- require.Equal(t, objects[i], res.Object())
- }
-}
-
-func TestDumpIgnoreErrors(t *testing.T) {
- const (
- wcSmallObjectSize = 512 // goes to write-cache memory
- wcBigObjectSize = wcSmallObjectSize << 1 // goes to write-cache FSTree
- bsSmallObjectSize = wcSmallObjectSize << 2 // goes to blobovnicza DB
-
- objCount = 10
- headerSize = 400
- )
-
- dir := t.TempDir()
- bsPath := filepath.Join(dir, "blob")
- bsOpts := func(sw uint64) []blobstor.Option {
- return []blobstor.Option{
- blobstor.WithCompressObjects(true),
- blobstor.WithStorages([]blobstor.SubStorage{
- {
- Storage: blobovniczatree.NewBlobovniczaTree(
- blobovniczatree.WithRootPath(filepath.Join(bsPath, "blobovnicza")),
- blobovniczatree.WithBlobovniczaShallowDepth(1),
- blobovniczatree.WithBlobovniczaShallowWidth(sw),
- blobovniczatree.WithOpenedCacheSize(1)),
- Policy: func(_ *objectSDK.Object, data []byte) bool {
- return len(data) < bsSmallObjectSize
- },
- },
- {
- Storage: fstree.New(
- fstree.WithPath(bsPath),
- fstree.WithDepth(1)),
- },
- }),
- }
- }
- wcPath := filepath.Join(dir, "writecache")
- wcOpts := []writecache.Option{
- writecache.WithPath(wcPath),
- writecache.WithSmallObjectSize(wcSmallObjectSize),
- writecache.WithMaxObjectSize(wcBigObjectSize),
- }
- sh := newCustomShard(t, dir, true, wcOpts, bsOpts(2))
-
- objects := make([]*objectSDK.Object, objCount)
- for i := 0; i < objCount; i++ {
- size := (wcSmallObjectSize << (i % 4)) - headerSize
- obj := testutil.GenerateObjectWithCIDWithPayload(cidtest.ID(), make([]byte, size))
- objects[i] = obj
-
- var prm shard.PutPrm
- prm.SetObject(objects[i])
- _, err := sh.Put(context.Background(), prm)
- require.NoError(t, err)
- }
-
- releaseShard(sh, t)
-
- b := bytes.NewBuffer(nil)
- badObject := make([]byte, 1000)
- enc, err := zstd.NewWriter(b)
- require.NoError(t, err)
- corruptedData := enc.EncodeAll(badObject, nil)
- for i := 4; i < len(corruptedData); i++ {
- corruptedData[i] ^= 0xFF
- }
-
- // There are 3 different types of errors to consider.
- // To setup envirionment we use implementation details so this test must be updated
- // if any of them are changed.
- {
- // 1. Invalid object in fs tree.
- // 1.1. Invalid compressed data.
- addr := cidtest.ID().EncodeToString() + "." + objecttest.ID().EncodeToString()
- dirName := filepath.Join(bsPath, addr[:2])
- require.NoError(t, os.MkdirAll(dirName, os.ModePerm))
- require.NoError(t, os.WriteFile(filepath.Join(dirName, addr[2:]), corruptedData, os.ModePerm))
-
- // 1.2. Unreadable file.
- addr = cidtest.ID().EncodeToString() + "." + objecttest.ID().EncodeToString()
- dirName = filepath.Join(bsPath, addr[:2])
- require.NoError(t, os.MkdirAll(dirName, os.ModePerm))
-
- fname := filepath.Join(dirName, addr[2:])
- require.NoError(t, os.WriteFile(fname, []byte{}, 0))
-
- // 1.3. Unreadable dir.
- require.NoError(t, os.MkdirAll(filepath.Join(bsPath, "ZZ"), 0))
- }
-
- sh = newCustomShard(t, dir, true, wcOpts, bsOpts(3))
- require.NoError(t, sh.SetMode(mode.ReadOnly))
-
- {
- // 2. Invalid object in blobovnicza.
- // 2.1. Invalid blobovnicza.
- bTree := filepath.Join(bsPath, "blobovnicza")
- data := make([]byte, 1024)
- rand.Read(data)
- require.NoError(t, os.WriteFile(filepath.Join(bTree, "0", "2"), data, 0))
-
- // 2.2. Invalid object in valid blobovnicza.
- var prm blobovnicza.PutPrm
- prm.SetAddress(oid.Address{})
- prm.SetMarshaledObject(corruptedData)
- b := blobovnicza.New(blobovnicza.WithPath(filepath.Join(bTree, "1", "2")))
- require.NoError(t, b.Open())
- _, err := b.Put(prm)
- require.NoError(t, err)
- require.NoError(t, b.Close())
- }
-
- {
- // 3. Invalid object in write-cache. Note that because shard is read-only
- // the object won't be flushed.
- addr := cidtest.ID().EncodeToString() + "." + objecttest.ID().EncodeToString()
- dir := filepath.Join(wcPath, addr[:1])
- require.NoError(t, os.MkdirAll(dir, os.ModePerm))
- require.NoError(t, os.WriteFile(filepath.Join(dir, addr[1:]), nil, 0))
- }
-
- out := filepath.Join(t.TempDir(), "out.dump")
- var dumpPrm shard.DumpPrm
- dumpPrm.WithPath(out)
- dumpPrm.WithIgnoreErrors(true)
- res, err := sh.Dump(dumpPrm)
- require.NoError(t, err)
- require.Equal(t, objCount, res.Count())
-}
diff --git a/pkg/local_object_storage/shard/restore.go b/pkg/local_object_storage/shard/restore.go
deleted file mode 100644
index 2cb64a518..000000000
--- a/pkg/local_object_storage/shard/restore.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package shard
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "errors"
- "io"
- "os"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// ErrInvalidMagic is returned when dump format is invalid.
-var ErrInvalidMagic = logicerr.New("invalid magic")
-
-// RestorePrm groups the parameters of Restore operation.
-type RestorePrm struct {
- path string
- stream io.Reader
- ignoreErrors bool
-}
-
-// WithPath is a Restore option to set the destination path.
-func (p *RestorePrm) WithPath(path string) {
- p.path = path
-}
-
-// WithStream is a Restore option to set the stream to read objects from.
-// It takes priority over `WithPath` option.
-func (p *RestorePrm) WithStream(r io.Reader) {
- p.stream = r
-}
-
-// WithIgnoreErrors is a Restore option which allows to ignore errors encountered during restore.
-// Corrupted objects will not be processed.
-func (p *RestorePrm) WithIgnoreErrors(ignore bool) {
- p.ignoreErrors = ignore
-}
-
-// RestoreRes groups the result fields of Restore operation.
-type RestoreRes struct {
- count int
- failed int
-}
-
-// Count return amount of object written.
-func (r RestoreRes) Count() int {
- return r.count
-}
-
-// FailCount return amount of object skipped.
-func (r RestoreRes) FailCount() int {
- return r.failed
-}
-
-// Restore restores objects from the dump prepared by Dump.
-//
-// Returns any error encountered.
-func (s *Shard) Restore(ctx context.Context, prm RestorePrm) (RestoreRes, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "Shard.Restore",
- trace.WithAttributes(
- attribute.String("shard_id", s.ID().String()),
- attribute.String("path", prm.path),
- attribute.Bool("ignore_errors", prm.ignoreErrors),
- ))
- defer span.End()
-
- s.m.RLock()
- defer s.m.RUnlock()
-
- if s.info.Mode.ReadOnly() {
- return RestoreRes{}, ErrReadOnlyMode
- }
-
- r := prm.stream
- if r == nil {
- f, err := os.OpenFile(prm.path, os.O_RDONLY, os.ModeExclusive)
- if err != nil {
- return RestoreRes{}, err
- }
- defer f.Close()
-
- r = f
- }
-
- var m [4]byte
- _, _ = io.ReadFull(r, m[:])
- if !bytes.Equal(m[:], dumpMagic) {
- return RestoreRes{}, ErrInvalidMagic
- }
-
- var putPrm PutPrm
-
- var count, failCount int
- var data []byte
- var size [4]byte
- for {
- // If there are less than 4 bytes left, `Read` returns nil error instead of
- // io.ErrUnexpectedEOF, thus `ReadFull` is used.
- _, err := io.ReadFull(r, size[:])
- if err != nil {
- if errors.Is(err, io.EOF) {
- break
- }
- return RestoreRes{}, err
- }
-
- sz := binary.LittleEndian.Uint32(size[:])
- if uint32(cap(data)) < sz {
- data = make([]byte, sz)
- } else {
- data = data[:sz]
- }
-
- _, err = r.Read(data)
- if err != nil {
- return RestoreRes{}, err
- }
-
- obj := object.New()
- err = obj.Unmarshal(data)
- if err != nil {
- if prm.ignoreErrors {
- failCount++
- continue
- }
- return RestoreRes{}, err
- }
-
- putPrm.SetObject(obj)
- _, err = s.Put(ctx, putPrm)
- if err != nil && !IsErrObjectExpired(err) && !IsErrRemoved(err) {
- return RestoreRes{}, err
- }
-
- count++
- }
-
- return RestoreRes{count: count, failed: failCount}, nil
-}
diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go
index f7582dd68..84bde31d6 100644
--- a/pkg/services/control/convert.go
+++ b/pkg/services/control/convert.go
@@ -111,42 +111,6 @@ func (w *setShardModeResponseWrapper) FromGRPCMessage(m grpc.Message) error {
return nil
}
-type dumpShardResponseWrapper struct {
- *DumpShardResponse
-}
-
-func (w *dumpShardResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.DumpShardResponse
-}
-
-func (w *dumpShardResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- r, ok := m.(*DumpShardResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, (*DumpShardResponse)(nil))
- }
-
- w.DumpShardResponse = r
- return nil
-}
-
-type restoreShardResponseWrapper struct {
- *RestoreShardResponse
-}
-
-func (w *restoreShardResponseWrapper) ToGRPCMessage() grpc.Message {
- return w.RestoreShardResponse
-}
-
-func (w *restoreShardResponseWrapper) FromGRPCMessage(m grpc.Message) error {
- r, ok := m.(*RestoreShardResponse)
- if !ok {
- return message.NewUnexpectedMessageType(m, (*RestoreShardResponse)(nil))
- }
-
- w.RestoreShardResponse = r
- return nil
-}
-
type synchronizeTreeResponseWrapper struct {
*SynchronizeTreeResponse
}
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 2676ea7a5..625f485c9 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -13,8 +13,6 @@ const (
rpcDropObjects = "DropObjects"
rpcListShards = "ListShards"
rpcSetShardMode = "SetShardMode"
- rpcDumpShard = "DumpShard"
- rpcRestoreShard = "RestoreShard"
rpcSynchronizeTree = "SynchronizeTree"
rpcEvacuateShard = "EvacuateShard"
rpcFlushCache = "FlushCache"
@@ -128,32 +126,6 @@ func SetShardMode(
return wResp.m, nil
}
-// DumpShard executes ControlService.DumpShard RPC.
-func DumpShard(cli *client.Client, req *DumpShardRequest, opts ...client.CallOption) (*DumpShardResponse, error) {
- wResp := &dumpShardResponseWrapper{new(DumpShardResponse)}
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcDumpShard), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.DumpShardResponse, nil
-}
-
-// RestoreShard executes ControlService.DumpShard RPC.
-func RestoreShard(cli *client.Client, req *RestoreShardRequest, opts ...client.CallOption) (*RestoreShardResponse, error) {
- wResp := &restoreShardResponseWrapper{new(RestoreShardResponse)}
- wReq := &requestWrapper{m: req}
-
- err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcRestoreShard), wReq, wResp, opts...)
- if err != nil {
- return nil, err
- }
-
- return wResp.RestoreShardResponse, nil
-}
-
// SynchronizeTree executes ControlService.SynchronizeTree RPC.
func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...client.CallOption) (*SynchronizeTreeResponse, error) {
wResp := &synchronizeTreeResponseWrapper{new(SynchronizeTreeResponse)}
diff --git a/pkg/services/control/server/dump.go b/pkg/services/control/server/dump.go
deleted file mode 100644
index 28be02aa4..000000000
--- a/pkg/services/control/server/dump.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package control
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) DumpShard(_ context.Context, req *control.DumpShardRequest) (*control.DumpShardResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID())
-
- var prm shard.DumpPrm
- prm.WithPath(req.GetBody().GetFilepath())
- prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
-
- err = s.s.DumpShard(shardID, prm)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := new(control.DumpShardResponse)
- resp.SetBody(new(control.DumpShardResponse_Body))
-
- err = SignMessage(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
diff --git a/pkg/services/control/server/restore.go b/pkg/services/control/server/restore.go
deleted file mode 100644
index dba186f57..000000000
--- a/pkg/services/control/server/restore.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package control
-
-import (
- "context"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func (s *Server) RestoreShard(ctx context.Context, req *control.RestoreShardRequest) (*control.RestoreShardResponse, error) {
- err := s.isValidRequest(req)
- if err != nil {
- return nil, status.Error(codes.PermissionDenied, err.Error())
- }
-
- shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID())
-
- var prm shard.RestorePrm
- prm.WithPath(req.GetBody().GetFilepath())
- prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
-
- err = s.s.RestoreShard(ctx, shardID, prm)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- resp := new(control.RestoreShardResponse)
- resp.SetBody(new(control.RestoreShardResponse_Body))
-
- err = SignMessage(s.key, resp)
- if err != nil {
- return nil, status.Error(codes.Internal, err.Error())
- }
- return resp, nil
-}
diff --git a/pkg/services/control/service.go b/pkg/services/control/service.go
index dd349dc57..ef0c0a8d2 100644
--- a/pkg/services/control/service.go
+++ b/pkg/services/control/service.go
@@ -127,64 +127,6 @@ func (x *SetShardModeResponse) SetBody(v *SetShardModeResponse_Body) {
}
}
-// SetShardID sets shard ID for the dump shard request.
-func (x *DumpShardRequest_Body) SetShardID(id []byte) {
- x.Shard_ID = id
-}
-
-// SetFilepath sets filepath for the dump shard request.
-func (x *DumpShardRequest_Body) SetFilepath(p string) {
- x.Filepath = p
-}
-
-// SetIgnoreErrors sets ignore errors flag for the dump shard request.
-func (x *DumpShardRequest_Body) SetIgnoreErrors(ignore bool) {
- x.IgnoreErrors = ignore
-}
-
-// SetBody sets request body.
-func (x *DumpShardRequest) SetBody(v *DumpShardRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets response body.
-func (x *DumpShardResponse) SetBody(v *DumpShardResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetShardID sets shard ID for the restore shard request.
-func (x *RestoreShardRequest_Body) SetShardID(id []byte) {
- x.Shard_ID = id
-}
-
-// SetFilepath sets filepath for the restore shard request.
-func (x *RestoreShardRequest_Body) SetFilepath(p string) {
- x.Filepath = p
-}
-
-// SetIgnoreErrors sets ignore errors flag for the restore shard request.
-func (x *RestoreShardRequest_Body) SetIgnoreErrors(ignore bool) {
- x.IgnoreErrors = ignore
-}
-
-// SetBody sets request body.
-func (x *RestoreShardRequest) SetBody(v *RestoreShardRequest_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
-// SetBody sets response body.
-func (x *RestoreShardResponse) SetBody(v *RestoreShardResponse_Body) {
- if x != nil {
- x.Body = v
- }
-}
-
// SetBody sets list shards request body.
func (x *SynchronizeTreeRequest) SetBody(v *SynchronizeTreeRequest_Body) {
if x != nil {
diff --git a/pkg/services/control/service.pb.go b/pkg/services/control/service.pb.go
index ca3e2770e..d713bb38d 100644
--- a/pkg/services/control/service.pb.go
+++ b/pkg/services/control/service.pb.go
@@ -600,238 +600,6 @@ func (x *SetShardModeResponse) GetSignature() *Signature {
return nil
}
-// DumpShard request.
-type DumpShardRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of dump shard request message.
- Body *DumpShardRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DumpShardRequest) Reset() {
- *x = DumpShardRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DumpShardRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DumpShardRequest) ProtoMessage() {}
-
-func (x *DumpShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DumpShardRequest.ProtoReflect.Descriptor instead.
-func (*DumpShardRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *DumpShardRequest) GetBody() *DumpShardRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DumpShardRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// DumpShard response.
-type DumpShardResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of dump shard response message.
- Body *DumpShardResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *DumpShardResponse) Reset() {
- *x = DumpShardResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DumpShardResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DumpShardResponse) ProtoMessage() {}
-
-func (x *DumpShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DumpShardResponse.ProtoReflect.Descriptor instead.
-func (*DumpShardResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *DumpShardResponse) GetBody() *DumpShardResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *DumpShardResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// RestoreShard request.
-type RestoreShardRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of restore shard request message.
- Body *RestoreShardRequest_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RestoreShardRequest) Reset() {
- *x = RestoreShardRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreShardRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreShardRequest) ProtoMessage() {}
-
-func (x *RestoreShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreShardRequest.ProtoReflect.Descriptor instead.
-func (*RestoreShardRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *RestoreShardRequest) GetBody() *RestoreShardRequest_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RestoreShardRequest) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
-// RestoreShard response.
-type RestoreShardResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Body of restore shard response message.
- Body *RestoreShardResponse_Body `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"`
- // Body signature.
- Signature *Signature `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"`
-}
-
-func (x *RestoreShardResponse) Reset() {
- *x = RestoreShardResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreShardResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreShardResponse) ProtoMessage() {}
-
-func (x *RestoreShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreShardResponse.ProtoReflect.Descriptor instead.
-func (*RestoreShardResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *RestoreShardResponse) GetBody() *RestoreShardResponse_Body {
- if x != nil {
- return x.Body
- }
- return nil
-}
-
-func (x *RestoreShardResponse) GetSignature() *Signature {
- if x != nil {
- return x.Signature
- }
- return nil
-}
-
// SynchronizeTree request.
type SynchronizeTreeRequest struct {
state protoimpl.MessageState
@@ -847,7 +615,7 @@ type SynchronizeTreeRequest struct {
func (x *SynchronizeTreeRequest) Reset() {
*x = SynchronizeTreeRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[14]
+ mi := &file_pkg_services_control_service_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -860,7 +628,7 @@ func (x *SynchronizeTreeRequest) String() string {
func (*SynchronizeTreeRequest) ProtoMessage() {}
func (x *SynchronizeTreeRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[14]
+ mi := &file_pkg_services_control_service_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -873,7 +641,7 @@ func (x *SynchronizeTreeRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use SynchronizeTreeRequest.ProtoReflect.Descriptor instead.
func (*SynchronizeTreeRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10}
}
func (x *SynchronizeTreeRequest) GetBody() *SynchronizeTreeRequest_Body {
@@ -905,7 +673,7 @@ type SynchronizeTreeResponse struct {
func (x *SynchronizeTreeResponse) Reset() {
*x = SynchronizeTreeResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[15]
+ mi := &file_pkg_services_control_service_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -918,7 +686,7 @@ func (x *SynchronizeTreeResponse) String() string {
func (*SynchronizeTreeResponse) ProtoMessage() {}
func (x *SynchronizeTreeResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[15]
+ mi := &file_pkg_services_control_service_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -931,7 +699,7 @@ func (x *SynchronizeTreeResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use SynchronizeTreeResponse.ProtoReflect.Descriptor instead.
func (*SynchronizeTreeResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11}
}
func (x *SynchronizeTreeResponse) GetBody() *SynchronizeTreeResponse_Body {
@@ -961,7 +729,7 @@ type EvacuateShardRequest struct {
func (x *EvacuateShardRequest) Reset() {
*x = EvacuateShardRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[16]
+ mi := &file_pkg_services_control_service_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -974,7 +742,7 @@ func (x *EvacuateShardRequest) String() string {
func (*EvacuateShardRequest) ProtoMessage() {}
func (x *EvacuateShardRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[16]
+ mi := &file_pkg_services_control_service_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -987,7 +755,7 @@ func (x *EvacuateShardRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use EvacuateShardRequest.ProtoReflect.Descriptor instead.
func (*EvacuateShardRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12}
}
func (x *EvacuateShardRequest) GetBody() *EvacuateShardRequest_Body {
@@ -1017,7 +785,7 @@ type EvacuateShardResponse struct {
func (x *EvacuateShardResponse) Reset() {
*x = EvacuateShardResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[17]
+ mi := &file_pkg_services_control_service_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1030,7 +798,7 @@ func (x *EvacuateShardResponse) String() string {
func (*EvacuateShardResponse) ProtoMessage() {}
func (x *EvacuateShardResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[17]
+ mi := &file_pkg_services_control_service_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1043,7 +811,7 @@ func (x *EvacuateShardResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use EvacuateShardResponse.ProtoReflect.Descriptor instead.
func (*EvacuateShardResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13}
}
func (x *EvacuateShardResponse) GetBody() *EvacuateShardResponse_Body {
@@ -1073,7 +841,7 @@ type FlushCacheRequest struct {
func (x *FlushCacheRequest) Reset() {
*x = FlushCacheRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[18]
+ mi := &file_pkg_services_control_service_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1086,7 +854,7 @@ func (x *FlushCacheRequest) String() string {
func (*FlushCacheRequest) ProtoMessage() {}
func (x *FlushCacheRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[18]
+ mi := &file_pkg_services_control_service_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1099,7 +867,7 @@ func (x *FlushCacheRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use FlushCacheRequest.ProtoReflect.Descriptor instead.
func (*FlushCacheRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14}
}
func (x *FlushCacheRequest) GetBody() *FlushCacheRequest_Body {
@@ -1129,7 +897,7 @@ type FlushCacheResponse struct {
func (x *FlushCacheResponse) Reset() {
*x = FlushCacheResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[19]
+ mi := &file_pkg_services_control_service_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1142,7 +910,7 @@ func (x *FlushCacheResponse) String() string {
func (*FlushCacheResponse) ProtoMessage() {}
func (x *FlushCacheResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[19]
+ mi := &file_pkg_services_control_service_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1155,7 +923,7 @@ func (x *FlushCacheResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use FlushCacheResponse.ProtoReflect.Descriptor instead.
func (*FlushCacheResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15}
}
func (x *FlushCacheResponse) GetBody() *FlushCacheResponse_Body {
@@ -1185,7 +953,7 @@ type DoctorRequest struct {
func (x *DoctorRequest) Reset() {
*x = DoctorRequest{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[20]
+ mi := &file_pkg_services_control_service_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1198,7 +966,7 @@ func (x *DoctorRequest) String() string {
func (*DoctorRequest) ProtoMessage() {}
func (x *DoctorRequest) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[20]
+ mi := &file_pkg_services_control_service_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1211,7 +979,7 @@ func (x *DoctorRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use DoctorRequest.ProtoReflect.Descriptor instead.
func (*DoctorRequest) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16}
}
func (x *DoctorRequest) GetBody() *DoctorRequest_Body {
@@ -1241,7 +1009,7 @@ type DoctorResponse struct {
func (x *DoctorResponse) Reset() {
*x = DoctorResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[21]
+ mi := &file_pkg_services_control_service_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1254,7 +1022,7 @@ func (x *DoctorResponse) String() string {
func (*DoctorResponse) ProtoMessage() {}
func (x *DoctorResponse) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[21]
+ mi := &file_pkg_services_control_service_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1267,7 +1035,7 @@ func (x *DoctorResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use DoctorResponse.ProtoReflect.Descriptor instead.
func (*DoctorResponse) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17}
}
func (x *DoctorResponse) GetBody() *DoctorResponse_Body {
@@ -1294,7 +1062,7 @@ type HealthCheckRequest_Body struct {
func (x *HealthCheckRequest_Body) Reset() {
*x = HealthCheckRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[22]
+ mi := &file_pkg_services_control_service_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1307,7 +1075,7 @@ func (x *HealthCheckRequest_Body) String() string {
func (*HealthCheckRequest_Body) ProtoMessage() {}
func (x *HealthCheckRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[22]
+ mi := &file_pkg_services_control_service_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1338,7 +1106,7 @@ type HealthCheckResponse_Body struct {
func (x *HealthCheckResponse_Body) Reset() {
*x = HealthCheckResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[23]
+ mi := &file_pkg_services_control_service_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1351,7 +1119,7 @@ func (x *HealthCheckResponse_Body) String() string {
func (*HealthCheckResponse_Body) ProtoMessage() {}
func (x *HealthCheckResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[23]
+ mi := &file_pkg_services_control_service_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1402,7 +1170,7 @@ type SetNetmapStatusRequest_Body struct {
func (x *SetNetmapStatusRequest_Body) Reset() {
*x = SetNetmapStatusRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[24]
+ mi := &file_pkg_services_control_service_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1415,7 +1183,7 @@ func (x *SetNetmapStatusRequest_Body) String() string {
func (*SetNetmapStatusRequest_Body) ProtoMessage() {}
func (x *SetNetmapStatusRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[24]
+ mi := &file_pkg_services_control_service_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1455,7 +1223,7 @@ type SetNetmapStatusResponse_Body struct {
func (x *SetNetmapStatusResponse_Body) Reset() {
*x = SetNetmapStatusResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[25]
+ mi := &file_pkg_services_control_service_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1468,7 +1236,7 @@ func (x *SetNetmapStatusResponse_Body) String() string {
func (*SetNetmapStatusResponse_Body) ProtoMessage() {}
func (x *SetNetmapStatusResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[25]
+ mi := &file_pkg_services_control_service_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1498,7 +1266,7 @@ type DropObjectsRequest_Body struct {
func (x *DropObjectsRequest_Body) Reset() {
*x = DropObjectsRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[26]
+ mi := &file_pkg_services_control_service_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1511,7 +1279,7 @@ func (x *DropObjectsRequest_Body) String() string {
func (*DropObjectsRequest_Body) ProtoMessage() {}
func (x *DropObjectsRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[26]
+ mi := &file_pkg_services_control_service_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1544,7 +1312,7 @@ type DropObjectsResponse_Body struct {
func (x *DropObjectsResponse_Body) Reset() {
*x = DropObjectsResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[27]
+ mi := &file_pkg_services_control_service_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1557,7 +1325,7 @@ func (x *DropObjectsResponse_Body) String() string {
func (*DropObjectsResponse_Body) ProtoMessage() {}
func (x *DropObjectsResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[27]
+ mi := &file_pkg_services_control_service_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1583,7 +1351,7 @@ type ListShardsRequest_Body struct {
func (x *ListShardsRequest_Body) Reset() {
*x = ListShardsRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[28]
+ mi := &file_pkg_services_control_service_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1596,7 +1364,7 @@ func (x *ListShardsRequest_Body) String() string {
func (*ListShardsRequest_Body) ProtoMessage() {}
func (x *ListShardsRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[28]
+ mi := &file_pkg_services_control_service_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1625,7 +1393,7 @@ type ListShardsResponse_Body struct {
func (x *ListShardsResponse_Body) Reset() {
*x = ListShardsResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[29]
+ mi := &file_pkg_services_control_service_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1638,7 +1406,7 @@ func (x *ListShardsResponse_Body) String() string {
func (*ListShardsResponse_Body) ProtoMessage() {}
func (x *ListShardsResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[29]
+ mi := &file_pkg_services_control_service_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1678,7 +1446,7 @@ type SetShardModeRequest_Body struct {
func (x *SetShardModeRequest_Body) Reset() {
*x = SetShardModeRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[30]
+ mi := &file_pkg_services_control_service_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1691,7 +1459,7 @@ func (x *SetShardModeRequest_Body) String() string {
func (*SetShardModeRequest_Body) ProtoMessage() {}
func (x *SetShardModeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[30]
+ mi := &file_pkg_services_control_service_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1738,7 +1506,7 @@ type SetShardModeResponse_Body struct {
func (x *SetShardModeResponse_Body) Reset() {
*x = SetShardModeResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[31]
+ mi := &file_pkg_services_control_service_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1751,7 +1519,7 @@ func (x *SetShardModeResponse_Body) String() string {
func (*SetShardModeResponse_Body) ProtoMessage() {}
func (x *SetShardModeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[31]
+ mi := &file_pkg_services_control_service_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1767,218 +1535,6 @@ func (*SetShardModeResponse_Body) Descriptor() ([]byte, []int) {
return file_pkg_services_control_service_proto_rawDescGZIP(), []int{9, 0}
}
-// Request body structure.
-type DumpShardRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Path to the output.
- Filepath string `protobuf:"bytes,2,opt,name=filepath,proto3" json:"filepath,omitempty"`
- // Flag indicating whether object read errors should be ignored.
- IgnoreErrors bool `protobuf:"varint,3,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
-}
-
-func (x *DumpShardRequest_Body) Reset() {
- *x = DumpShardRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DumpShardRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DumpShardRequest_Body) ProtoMessage() {}
-
-func (x *DumpShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DumpShardRequest_Body.ProtoReflect.Descriptor instead.
-func (*DumpShardRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10, 0}
-}
-
-func (x *DumpShardRequest_Body) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *DumpShardRequest_Body) GetFilepath() string {
- if x != nil {
- return x.Filepath
- }
- return ""
-}
-
-func (x *DumpShardRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-
-// Response body structure.
-type DumpShardResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *DumpShardResponse_Body) Reset() {
- *x = DumpShardResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DumpShardResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DumpShardResponse_Body) ProtoMessage() {}
-
-func (x *DumpShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DumpShardResponse_Body.ProtoReflect.Descriptor instead.
-func (*DumpShardResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11, 0}
-}
-
-// Request body structure.
-type RestoreShardRequest_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // ID of the shard.
- Shard_ID []byte `protobuf:"bytes,1,opt,name=shard_ID,json=shardID,proto3" json:"shard_ID,omitempty"`
- // Path to the output.
- Filepath string `protobuf:"bytes,2,opt,name=filepath,proto3" json:"filepath,omitempty"`
- // Flag indicating whether object read errors should be ignored.
- IgnoreErrors bool `protobuf:"varint,3,opt,name=ignore_errors,json=ignoreErrors,proto3" json:"ignore_errors,omitempty"`
-}
-
-func (x *RestoreShardRequest_Body) Reset() {
- *x = RestoreShardRequest_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreShardRequest_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreShardRequest_Body) ProtoMessage() {}
-
-func (x *RestoreShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreShardRequest_Body.ProtoReflect.Descriptor instead.
-func (*RestoreShardRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12, 0}
-}
-
-func (x *RestoreShardRequest_Body) GetShard_ID() []byte {
- if x != nil {
- return x.Shard_ID
- }
- return nil
-}
-
-func (x *RestoreShardRequest_Body) GetFilepath() string {
- if x != nil {
- return x.Filepath
- }
- return ""
-}
-
-func (x *RestoreShardRequest_Body) GetIgnoreErrors() bool {
- if x != nil {
- return x.IgnoreErrors
- }
- return false
-}
-
-// Response body structure.
-type RestoreShardResponse_Body struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *RestoreShardResponse_Body) Reset() {
- *x = RestoreShardResponse_Body{}
- if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreShardResponse_Body) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreShardResponse_Body) ProtoMessage() {}
-
-func (x *RestoreShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreShardResponse_Body.ProtoReflect.Descriptor instead.
-func (*RestoreShardResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13, 0}
-}
-
// Request body structure.
type SynchronizeTreeRequest_Body struct {
state protoimpl.MessageState
@@ -1994,7 +1550,7 @@ type SynchronizeTreeRequest_Body struct {
func (x *SynchronizeTreeRequest_Body) Reset() {
*x = SynchronizeTreeRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[36]
+ mi := &file_pkg_services_control_service_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2007,7 +1563,7 @@ func (x *SynchronizeTreeRequest_Body) String() string {
func (*SynchronizeTreeRequest_Body) ProtoMessage() {}
func (x *SynchronizeTreeRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[36]
+ mi := &file_pkg_services_control_service_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2020,7 +1576,7 @@ func (x *SynchronizeTreeRequest_Body) ProtoReflect() protoreflect.Message {
// Deprecated: Use SynchronizeTreeRequest_Body.ProtoReflect.Descriptor instead.
func (*SynchronizeTreeRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14, 0}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{10, 0}
}
func (x *SynchronizeTreeRequest_Body) GetContainerId() []byte {
@@ -2054,7 +1610,7 @@ type SynchronizeTreeResponse_Body struct {
func (x *SynchronizeTreeResponse_Body) Reset() {
*x = SynchronizeTreeResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[37]
+ mi := &file_pkg_services_control_service_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2067,7 +1623,7 @@ func (x *SynchronizeTreeResponse_Body) String() string {
func (*SynchronizeTreeResponse_Body) ProtoMessage() {}
func (x *SynchronizeTreeResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[37]
+ mi := &file_pkg_services_control_service_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2080,7 +1636,7 @@ func (x *SynchronizeTreeResponse_Body) ProtoReflect() protoreflect.Message {
// Deprecated: Use SynchronizeTreeResponse_Body.ProtoReflect.Descriptor instead.
func (*SynchronizeTreeResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15, 0}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{11, 0}
}
// Request body structure.
@@ -2098,7 +1654,7 @@ type EvacuateShardRequest_Body struct {
func (x *EvacuateShardRequest_Body) Reset() {
*x = EvacuateShardRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[38]
+ mi := &file_pkg_services_control_service_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2111,7 +1667,7 @@ func (x *EvacuateShardRequest_Body) String() string {
func (*EvacuateShardRequest_Body) ProtoMessage() {}
func (x *EvacuateShardRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[38]
+ mi := &file_pkg_services_control_service_proto_msgTypes[30]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2124,7 +1680,7 @@ func (x *EvacuateShardRequest_Body) ProtoReflect() protoreflect.Message {
// Deprecated: Use EvacuateShardRequest_Body.ProtoReflect.Descriptor instead.
func (*EvacuateShardRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16, 0}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{12, 0}
}
func (x *EvacuateShardRequest_Body) GetShard_ID() [][]byte {
@@ -2153,7 +1709,7 @@ type EvacuateShardResponse_Body struct {
func (x *EvacuateShardResponse_Body) Reset() {
*x = EvacuateShardResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[39]
+ mi := &file_pkg_services_control_service_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2166,7 +1722,7 @@ func (x *EvacuateShardResponse_Body) String() string {
func (*EvacuateShardResponse_Body) ProtoMessage() {}
func (x *EvacuateShardResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[39]
+ mi := &file_pkg_services_control_service_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2179,7 +1735,7 @@ func (x *EvacuateShardResponse_Body) ProtoReflect() protoreflect.Message {
// Deprecated: Use EvacuateShardResponse_Body.ProtoReflect.Descriptor instead.
func (*EvacuateShardResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17, 0}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{13, 0}
}
func (x *EvacuateShardResponse_Body) GetCount() uint32 {
@@ -2202,7 +1758,7 @@ type FlushCacheRequest_Body struct {
func (x *FlushCacheRequest_Body) Reset() {
*x = FlushCacheRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[40]
+ mi := &file_pkg_services_control_service_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2215,7 +1771,7 @@ func (x *FlushCacheRequest_Body) String() string {
func (*FlushCacheRequest_Body) ProtoMessage() {}
func (x *FlushCacheRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[40]
+ mi := &file_pkg_services_control_service_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2228,7 +1784,7 @@ func (x *FlushCacheRequest_Body) ProtoReflect() protoreflect.Message {
// Deprecated: Use FlushCacheRequest_Body.ProtoReflect.Descriptor instead.
func (*FlushCacheRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{18, 0}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{14, 0}
}
func (x *FlushCacheRequest_Body) GetShard_ID() [][]byte {
@@ -2248,7 +1804,7 @@ type FlushCacheResponse_Body struct {
func (x *FlushCacheResponse_Body) Reset() {
*x = FlushCacheResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[41]
+ mi := &file_pkg_services_control_service_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2261,7 +1817,7 @@ func (x *FlushCacheResponse_Body) String() string {
func (*FlushCacheResponse_Body) ProtoMessage() {}
func (x *FlushCacheResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[41]
+ mi := &file_pkg_services_control_service_proto_msgTypes[33]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2274,7 +1830,7 @@ func (x *FlushCacheResponse_Body) ProtoReflect() protoreflect.Message {
// Deprecated: Use FlushCacheResponse_Body.ProtoReflect.Descriptor instead.
func (*FlushCacheResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{19, 0}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{15, 0}
}
// Request body structure.
@@ -2292,7 +1848,7 @@ type DoctorRequest_Body struct {
func (x *DoctorRequest_Body) Reset() {
*x = DoctorRequest_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[42]
+ mi := &file_pkg_services_control_service_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2305,7 +1861,7 @@ func (x *DoctorRequest_Body) String() string {
func (*DoctorRequest_Body) ProtoMessage() {}
func (x *DoctorRequest_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[42]
+ mi := &file_pkg_services_control_service_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2318,7 +1874,7 @@ func (x *DoctorRequest_Body) ProtoReflect() protoreflect.Message {
// Deprecated: Use DoctorRequest_Body.ProtoReflect.Descriptor instead.
func (*DoctorRequest_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{20, 0}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{16, 0}
}
func (x *DoctorRequest_Body) GetConcurrency() uint32 {
@@ -2345,7 +1901,7 @@ type DoctorResponse_Body struct {
func (x *DoctorResponse_Body) Reset() {
*x = DoctorResponse_Body{}
if protoimpl.UnsafeEnabled {
- mi := &file_pkg_services_control_service_proto_msgTypes[43]
+ mi := &file_pkg_services_control_service_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2358,7 +1914,7 @@ func (x *DoctorResponse_Body) String() string {
func (*DoctorResponse_Body) ProtoMessage() {}
func (x *DoctorResponse_Body) ProtoReflect() protoreflect.Message {
- mi := &file_pkg_services_control_service_proto_msgTypes[43]
+ mi := &file_pkg_services_control_service_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2371,7 +1927,7 @@ func (x *DoctorResponse_Body) ProtoReflect() protoreflect.Message {
// Deprecated: Use DoctorResponse_Body.ProtoReflect.Descriptor instead.
func (*DoctorResponse_Body) Descriptor() ([]byte, []int) {
- return file_pkg_services_control_service_proto_rawDescGZIP(), []int{21, 0}
+ return file_pkg_services_control_service_proto_rawDescGZIP(), []int{17, 0}
}
var File_pkg_services_control_service_proto protoreflect.FileDescriptor
@@ -2493,194 +2049,140 @@ var file_pkg_services_control_service_proto_rawDesc = []byte{
0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x22, 0xdc, 0x01, 0x0a, 0x10, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x62,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f,
- 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49,
- 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a,
- 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f,
- 0x72, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x11, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x6f, 0x64, 0x79, 0x22, 0xe0, 0x01, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e,
+ 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69,
+ 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x5a, 0x0a, 0x04, 0x42, 0x6f,
+ 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
+ 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x12, 0x16,
+ 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06,
+ 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x17, 0x53, 0x79, 0x6e, 0x63, 0x68,
+ 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
+ 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe2, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x74,
- 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x35, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79,
- 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x62, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x66,
- 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66,
- 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72,
- 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
- 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x88, 0x01, 0x0a,
- 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65,
- 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a,
- 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a,
- 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xe0, 0x01, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x63,
- 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
- 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x5a,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
- 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f,
- 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x72, 0x65,
- 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65,
- 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x17, 0x53,
- 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64,
- 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a, 0x14,
- 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61,
- 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x46,
- 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f,
- 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49,
- 0x44, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65,
- 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x63, 0x75,
- 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x37, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
+ 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc8, 0x01, 0x0a, 0x14, 0x45, 0x76, 0x61, 0x63,
+ 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x36, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
- 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42,
- 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x1c, 0x0a, 0x04, 0x42,
- 0x6f, 0x64, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x11, 0x46, 0x6c,
- 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x33, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63,
- 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04,
- 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
- 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19,
- 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
- 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x46, 0x6c,
- 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x34, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61,
- 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79,
- 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79,
- 0x22, 0xc9, 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f,
- 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62,
- 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x55, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x20, 0x0a,
- 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12,
- 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x7c, 0x0a, 0x0e,
- 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30,
- 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f,
+ 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52,
+ 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x46, 0x0a, 0x04, 0x42, 0x6f,
+ 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, 0x12, 0x23, 0x0a,
+ 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53,
+ 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61,
+ 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52,
+ 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69,
+ 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x1c, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x11, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43,
+ 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x04, 0x62,
+ 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79,
0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69,
0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0xc7, 0x06, 0x0a, 0x0e, 0x43,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a,
- 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65,
- 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e,
- 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a,
- 0x0b, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
- 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b,
- 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c,
- 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72,
- 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d,
- 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x09, 0x44,
- 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x75,
- 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x4b, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12,
- 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72,
- 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53,
- 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f,
- 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12,
- 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72,
- 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68,
- 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68,
- 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76,
- 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61,
- 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65,
- 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68,
- 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63,
- 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63,
- 0x74, 0x6f, 0x72, 0x12, 0x16, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f,
- 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f,
- 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73,
- 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f,
- 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f,
- 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
- 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x72, 0x65, 0x1a, 0x21, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x5f, 0x49, 0x44, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x68,
+ 0x61, 0x72, 0x64, 0x49, 0x44, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43,
+ 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x04,
+ 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x22, 0xc9, 0x01, 0x0a,
+ 0x0d, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f,
+ 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12,
+ 0x30, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67,
+ 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x1a, 0x55, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e,
+ 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b,
+ 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72,
+ 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x44, 0x75,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x7c, 0x0a, 0x0e, 0x44, 0x6f, 0x63, 0x74,
+ 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x30, 0x0a, 0x09,
+ 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x1a, 0x06,
+ 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x32, 0xb6, 0x05, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x6d, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x44, 0x72, 0x6f,
+ 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e,
+ 0x44, 0x72, 0x6f, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64,
+ 0x73, 0x12, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x68, 0x61, 0x72,
+ 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x53, 0x65,
+ 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x68,
+ 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65, 0x54, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a, 0x65,
+ 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x68, 0x72, 0x6f, 0x6e, 0x69, 0x7a,
+ 0x65, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a,
+ 0x0d, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74,
+ 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x45, 0x76, 0x61, 0x63, 0x75, 0x61, 0x74, 0x65,
+ 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a,
+ 0x0a, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1a, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x16,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2e, 0x44, 0x6f, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
+ 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69,
+ 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62,
+ 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b,
+ 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -2695,7 +2197,7 @@ func file_pkg_services_control_service_proto_rawDescGZIP() []byte {
return file_pkg_services_control_service_proto_rawDescData
}
-var file_pkg_services_control_service_proto_msgTypes = make([]protoimpl.MessageInfo, 44)
+var file_pkg_services_control_service_proto_msgTypes = make([]protoimpl.MessageInfo, 36)
var file_pkg_services_control_service_proto_goTypes = []interface{}{
(*HealthCheckRequest)(nil), // 0: control.HealthCheckRequest
(*HealthCheckResponse)(nil), // 1: control.HealthCheckResponse
@@ -2707,123 +2209,103 @@ var file_pkg_services_control_service_proto_goTypes = []interface{}{
(*ListShardsResponse)(nil), // 7: control.ListShardsResponse
(*SetShardModeRequest)(nil), // 8: control.SetShardModeRequest
(*SetShardModeResponse)(nil), // 9: control.SetShardModeResponse
- (*DumpShardRequest)(nil), // 10: control.DumpShardRequest
- (*DumpShardResponse)(nil), // 11: control.DumpShardResponse
- (*RestoreShardRequest)(nil), // 12: control.RestoreShardRequest
- (*RestoreShardResponse)(nil), // 13: control.RestoreShardResponse
- (*SynchronizeTreeRequest)(nil), // 14: control.SynchronizeTreeRequest
- (*SynchronizeTreeResponse)(nil), // 15: control.SynchronizeTreeResponse
- (*EvacuateShardRequest)(nil), // 16: control.EvacuateShardRequest
- (*EvacuateShardResponse)(nil), // 17: control.EvacuateShardResponse
- (*FlushCacheRequest)(nil), // 18: control.FlushCacheRequest
- (*FlushCacheResponse)(nil), // 19: control.FlushCacheResponse
- (*DoctorRequest)(nil), // 20: control.DoctorRequest
- (*DoctorResponse)(nil), // 21: control.DoctorResponse
- (*HealthCheckRequest_Body)(nil), // 22: control.HealthCheckRequest.Body
- (*HealthCheckResponse_Body)(nil), // 23: control.HealthCheckResponse.Body
- (*SetNetmapStatusRequest_Body)(nil), // 24: control.SetNetmapStatusRequest.Body
- (*SetNetmapStatusResponse_Body)(nil), // 25: control.SetNetmapStatusResponse.Body
- (*DropObjectsRequest_Body)(nil), // 26: control.DropObjectsRequest.Body
- (*DropObjectsResponse_Body)(nil), // 27: control.DropObjectsResponse.Body
- (*ListShardsRequest_Body)(nil), // 28: control.ListShardsRequest.Body
- (*ListShardsResponse_Body)(nil), // 29: control.ListShardsResponse.Body
- (*SetShardModeRequest_Body)(nil), // 30: control.SetShardModeRequest.Body
- (*SetShardModeResponse_Body)(nil), // 31: control.SetShardModeResponse.Body
- (*DumpShardRequest_Body)(nil), // 32: control.DumpShardRequest.Body
- (*DumpShardResponse_Body)(nil), // 33: control.DumpShardResponse.Body
- (*RestoreShardRequest_Body)(nil), // 34: control.RestoreShardRequest.Body
- (*RestoreShardResponse_Body)(nil), // 35: control.RestoreShardResponse.Body
- (*SynchronizeTreeRequest_Body)(nil), // 36: control.SynchronizeTreeRequest.Body
- (*SynchronizeTreeResponse_Body)(nil), // 37: control.SynchronizeTreeResponse.Body
- (*EvacuateShardRequest_Body)(nil), // 38: control.EvacuateShardRequest.Body
- (*EvacuateShardResponse_Body)(nil), // 39: control.EvacuateShardResponse.Body
- (*FlushCacheRequest_Body)(nil), // 40: control.FlushCacheRequest.Body
- (*FlushCacheResponse_Body)(nil), // 41: control.FlushCacheResponse.Body
- (*DoctorRequest_Body)(nil), // 42: control.DoctorRequest.Body
- (*DoctorResponse_Body)(nil), // 43: control.DoctorResponse.Body
- (*Signature)(nil), // 44: control.Signature
- (NetmapStatus)(0), // 45: control.NetmapStatus
- (HealthStatus)(0), // 46: control.HealthStatus
- (*ShardInfo)(nil), // 47: control.ShardInfo
- (ShardMode)(0), // 48: control.ShardMode
+ (*SynchronizeTreeRequest)(nil), // 10: control.SynchronizeTreeRequest
+ (*SynchronizeTreeResponse)(nil), // 11: control.SynchronizeTreeResponse
+ (*EvacuateShardRequest)(nil), // 12: control.EvacuateShardRequest
+ (*EvacuateShardResponse)(nil), // 13: control.EvacuateShardResponse
+ (*FlushCacheRequest)(nil), // 14: control.FlushCacheRequest
+ (*FlushCacheResponse)(nil), // 15: control.FlushCacheResponse
+ (*DoctorRequest)(nil), // 16: control.DoctorRequest
+ (*DoctorResponse)(nil), // 17: control.DoctorResponse
+ (*HealthCheckRequest_Body)(nil), // 18: control.HealthCheckRequest.Body
+ (*HealthCheckResponse_Body)(nil), // 19: control.HealthCheckResponse.Body
+ (*SetNetmapStatusRequest_Body)(nil), // 20: control.SetNetmapStatusRequest.Body
+ (*SetNetmapStatusResponse_Body)(nil), // 21: control.SetNetmapStatusResponse.Body
+ (*DropObjectsRequest_Body)(nil), // 22: control.DropObjectsRequest.Body
+ (*DropObjectsResponse_Body)(nil), // 23: control.DropObjectsResponse.Body
+ (*ListShardsRequest_Body)(nil), // 24: control.ListShardsRequest.Body
+ (*ListShardsResponse_Body)(nil), // 25: control.ListShardsResponse.Body
+ (*SetShardModeRequest_Body)(nil), // 26: control.SetShardModeRequest.Body
+ (*SetShardModeResponse_Body)(nil), // 27: control.SetShardModeResponse.Body
+ (*SynchronizeTreeRequest_Body)(nil), // 28: control.SynchronizeTreeRequest.Body
+ (*SynchronizeTreeResponse_Body)(nil), // 29: control.SynchronizeTreeResponse.Body
+ (*EvacuateShardRequest_Body)(nil), // 30: control.EvacuateShardRequest.Body
+ (*EvacuateShardResponse_Body)(nil), // 31: control.EvacuateShardResponse.Body
+ (*FlushCacheRequest_Body)(nil), // 32: control.FlushCacheRequest.Body
+ (*FlushCacheResponse_Body)(nil), // 33: control.FlushCacheResponse.Body
+ (*DoctorRequest_Body)(nil), // 34: control.DoctorRequest.Body
+ (*DoctorResponse_Body)(nil), // 35: control.DoctorResponse.Body
+ (*Signature)(nil), // 36: control.Signature
+ (NetmapStatus)(0), // 37: control.NetmapStatus
+ (HealthStatus)(0), // 38: control.HealthStatus
+ (*ShardInfo)(nil), // 39: control.ShardInfo
+ (ShardMode)(0), // 40: control.ShardMode
}
var file_pkg_services_control_service_proto_depIdxs = []int32{
- 22, // 0: control.HealthCheckRequest.body:type_name -> control.HealthCheckRequest.Body
- 44, // 1: control.HealthCheckRequest.signature:type_name -> control.Signature
- 23, // 2: control.HealthCheckResponse.body:type_name -> control.HealthCheckResponse.Body
- 44, // 3: control.HealthCheckResponse.signature:type_name -> control.Signature
- 24, // 4: control.SetNetmapStatusRequest.body:type_name -> control.SetNetmapStatusRequest.Body
- 44, // 5: control.SetNetmapStatusRequest.signature:type_name -> control.Signature
- 25, // 6: control.SetNetmapStatusResponse.body:type_name -> control.SetNetmapStatusResponse.Body
- 44, // 7: control.SetNetmapStatusResponse.signature:type_name -> control.Signature
- 26, // 8: control.DropObjectsRequest.body:type_name -> control.DropObjectsRequest.Body
- 44, // 9: control.DropObjectsRequest.signature:type_name -> control.Signature
- 27, // 10: control.DropObjectsResponse.body:type_name -> control.DropObjectsResponse.Body
- 44, // 11: control.DropObjectsResponse.signature:type_name -> control.Signature
- 28, // 12: control.ListShardsRequest.body:type_name -> control.ListShardsRequest.Body
- 44, // 13: control.ListShardsRequest.signature:type_name -> control.Signature
- 29, // 14: control.ListShardsResponse.body:type_name -> control.ListShardsResponse.Body
- 44, // 15: control.ListShardsResponse.signature:type_name -> control.Signature
- 30, // 16: control.SetShardModeRequest.body:type_name -> control.SetShardModeRequest.Body
- 44, // 17: control.SetShardModeRequest.signature:type_name -> control.Signature
- 31, // 18: control.SetShardModeResponse.body:type_name -> control.SetShardModeResponse.Body
- 44, // 19: control.SetShardModeResponse.signature:type_name -> control.Signature
- 32, // 20: control.DumpShardRequest.body:type_name -> control.DumpShardRequest.Body
- 44, // 21: control.DumpShardRequest.signature:type_name -> control.Signature
- 33, // 22: control.DumpShardResponse.body:type_name -> control.DumpShardResponse.Body
- 44, // 23: control.DumpShardResponse.signature:type_name -> control.Signature
- 34, // 24: control.RestoreShardRequest.body:type_name -> control.RestoreShardRequest.Body
- 44, // 25: control.RestoreShardRequest.signature:type_name -> control.Signature
- 35, // 26: control.RestoreShardResponse.body:type_name -> control.RestoreShardResponse.Body
- 44, // 27: control.RestoreShardResponse.signature:type_name -> control.Signature
- 36, // 28: control.SynchronizeTreeRequest.body:type_name -> control.SynchronizeTreeRequest.Body
- 44, // 29: control.SynchronizeTreeRequest.signature:type_name -> control.Signature
- 37, // 30: control.SynchronizeTreeResponse.body:type_name -> control.SynchronizeTreeResponse.Body
- 44, // 31: control.SynchronizeTreeResponse.signature:type_name -> control.Signature
- 38, // 32: control.EvacuateShardRequest.body:type_name -> control.EvacuateShardRequest.Body
- 44, // 33: control.EvacuateShardRequest.signature:type_name -> control.Signature
- 39, // 34: control.EvacuateShardResponse.body:type_name -> control.EvacuateShardResponse.Body
- 44, // 35: control.EvacuateShardResponse.signature:type_name -> control.Signature
- 40, // 36: control.FlushCacheRequest.body:type_name -> control.FlushCacheRequest.Body
- 44, // 37: control.FlushCacheRequest.signature:type_name -> control.Signature
- 41, // 38: control.FlushCacheResponse.body:type_name -> control.FlushCacheResponse.Body
- 44, // 39: control.FlushCacheResponse.signature:type_name -> control.Signature
- 42, // 40: control.DoctorRequest.body:type_name -> control.DoctorRequest.Body
- 44, // 41: control.DoctorRequest.signature:type_name -> control.Signature
- 43, // 42: control.DoctorResponse.body:type_name -> control.DoctorResponse.Body
- 44, // 43: control.DoctorResponse.signature:type_name -> control.Signature
- 45, // 44: control.HealthCheckResponse.Body.netmap_status:type_name -> control.NetmapStatus
- 46, // 45: control.HealthCheckResponse.Body.health_status:type_name -> control.HealthStatus
- 45, // 46: control.SetNetmapStatusRequest.Body.status:type_name -> control.NetmapStatus
- 47, // 47: control.ListShardsResponse.Body.shards:type_name -> control.ShardInfo
- 48, // 48: control.SetShardModeRequest.Body.mode:type_name -> control.ShardMode
- 0, // 49: control.ControlService.HealthCheck:input_type -> control.HealthCheckRequest
- 2, // 50: control.ControlService.SetNetmapStatus:input_type -> control.SetNetmapStatusRequest
- 4, // 51: control.ControlService.DropObjects:input_type -> control.DropObjectsRequest
- 6, // 52: control.ControlService.ListShards:input_type -> control.ListShardsRequest
- 8, // 53: control.ControlService.SetShardMode:input_type -> control.SetShardModeRequest
- 10, // 54: control.ControlService.DumpShard:input_type -> control.DumpShardRequest
- 12, // 55: control.ControlService.RestoreShard:input_type -> control.RestoreShardRequest
- 14, // 56: control.ControlService.SynchronizeTree:input_type -> control.SynchronizeTreeRequest
- 16, // 57: control.ControlService.EvacuateShard:input_type -> control.EvacuateShardRequest
- 18, // 58: control.ControlService.FlushCache:input_type -> control.FlushCacheRequest
- 20, // 59: control.ControlService.Doctor:input_type -> control.DoctorRequest
- 1, // 60: control.ControlService.HealthCheck:output_type -> control.HealthCheckResponse
- 3, // 61: control.ControlService.SetNetmapStatus:output_type -> control.SetNetmapStatusResponse
- 5, // 62: control.ControlService.DropObjects:output_type -> control.DropObjectsResponse
- 7, // 63: control.ControlService.ListShards:output_type -> control.ListShardsResponse
- 9, // 64: control.ControlService.SetShardMode:output_type -> control.SetShardModeResponse
- 11, // 65: control.ControlService.DumpShard:output_type -> control.DumpShardResponse
- 13, // 66: control.ControlService.RestoreShard:output_type -> control.RestoreShardResponse
- 15, // 67: control.ControlService.SynchronizeTree:output_type -> control.SynchronizeTreeResponse
- 17, // 68: control.ControlService.EvacuateShard:output_type -> control.EvacuateShardResponse
- 19, // 69: control.ControlService.FlushCache:output_type -> control.FlushCacheResponse
- 21, // 70: control.ControlService.Doctor:output_type -> control.DoctorResponse
- 60, // [60:71] is the sub-list for method output_type
- 49, // [49:60] is the sub-list for method input_type
- 49, // [49:49] is the sub-list for extension type_name
- 49, // [49:49] is the sub-list for extension extendee
- 0, // [0:49] is the sub-list for field type_name
+ 18, // 0: control.HealthCheckRequest.body:type_name -> control.HealthCheckRequest.Body
+ 36, // 1: control.HealthCheckRequest.signature:type_name -> control.Signature
+ 19, // 2: control.HealthCheckResponse.body:type_name -> control.HealthCheckResponse.Body
+ 36, // 3: control.HealthCheckResponse.signature:type_name -> control.Signature
+ 20, // 4: control.SetNetmapStatusRequest.body:type_name -> control.SetNetmapStatusRequest.Body
+ 36, // 5: control.SetNetmapStatusRequest.signature:type_name -> control.Signature
+ 21, // 6: control.SetNetmapStatusResponse.body:type_name -> control.SetNetmapStatusResponse.Body
+ 36, // 7: control.SetNetmapStatusResponse.signature:type_name -> control.Signature
+ 22, // 8: control.DropObjectsRequest.body:type_name -> control.DropObjectsRequest.Body
+ 36, // 9: control.DropObjectsRequest.signature:type_name -> control.Signature
+ 23, // 10: control.DropObjectsResponse.body:type_name -> control.DropObjectsResponse.Body
+ 36, // 11: control.DropObjectsResponse.signature:type_name -> control.Signature
+ 24, // 12: control.ListShardsRequest.body:type_name -> control.ListShardsRequest.Body
+ 36, // 13: control.ListShardsRequest.signature:type_name -> control.Signature
+ 25, // 14: control.ListShardsResponse.body:type_name -> control.ListShardsResponse.Body
+ 36, // 15: control.ListShardsResponse.signature:type_name -> control.Signature
+ 26, // 16: control.SetShardModeRequest.body:type_name -> control.SetShardModeRequest.Body
+ 36, // 17: control.SetShardModeRequest.signature:type_name -> control.Signature
+ 27, // 18: control.SetShardModeResponse.body:type_name -> control.SetShardModeResponse.Body
+ 36, // 19: control.SetShardModeResponse.signature:type_name -> control.Signature
+ 28, // 20: control.SynchronizeTreeRequest.body:type_name -> control.SynchronizeTreeRequest.Body
+ 36, // 21: control.SynchronizeTreeRequest.signature:type_name -> control.Signature
+ 29, // 22: control.SynchronizeTreeResponse.body:type_name -> control.SynchronizeTreeResponse.Body
+ 36, // 23: control.SynchronizeTreeResponse.signature:type_name -> control.Signature
+ 30, // 24: control.EvacuateShardRequest.body:type_name -> control.EvacuateShardRequest.Body
+ 36, // 25: control.EvacuateShardRequest.signature:type_name -> control.Signature
+ 31, // 26: control.EvacuateShardResponse.body:type_name -> control.EvacuateShardResponse.Body
+ 36, // 27: control.EvacuateShardResponse.signature:type_name -> control.Signature
+ 32, // 28: control.FlushCacheRequest.body:type_name -> control.FlushCacheRequest.Body
+ 36, // 29: control.FlushCacheRequest.signature:type_name -> control.Signature
+ 33, // 30: control.FlushCacheResponse.body:type_name -> control.FlushCacheResponse.Body
+ 36, // 31: control.FlushCacheResponse.signature:type_name -> control.Signature
+ 34, // 32: control.DoctorRequest.body:type_name -> control.DoctorRequest.Body
+ 36, // 33: control.DoctorRequest.signature:type_name -> control.Signature
+ 35, // 34: control.DoctorResponse.body:type_name -> control.DoctorResponse.Body
+ 36, // 35: control.DoctorResponse.signature:type_name -> control.Signature
+ 37, // 36: control.HealthCheckResponse.Body.netmap_status:type_name -> control.NetmapStatus
+ 38, // 37: control.HealthCheckResponse.Body.health_status:type_name -> control.HealthStatus
+ 37, // 38: control.SetNetmapStatusRequest.Body.status:type_name -> control.NetmapStatus
+ 39, // 39: control.ListShardsResponse.Body.shards:type_name -> control.ShardInfo
+ 40, // 40: control.SetShardModeRequest.Body.mode:type_name -> control.ShardMode
+ 0, // 41: control.ControlService.HealthCheck:input_type -> control.HealthCheckRequest
+ 2, // 42: control.ControlService.SetNetmapStatus:input_type -> control.SetNetmapStatusRequest
+ 4, // 43: control.ControlService.DropObjects:input_type -> control.DropObjectsRequest
+ 6, // 44: control.ControlService.ListShards:input_type -> control.ListShardsRequest
+ 8, // 45: control.ControlService.SetShardMode:input_type -> control.SetShardModeRequest
+ 10, // 46: control.ControlService.SynchronizeTree:input_type -> control.SynchronizeTreeRequest
+ 12, // 47: control.ControlService.EvacuateShard:input_type -> control.EvacuateShardRequest
+ 14, // 48: control.ControlService.FlushCache:input_type -> control.FlushCacheRequest
+ 16, // 49: control.ControlService.Doctor:input_type -> control.DoctorRequest
+ 1, // 50: control.ControlService.HealthCheck:output_type -> control.HealthCheckResponse
+ 3, // 51: control.ControlService.SetNetmapStatus:output_type -> control.SetNetmapStatusResponse
+ 5, // 52: control.ControlService.DropObjects:output_type -> control.DropObjectsResponse
+ 7, // 53: control.ControlService.ListShards:output_type -> control.ListShardsResponse
+ 9, // 54: control.ControlService.SetShardMode:output_type -> control.SetShardModeResponse
+ 11, // 55: control.ControlService.SynchronizeTree:output_type -> control.SynchronizeTreeResponse
+ 13, // 56: control.ControlService.EvacuateShard:output_type -> control.EvacuateShardResponse
+ 15, // 57: control.ControlService.FlushCache:output_type -> control.FlushCacheResponse
+ 17, // 58: control.ControlService.Doctor:output_type -> control.DoctorResponse
+ 50, // [50:59] is the sub-list for method output_type
+ 41, // [41:50] is the sub-list for method input_type
+ 41, // [41:41] is the sub-list for extension type_name
+ 41, // [41:41] is the sub-list for extension extendee
+ 0, // [0:41] is the sub-list for field type_name
}
func init() { file_pkg_services_control_service_proto_init() }
@@ -2954,54 +2436,6 @@ func file_pkg_services_control_service_proto_init() {
}
}
file_pkg_services_control_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeTreeRequest); i {
case 0:
return &v.state
@@ -3013,7 +2447,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeTreeResponse); i {
case 0:
return &v.state
@@ -3025,7 +2459,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EvacuateShardRequest); i {
case 0:
return &v.state
@@ -3037,7 +2471,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EvacuateShardResponse); i {
case 0:
return &v.state
@@ -3049,7 +2483,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FlushCacheRequest); i {
case 0:
return &v.state
@@ -3061,7 +2495,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FlushCacheResponse); i {
case 0:
return &v.state
@@ -3073,7 +2507,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DoctorRequest); i {
case 0:
return &v.state
@@ -3085,7 +2519,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DoctorResponse); i {
case 0:
return &v.state
@@ -3097,7 +2531,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*HealthCheckRequest_Body); i {
case 0:
return &v.state
@@ -3109,7 +2543,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*HealthCheckResponse_Body); i {
case 0:
return &v.state
@@ -3121,7 +2555,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetNetmapStatusRequest_Body); i {
case 0:
return &v.state
@@ -3133,7 +2567,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetNetmapStatusResponse_Body); i {
case 0:
return &v.state
@@ -3145,7 +2579,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DropObjectsRequest_Body); i {
case 0:
return &v.state
@@ -3157,7 +2591,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DropObjectsResponse_Body); i {
case 0:
return &v.state
@@ -3169,7 +2603,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListShardsRequest_Body); i {
case 0:
return &v.state
@@ -3181,7 +2615,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListShardsResponse_Body); i {
case 0:
return &v.state
@@ -3193,7 +2627,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetShardModeRequest_Body); i {
case 0:
return &v.state
@@ -3205,7 +2639,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SetShardModeResponse_Body); i {
case 0:
return &v.state
@@ -3217,55 +2651,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DumpShardResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardRequest_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreShardResponse_Body); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_pkg_services_control_service_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeTreeRequest_Body); i {
case 0:
return &v.state
@@ -3277,7 +2663,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SynchronizeTreeResponse_Body); i {
case 0:
return &v.state
@@ -3289,7 +2675,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EvacuateShardRequest_Body); i {
case 0:
return &v.state
@@ -3301,7 +2687,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EvacuateShardResponse_Body); i {
case 0:
return &v.state
@@ -3313,7 +2699,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FlushCacheRequest_Body); i {
case 0:
return &v.state
@@ -3325,7 +2711,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FlushCacheResponse_Body); i {
case 0:
return &v.state
@@ -3337,7 +2723,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DoctorRequest_Body); i {
case 0:
return &v.state
@@ -3349,7 +2735,7 @@ func file_pkg_services_control_service_proto_init() {
return nil
}
}
- file_pkg_services_control_service_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ file_pkg_services_control_service_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DoctorResponse_Body); i {
case 0:
return &v.state
@@ -3368,7 +2754,7 @@ func file_pkg_services_control_service_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkg_services_control_service_proto_rawDesc,
NumEnums: 0,
- NumMessages: 44,
+ NumMessages: 36,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 7c661e661..32a87c744 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -23,12 +23,6 @@ service ControlService {
// Sets mode of the shard.
rpc SetShardMode (SetShardModeRequest) returns (SetShardModeResponse);
- // Dump objects from the shard.
- rpc DumpShard (DumpShardRequest) returns (DumpShardResponse);
-
- // Restore objects from dump.
- rpc RestoreShard (RestoreShardRequest) returns (RestoreShardResponse);
-
// Synchronizes all log operations for the specified tree.
rpc SynchronizeTree (SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
@@ -201,75 +195,6 @@ message SetShardModeResponse {
Signature signature = 2;
}
-// DumpShard request.
-message DumpShardRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- bytes shard_ID = 1;
-
- // Path to the output.
- string filepath = 2;
-
- // Flag indicating whether object read errors should be ignored.
- bool ignore_errors = 3;
- }
-
- // Body of dump shard request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// DumpShard response.
-message DumpShardResponse {
- // Response body structure.
- message Body {
- }
-
- // Body of dump shard response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-
-// RestoreShard request.
-message RestoreShardRequest {
- // Request body structure.
- message Body {
- // ID of the shard.
- bytes shard_ID = 1;
-
- // Path to the output.
- string filepath = 2;
-
- // Flag indicating whether object read errors should be ignored.
- bool ignore_errors = 3;
- }
-
- // Body of restore shard request message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
-// RestoreShard response.
-message RestoreShardResponse {
- // Response body structure.
- message Body {
- }
-
- // Body of restore shard response message.
- Body body = 1;
-
- // Body signature.
- Signature signature = 2;
-}
-
// SynchronizeTree request.
message SynchronizeTreeRequest {
// Request body structure.
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 0f50d5893..b9b865a90 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -771,316 +771,6 @@ func (x *SetShardModeResponse) SetSignature(sig *Signature) {
x.Signature = sig
}
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DumpShardRequest_Body) StableSize() (size int) {
- size += proto.BytesSize(1, x.Shard_ID)
- size += proto.StringSize(2, x.Filepath)
- size += proto.BoolSize(3, x.IgnoreErrors)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DumpShardRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.StringMarshal(2, buf[offset:], x.Filepath)
- offset += proto.BoolMarshal(3, buf[offset:], x.IgnoreErrors)
- return buf
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DumpShardRequest) StableSize() (size int) {
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DumpShardRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *DumpShardRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *DumpShardRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
-}
-
-func (x *DumpShardRequest) SetSignature(sig *Signature) {
- x.Signature = sig
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DumpShardResponse_Body) StableSize() (size int) {
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DumpShardResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *DumpShardResponse) StableSize() (size int) {
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *DumpShardResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *DumpShardResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *DumpShardResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
-}
-
-func (x *DumpShardResponse) SetSignature(sig *Signature) {
- x.Signature = sig
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RestoreShardRequest_Body) StableSize() (size int) {
- size += proto.BytesSize(1, x.Shard_ID)
- size += proto.StringSize(2, x.Filepath)
- size += proto.BoolSize(3, x.IgnoreErrors)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RestoreShardRequest_Body) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.BytesMarshal(1, buf[offset:], x.Shard_ID)
- offset += proto.StringMarshal(2, buf[offset:], x.Filepath)
- offset += proto.BoolMarshal(3, buf[offset:], x.IgnoreErrors)
- return buf
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RestoreShardRequest) StableSize() (size int) {
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RestoreShardRequest) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RestoreShardRequest) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RestoreShardRequest) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
-}
-
-func (x *RestoreShardRequest) SetSignature(sig *Signature) {
- x.Signature = sig
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RestoreShardResponse_Body) StableSize() (size int) {
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RestoreShardResponse_Body) StableMarshal(buf []byte) []byte {
- return buf
-}
-
-// StableSize returns the size of x in protobuf format.
-//
-// Structures with the same field values have the same binary size.
-func (x *RestoreShardResponse) StableSize() (size int) {
- size += proto.NestedStructureSize(1, x.Body)
- size += proto.NestedStructureSize(2, x.Signature)
- return size
-}
-
-// StableMarshal marshals x in protobuf binary format with stable field order.
-//
-// If buffer length is less than x.StableSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same binary format.
-func (x *RestoreShardResponse) StableMarshal(buf []byte) []byte {
- if x == nil {
- return []byte{}
- }
- if buf == nil {
- buf = make([]byte, x.StableSize())
- }
- var offset int
- offset += proto.NestedStructureMarshal(1, buf[offset:], x.Body)
- offset += proto.NestedStructureMarshal(2, buf[offset:], x.Signature)
- return buf
-}
-
-// ReadSignedData fills buf with signed data of x.
-// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
-//
-// Returns any error encountered which did not allow writing the data completely.
-// Otherwise, returns the buffer in which the data is written.
-//
-// Structures with the same field values have the same signed data.
-func (x *RestoreShardResponse) SignedDataSize() int {
- return x.GetBody().StableSize()
-}
-
-// SignedDataSize returns size of the request signed data in bytes.
-//
-// Structures with the same field values have the same signed data size.
-func (x *RestoreShardResponse) ReadSignedData(buf []byte) ([]byte, error) {
- return x.GetBody().StableMarshal(buf), nil
-}
-
-func (x *RestoreShardResponse) SetSignature(sig *Signature) {
- x.Signature = sig
-}
-
// StableSize returns the size of x in protobuf format.
//
// Structures with the same field values have the same binary size.
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index 4a4fbeac1..1e8dd9e3c 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -24,8 +24,6 @@ const (
ControlService_DropObjects_FullMethodName = "/control.ControlService/DropObjects"
ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards"
ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode"
- ControlService_DumpShard_FullMethodName = "/control.ControlService/DumpShard"
- ControlService_RestoreShard_FullMethodName = "/control.ControlService/RestoreShard"
ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree"
ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard"
ControlService_FlushCache_FullMethodName = "/control.ControlService/FlushCache"
@@ -46,10 +44,6 @@ type ControlServiceClient interface {
ListShards(ctx context.Context, in *ListShardsRequest, opts ...grpc.CallOption) (*ListShardsResponse, error)
// Sets mode of the shard.
SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error)
- // Dump objects from the shard.
- DumpShard(ctx context.Context, in *DumpShardRequest, opts ...grpc.CallOption) (*DumpShardResponse, error)
- // Restore objects from dump.
- RestoreShard(ctx context.Context, in *RestoreShardRequest, opts ...grpc.CallOption) (*RestoreShardResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error)
// EvacuateShard moves all data from one shard to the others.
@@ -113,24 +107,6 @@ func (c *controlServiceClient) SetShardMode(ctx context.Context, in *SetShardMod
return out, nil
}
-func (c *controlServiceClient) DumpShard(ctx context.Context, in *DumpShardRequest, opts ...grpc.CallOption) (*DumpShardResponse, error) {
- out := new(DumpShardResponse)
- err := c.cc.Invoke(ctx, ControlService_DumpShard_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *controlServiceClient) RestoreShard(ctx context.Context, in *RestoreShardRequest, opts ...grpc.CallOption) (*RestoreShardResponse, error) {
- out := new(RestoreShardResponse)
- err := c.cc.Invoke(ctx, ControlService_RestoreShard_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error) {
out := new(SynchronizeTreeResponse)
err := c.cc.Invoke(ctx, ControlService_SynchronizeTree_FullMethodName, in, out, opts...)
@@ -181,10 +157,6 @@ type ControlServiceServer interface {
ListShards(context.Context, *ListShardsRequest) (*ListShardsResponse, error)
// Sets mode of the shard.
SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error)
- // Dump objects from the shard.
- DumpShard(context.Context, *DumpShardRequest) (*DumpShardResponse, error)
- // Restore objects from dump.
- RestoreShard(context.Context, *RestoreShardRequest) (*RestoreShardResponse, error)
// Synchronizes all log operations for the specified tree.
SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error)
// EvacuateShard moves all data from one shard to the others.
@@ -214,12 +186,6 @@ func (UnimplementedControlServiceServer) ListShards(context.Context, *ListShards
func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetShardMode not implemented")
}
-func (UnimplementedControlServiceServer) DumpShard(context.Context, *DumpShardRequest) (*DumpShardResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DumpShard not implemented")
-}
-func (UnimplementedControlServiceServer) RestoreShard(context.Context, *RestoreShardRequest) (*RestoreShardResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RestoreShard not implemented")
-}
func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented")
}
@@ -334,42 +300,6 @@ func _ControlService_SetShardMode_Handler(srv interface{}, ctx context.Context,
return interceptor(ctx, in, info, handler)
}
-func _ControlService_DumpShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DumpShardRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).DumpShard(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_DumpShard_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).DumpShard(ctx, req.(*DumpShardRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _ControlService_RestoreShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RestoreShardRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServiceServer).RestoreShard(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: ControlService_RestoreShard_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServiceServer).RestoreShard(ctx, req.(*RestoreShardRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SynchronizeTreeRequest)
if err := dec(in); err != nil {
@@ -469,14 +399,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SetShardMode",
Handler: _ControlService_SetShardMode_Handler,
},
- {
- MethodName: "DumpShard",
- Handler: _ControlService_DumpShard_Handler,
- },
- {
- MethodName: "RestoreShard",
- Handler: _ControlService_RestoreShard_Handler,
- },
{
MethodName: "SynchronizeTree",
Handler: _ControlService_SynchronizeTree_Handler,
From 0b42a00a60a5d5e1fd8da386c910296f1ae19647 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 14 Apr 2023 14:22:23 +0300
Subject: [PATCH 0120/1943] [#254] innerring: Remove unused TimersHandlers()
method from processors
Signed-off-by: Evgenii Stratonikov
---
pkg/innerring/bindings.go | 1 -
pkg/innerring/processors/alphabet/processor.go | 5 -----
pkg/innerring/processors/audit/processor.go | 5 -----
pkg/innerring/processors/balance/processor.go | 5 -----
pkg/innerring/processors/container/processor.go | 5 -----
pkg/innerring/processors/frostfs/processor.go | 5 -----
pkg/innerring/processors/governance/processor.go | 5 -----
pkg/innerring/processors/netmap/processor.go | 5 -----
8 files changed, 36 deletions(-)
diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go
index 0e10125c3..c4de07a5f 100644
--- a/pkg/innerring/bindings.go
+++ b/pkg/innerring/bindings.go
@@ -12,7 +12,6 @@ type (
ListenerNotificationHandlers() []event.NotificationHandlerInfo
ListenerNotaryParsers() []event.NotaryParserInfo
ListenerNotaryHandlers() []event.NotaryHandlerInfo
- TimersHandlers() []event.NotificationHandlerInfo
}
)
diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go
index 79b61f14f..169bfb3e2 100644
--- a/pkg/innerring/processors/alphabet/processor.go
+++ b/pkg/innerring/processors/alphabet/processor.go
@@ -106,8 +106,3 @@ func (ap *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
func (ap *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return nil
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (ap *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
diff --git a/pkg/innerring/processors/audit/processor.go b/pkg/innerring/processors/audit/processor.go
index 31e8a8c55..6e0a98209 100644
--- a/pkg/innerring/processors/audit/processor.go
+++ b/pkg/innerring/processors/audit/processor.go
@@ -130,11 +130,6 @@ func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerI
return nil
}
-// TimersHandlers for the 'Timers' event producer.
-func (ap *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
-
// StartAuditHandler for the internal event producer.
func (ap *Processor) StartAuditHandler() event.Handler {
return ap.handleNewAuditRound
diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go
index 370d06f44..7ae639e89 100644
--- a/pkg/innerring/processors/balance/processor.go
+++ b/pkg/innerring/processors/balance/processor.go
@@ -115,8 +115,3 @@ func (bp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
func (bp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return nil
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (bp *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go
index 123ba77b8..56d2eee96 100644
--- a/pkg/innerring/processors/container/processor.go
+++ b/pkg/innerring/processors/container/processor.go
@@ -238,8 +238,3 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return hh
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (cp *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go
index 4d5bdee78..42362eeed 100644
--- a/pkg/innerring/processors/frostfs/processor.go
+++ b/pkg/innerring/processors/frostfs/processor.go
@@ -226,8 +226,3 @@ func (np *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
func (np *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return nil
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (np *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go
index 9397186ee..b65dd17b7 100644
--- a/pkg/innerring/processors/governance/processor.go
+++ b/pkg/innerring/processors/governance/processor.go
@@ -161,8 +161,3 @@ func (gp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
func (gp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return nil
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (gp *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go
index 85a123ef3..035ccfafd 100644
--- a/pkg/innerring/processors/netmap/processor.go
+++ b/pkg/innerring/processors/netmap/processor.go
@@ -297,8 +297,3 @@ func (np *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
return hh
}
-
-// TimersHandlers for the 'Timers' event producer.
-func (np *Processor) TimersHandlers() []event.NotificationHandlerInfo {
- return nil
-}
From 262c9c2b93dbb38c3353800224fd9bdaa6c0fa05 Mon Sep 17 00:00:00 2001
From: Pavel Karpy
Date: Fri, 14 Apr 2023 16:42:20 +0300
Subject: [PATCH 0121/1943] [#256] blobovniczaTree: Make `Exists` test stable
Corrupt and request _the same_ file.
Signed-off-by: Pavel Karpy
---
.../blobstor/blobovniczatree/exists_test.go | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
index ff927ccbb..8d9fe526e 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go
@@ -50,16 +50,15 @@ func TestExistsInvalidStorageID(t *testing.T) {
require.False(t, res.Exists)
})
- t.Run("invalid storage id", func(t *testing.T) {
- storageID := slice.Copy(putRes.StorageID)
- storageID[0] = '9'
+ t.Run("valid id but corrupted file", func(t *testing.T) {
+ relBadFileDir := filepath.Join("9", "0")
+ badFileName := "0"
// An invalid boltdb file is created so that it returns an error when opened
- badFileDir := filepath.Join(dir, "9", "0")
- require.NoError(t, os.MkdirAll(badFileDir, os.ModePerm))
- require.NoError(t, os.WriteFile(filepath.Join(badFileDir, "0"), []byte("not a boltdb file content"), 0777))
+ require.NoError(t, os.MkdirAll(filepath.Join(dir, relBadFileDir), os.ModePerm))
+ require.NoError(t, os.WriteFile(filepath.Join(dir, relBadFileDir, badFileName), []byte("not a boltdb file content"), 0777))
- res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: storageID})
+ res, err := b.Exists(context.Background(), common.ExistsPrm{Address: addr, StorageID: []byte(filepath.Join(relBadFileDir, badFileName))})
require.Error(t, err)
require.False(t, res.Exists)
})
From 160147b05d4a51a6fdf29392f67e3282bda9da14 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 14 Apr 2023 10:10:22 +0300
Subject: [PATCH 0122/1943] [#249] adm: Drop subnet support
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-adm/docs/subnetwork-creation.md | 39 -
cmd/frostfs-adm/docs/subnetwork-usage.md | 137 --
.../modules/morph/initialize_deploy.go | 3 -
.../internal/modules/morph/internal/types.go | 65 -
.../modules/morph/internal/types.pb.go | 156 ---
.../modules/morph/internal/types.proto | 15 -
.../internal/modules/morph/root.go | 5 -
.../internal/modules/morph/subnet.go | 1101 -----------------
.../internal/modules/storagecfg/config.go | 3 -
9 files changed, 1524 deletions(-)
delete mode 100644 cmd/frostfs-adm/docs/subnetwork-creation.md
delete mode 100644 cmd/frostfs-adm/docs/subnetwork-usage.md
delete mode 100644 cmd/frostfs-adm/internal/modules/morph/internal/types.go
delete mode 100644 cmd/frostfs-adm/internal/modules/morph/internal/types.pb.go
delete mode 100644 cmd/frostfs-adm/internal/modules/morph/internal/types.proto
delete mode 100644 cmd/frostfs-adm/internal/modules/morph/subnet.go
diff --git a/cmd/frostfs-adm/docs/subnetwork-creation.md b/cmd/frostfs-adm/docs/subnetwork-creation.md
deleted file mode 100644
index 5ada94387..000000000
--- a/cmd/frostfs-adm/docs/subnetwork-creation.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# FrostFS subnetwork creation
-
-This is a short guide on how to create FrostFS subnetworks. This guide
-considers that the sidechain and the inner ring (alphabet nodes) have already been
-deployed and the sidechain contains a deployed `subnet` contract.
-
-## Prerequisites
-
-To follow this guide, you need:
-- neo-go sidechain RPC endpoint;
-- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases);
-- wallet with FrostFS account.
-
-## Creation
-
-```shell
-$ frostfs-adm morph subnet create \
- -r \
- -w \
- --notary
-Create subnet request sent successfully. ID: 4223489767.
-```
-
-**NOTE:** in notary-enabled environment you should have a sufficient
-notary deposit (not expired, with enough GAS balance). Your subnet ID
-will differ from the example.
-
-The default account in the wallet that has been passed with `-w` flag is the owner
-of the just created subnetwork.
-
-You can check if your subnetwork was created successfully:
-
-```shell
-$ frostfs-adm morph subnet get \
- -r \
- --subnet
-Owner: NUc734PMJXiqa2J9jRtvskU3kCdyyuSN8Q
-```
-Your owner will differ from the example.
diff --git a/cmd/frostfs-adm/docs/subnetwork-usage.md b/cmd/frostfs-adm/docs/subnetwork-usage.md
deleted file mode 100644
index 0d505b3a4..000000000
--- a/cmd/frostfs-adm/docs/subnetwork-usage.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# Managing Subnetworks
-
-This is a short guide on how to manage FrostFS subnetworks. This guide
-considers that the sidechain and the inner ring (alphabet nodes) have already been
-deployed, and the sidechain contains a deployed `subnet` contract.
-
-## Prerequisites
-
-- neo-go sidechain RPC endpoint;
-- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases);
-- [created](subnetwork-creation.md) subnetwork;
-- wallet with the account that owns the subnetwork;
-- public key of the Storage Node;
-- public keys of the node and client administrators;
-- owner IDs of the FrostFS users.
-
-## Add node administrator
-
-Node administrators are accounts that can manage (add and delete nodes)
-the whitelist of the nodes which can be included to a subnetwork. Only the subnet
-owner is allowed to add and remove node administrators from the subnetwork.
-
-```shell
-$ frostfs-adm morph subnet admin add \
- -r \
- -w \
- --admin \
- --subnet
-Add admin request sent successfully.
-```
-
-## Add node
-
-Adding a node to a subnetwork means that the node becomes able to service
-containers that have been created in that subnetwork. Addition only changes
-the list of the allowed nodes. Node is not required to be bootstrapped at the
-moment of its inclusion.
-
-```shell
-$ frostfs-adm morph subnet node add \
- -r \
- -w \
- --node