[#1502] Use zap.Error for logging errors
All checks were successful
Vulncheck / Vulncheck (pull_request) Successful in 3m1s
Pre-commit hooks / Pre-commit (pull_request) Successful in 3m29s
Tests and linters / gopls check (pull_request) Successful in 3m50s
Tests and linters / Lint (pull_request) Successful in 4m35s
DCO action / DCO (pull_request) Successful in 5m12s
Tests and linters / Run gofumpt (pull_request) Successful in 5m33s
Build / Build Components (pull_request) Successful in 5m45s
Tests and linters / Tests with -race (pull_request) Successful in 6m37s
Tests and linters / Tests (pull_request) Successful in 7m17s
Tests and linters / Staticcheck (pull_request) Successful in 7m36s
Tests and linters / Run gofumpt (push) Successful in 1m22s
Tests and linters / Staticcheck (push) Successful in 3m19s
Tests and linters / Lint (push) Successful in 4m35s
Vulncheck / Vulncheck (push) Successful in 5m20s
Build / Build Components (push) Successful in 6m16s
Pre-commit hooks / Pre-commit (push) Successful in 6m37s
Tests and linters / Tests (push) Successful in 6m48s
Tests and linters / Tests with -race (push) Successful in 7m15s
Tests and linters / gopls check (push) Successful in 7m27s
Use `zap.Error` instead of `zap.String` for logging errors: change all expressions like `zap.String("error", err.Error())` or `zap.String("err", err.Error())` to `zap.Error(err)`. Leave similar expressions with other messages unchanged, for example, `zap.String("last_error", lastErr.Error())` or `zap.String("reason", ctx.Err().Error())`.

This change was made by applying the following patch:

```diff
@@
var err expression
@@
-zap.String("error", err.Error())
+zap.Error(err)

@@
var err expression
@@
-zap.String("err", err.Error())
+zap.Error(err)
```

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
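For context on why the swap is worthwhile: `zap.Error(err)` attaches the error under zap's conventional `"error"` key, is a no-op for a nil error (while calling `err.Error()` on a nil error panics), and hands the error value itself to the encoder, which can serialize extra detail for errors that support it. A minimal before/after sketch, with an illustrative logger setup rather than the repository's own:

```go
package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	var err error // nil: the case where the old pattern is dangerous

	// Old pattern: panics when err is nil, and the field is a
	// pre-rendered string the encoder can no longer inspect.
	// logger.Warn("shutdown failed", zap.String("error", err.Error()))

	// New pattern: emits {"error": "..."} when err is non-nil and
	// skips the field entirely when err is nil.
	logger.Warn("shutdown failed", zap.Error(err))
}
```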
parent 8ba9f31fca
commit f0c43c8d80
56 changed files with 114 additions and 114 deletions
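The inline patch in the commit message is written in the metavariable syntax of uber-go/gopatch, a semantic patching tool for Go. Assuming that tool was used (the patch file name here is hypothetical), the whole 56-file change reduces to installing it with `go install github.com/uber-go/gopatch@latest` and running `gopatch -p zap-error.patch ./...` from the repository root, with the gofumpt and lint checks above guarding the result.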
@@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) {
 	log.Info(ctx, c.name+" config updated")
 	if err := c.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	} else {
 		c.init(ctx)
@@ -119,12 +119,12 @@ func shutdown(ctx context.Context) {
 	innerRing.Stop(ctx)
 	if err := metricsCmp.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 	if err := pprofCmp.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 
@@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) {
 	log.Info(ctx, c.name+" config updated")
 	if err := c.shutdown(ctx); err != nil {
 		log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return
 	}
 
@@ -1119,7 +1119,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
 	err := ls.Close(context.WithoutCancel(ctx))
 	if err != nil {
 		c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	} else {
 		c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
@@ -1209,7 +1209,7 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
 	if err != nil {
 		c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
 			zap.Uint64("epoch", epoch),
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return
 	}
 
@@ -134,7 +134,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C
 	err := stopper(ctx)
 	if err != nil {
 		c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 
@@ -96,7 +96,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
 	if err != nil {
 		c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
 			zap.Any("endpoints", addresses),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		fatalOnErr(err)
@@ -168,7 +168,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 	fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
 	if err != nil {
 		fromSideChainBlock = 0
-		c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
+		c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
 	}
 
 	subs, err = subscriber.New(ctx, &subscriber.Params{
@@ -197,7 +197,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 	_, _, err := makeNotaryDeposit(ctx, c)
 	if err != nil {
 		c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 })
@@ -58,7 +58,7 @@ func (c *cfg) MaxObjectSize() uint64 {
 	sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
 	if err != nil {
 		c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 
@@ -269,7 +269,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
 	_, err := ls.Inhume(ctx, inhumePrm)
 	if err != nil {
 		c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 }),
@@ -113,7 +113,7 @@ func initTreeService(c *cfg) {
 		// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
 		c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
 			zap.Stringer("cid", ev.ID),
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 })
 
@@ -67,7 +67,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
 	if err != nil {
 		// do not throw error, try best case matching
 		c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	} else if isInnerRingNode {
 		return &ClassifyResult{
 			Role: acl.RoleInnerRing,
@@ -84,7 +84,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
 		// is not possible for previous epoch, so
 		// do not throw error, try best case matching
 		c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	} else if isContainerNode {
 		return &ClassifyResult{
 			Role: acl.RoleContainer,
@@ -100,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
 	fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
 	if err != nil {
 		fromMainChainBlock = 0
-		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
 	}
 	mainnetChain.from = fromMainChainBlock
 
@@ -456,7 +456,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
 	fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
 	if err != nil {
 		fromSideChainBlock = 0
-		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
 	}
 
 	morphChain := &chainParams{
@@ -177,7 +177,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
 	if err != nil {
 		// we don't stop inner ring execution on this error
 		s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 
 	s.tickInitialExpoch(ctx)
@@ -308,7 +308,7 @@ func (s *Server) Stop(ctx context.Context) {
 	for _, c := range s.closers {
 		if err := c(); err != nil {
 			s.log.Warn(ctx, logs.InnerringCloserError,
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		}
 	}
@@ -33,7 +33,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
 	// there is no signature collecting, so we don't need extra fee
 	_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
 	if err != nil {
-		ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
+		ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
 
 		return false
 	}
@@ -47,7 +47,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
 	networkMap, err := ap.netmapClient.NetMap()
 	if err != nil {
 		ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 
 		return false
 	}
@@ -83,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
 	key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
 	if err != nil {
 		ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 
 		continue
 	}
@@ -93,7 +93,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
 		ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
 			zap.String("receiver", key.Address()),
 			zap.Int64("amount", int64(gasPerNode)),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 }
@@ -110,7 +110,7 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint
 		ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
 			zap.Strings("receivers", receiversLog),
 			zap.Int64("amount", int64(gasPerNode)),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 }
@@ -50,7 +50,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
 	err := cp.checkPutContainer(pctx)
 	if err != nil {
 		cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return false
@@ -58,7 +58,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
 
 	if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
 		cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 		return false
 	}
@@ -113,7 +113,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
 	err := cp.checkDeleteContainer(e)
 	if err != nil {
 		cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return false
@@ -121,7 +121,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
 
 	if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
 		cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return false
@@ -73,7 +73,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
 	err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
 	if err != nil {
 		np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 
 		return false
 	}
@@ -28,21 +28,21 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
 	mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return false
 	}
 
 	sidechainAlphabet, err := gp.morphClient.Committee()
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return false
 	}
 
 	newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return false
 	}
 
@@ -65,7 +65,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
 	err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 
 	// 2. Update NeoFSAlphabet role in the sidechain.
@@ -98,14 +98,14 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
 	innerRing, err := gp.irFetcher.InnerRingKeys()
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return
 	}
 
 	newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return
 	}
 
@@ -122,7 +122,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
 
 	if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 }
 
@@ -135,7 +135,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabe
 	err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 }
 
@@ -155,6 +155,6 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph
 	err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
 	if err != nil {
 		gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 }
@@ -49,7 +49,7 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
 	})
 	if err != nil {
 		np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return false
 	}
 
@@ -17,7 +17,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
 	epochDuration, err := np.netmapClient.EpochDuration()
 	if err != nil {
 		np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	} else {
 		np.epochState.SetEpochDuration(epochDuration)
 	}
@@ -28,19 +28,19 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
 	if err != nil {
 		np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
 			zap.String("hash", ev.TxHash().StringLE()),
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 
 	if err := np.epochTimer.ResetEpochTimer(h); err != nil {
 		np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 
 	// get new netmap snapshot
 	networkMap, err := np.netmapClient.NetMap()
 	if err != nil {
 		np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 
 		return false
 	}
@@ -42,7 +42,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
 	err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
 	if err != nil {
 		np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return false
@@ -62,7 +62,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool {
 func (s *Server) InnerRingIndex(ctx context.Context) int {
 	index, err := s.statusIndex.InnerRingIndex()
 	if err != nil {
-		s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
+		s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
 		return -1
 	}
 
@@ -74,7 +74,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int {
 func (s *Server) InnerRingSize(ctx context.Context) int {
 	size, err := s.statusIndex.InnerRingSize()
 	if err != nil {
-		s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
+		s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
 		return 0
 	}
 
@@ -86,7 +86,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
 func (s *Server) AlphabetIndex(ctx context.Context) int {
 	index, err := s.statusIndex.AlphabetIndex()
 	if err != nil {
-		s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
+		s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
 		return -1
 	}
 
@@ -132,7 +132,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
 		s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
 			zap.Int8("alphabet_index", int8(letter)),
 			zap.Uint64("epoch", epoch),
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 })
 
@@ -82,7 +82,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
 	if !client.IsErrObjectNotFound(err) {
 		b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
 			zap.String("level", p),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
 		)
 	}
@@ -57,7 +57,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
 	if !client.IsErrObjectNotFound(err) {
 		b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 			zap.String("level", p),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	}
 }
@@ -69,7 +69,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
 	if !client.IsErrObjectNotFound(err) {
 		b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 			zap.String("level", p),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
 		)
 	}
@@ -71,7 +71,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
 	if !outOfBounds && !client.IsErrObjectNotFound(err) {
 		b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 			zap.String("level", p),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	}
 	if outOfBounds {
@@ -44,7 +44,7 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
 	if prm.IgnoreErrors {
 		b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 			zap.Stringer("address", elem.Address()),
-			zap.String("err", err.Error()),
+			zap.Error(err),
 			zap.String("storage_id", p),
 			zap.String("root_path", b.rootPath))
 		return nil
@@ -77,7 +77,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
 	if err != nil {
 		if ignoreErrors {
 			b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
-				zap.String("err", err.Error()),
+				zap.Error(err),
 				zap.String("storage_id", p),
 				zap.String("root_path", b.rootPath))
 			return false, nil
@@ -97,7 +97,7 @@ func (b *sharedDB) Close(ctx context.Context) {
 	if err := b.blcza.Close(ctx); err != nil {
 		b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 			zap.String("id", b.path),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 	b.blcza = nil
@@ -125,7 +125,7 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
 	if err := b.blcza.Close(ctx); err != nil {
 		b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 			zap.String("id", b.path),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 		return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err)
 	}
@@ -83,7 +83,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 		i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
 	} else {
 		i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	}
 
@@ -106,7 +106,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
 	} else {
 		i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
 			zap.String("path", active.SystemPath()),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	}
 	if errors.Is(err, blobovnicza.ErrNoSpace) {
@@ -74,7 +74,7 @@ func (b *BlobStor) Close(ctx context.Context) error {
 	for i := range b.storage {
 		err := b.storage[i].Storage.Close(ctx)
 		if err != nil {
-			b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
+			b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
 			if firstErr == nil {
 				firstErr = err
 			}
@@ -75,7 +75,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
 	for _, err := range errors[:len(errors)-1] {
 		b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
 			zap.Stringer("address", prm.Address),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	}
 
@@ -153,7 +153,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 	if err != nil {
 		if prm.IgnoreErrors {
 			t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
-				zap.String("err", err.Error()),
+				zap.Error(err),
 				zap.String("directory_path", dirPath))
 			return nil
 		}
@@ -202,7 +202,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
 	if prm.IgnoreErrors {
 		t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 			zap.Stringer("address", addr),
-			zap.String("err", err.Error()),
+			zap.Error(err),
 			zap.String("path", path))
 		continue
 	}
@@ -45,7 +45,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
 		b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
 			zap.String("storage_path", b.storage[i].Storage.Path()),
 			zap.String("storage_type", b.storage[i].Storage.Type()),
-			zap.String("err", err.Error()))
+			zap.Error(err))
 		continue
 	}
 	return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)
@@ -167,7 +167,7 @@ func (e *StorageEngine) close(ctx context.Context, releasePools bool) error {
 	if err := sh.Close(ctx); err != nil {
 		e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
 			zap.String("id", id),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 }
@@ -154,7 +154,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
 	if err != nil {
 		e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
 			zap.Stringer("addr", addr),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return false
 	}
@@ -166,7 +166,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
 	if err != nil {
 		e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
 			zap.Stringer("addr", addr),
-			zap.String("err", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		continue
 	}
@@ -196,7 +196,7 @@ func (e *StorageEngine) deleteChunks(
 	if err != nil {
 		e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
 			zap.Stringer("addr", addr),
-			zap.String("err", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		continue
 	}
@@ -140,7 +140,7 @@ func (e *StorageEngine) reportShardError(
 	if isLogical(err) {
 		e.log.Warn(ctx, msg,
 			zap.Stringer("shard_id", sh.ID()),
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return
 	}
 
@@ -151,7 +151,7 @@ func (e *StorageEngine) reportShardError(
 	e.log.Warn(ctx, msg, append([]zap.Field{
 		zap.Stringer("shard_id", sid),
 		zap.Uint32("error count", errCount),
-		zap.String("error", err.Error()),
+		zap.Error(err),
 	}, fields...)...)
 
 	if e.errorsThreshold == 0 || errCount < e.errorsThreshold {
@@ -106,7 +106,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
 	if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
 		e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
 			zap.Stringer("shard_id", it.ShardWithMeta.ID()),
-			zap.String("error", it.MetaError.Error()),
+			zap.Error(it.MetaError),
 			zap.Stringer("address", prm.addr),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	}
@@ -143,7 +143,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
 	} else {
 		e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
 			zap.Stringer("shard_id", sh.ID()),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	}
 
@@ -165,14 +165,14 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
 		errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
 		e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
 			zap.Stringer("shard_id", sh.ID()),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return
 	}
 	if client.IsErrObjectAlreadyRemoved(err) {
 		e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
 			zap.Stringer("shard_id", sh.ID()),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		res.status = putToShardRemoved
 		res.err = err
@@ -118,7 +118,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
 	if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
 		e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
 			zap.Stringer("shard_id", it.ShardWithMeta.ID()),
-			zap.String("error", it.MetaError.Error()),
+			zap.Error(it.MetaError),
 			zap.Stringer("address", prm.addr),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 	}
@@ -272,7 +272,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
 	if err := obj.Unmarshal(data); err != nil {
 		s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
 			zap.Stringer("address", addr),
-			zap.String("err", err.Error()))
+			zap.Error(err))
 		return nil
 	}
 
@@ -112,7 +112,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
 	if err != nil {
 		s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
			zap.Stringer("object", addr),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return err
 	}
@@ -132,7 +132,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
 	if err != nil && !client.IsErrObjectNotFound(err) {
 		s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
 			zap.Stringer("object_address", addr),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
 		return err
 	}
@@ -205,7 +205,7 @@ func (gc *gc) handleEvent(ctx context.Context, event Event) {
 	})
 	if err != nil {
 		gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		v.prevGroup.Done()
@@ -313,7 +313,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 	err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

 		return
@@ -334,7 +334,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 		result.success = false
 	}
@@ -396,7 +396,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
 	})
 
 	if err = errGroup.Wait(); err != nil {
-		s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err))
 	}
 }
 
@@ -429,7 +429,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
 	res, err := s.metaBase.Inhume(ctx, inhumePrm)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

 		return
@@ -584,7 +584,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
 	})
 
 	if err = errGroup.Wait(); err != nil {
-		s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
+		s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err))
 	}
 }
 
@@ -637,7 +637,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
 	res, err := s.metaBase.InhumeTombstones(ctx, tss)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

 		return
@@ -665,7 +665,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 	unlocked, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

 		return
@@ -678,7 +678,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 	res, err := s.metaBase.Inhume(ctx, pInhume)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

 		return
@@ -722,7 +722,7 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
 	_, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
 		s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

 		return
@@ -110,7 +110,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
 	}

 	s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
-		zap.String("error", err.Error()),
+		zap.Error(err),
 		zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
 	)
 
@@ -124,7 +124,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
 	if err != nil {
 		s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
 			zap.Stringer("cid", lst[i]),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.String("trace_id", tracingPkg.GetTraceID(ctx)))

 		continue
@@ -76,7 +76,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
 	if err != nil || !tryCache {
 		if err != nil {
 			s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
-				zap.String("err", err.Error()))
+				zap.Error(err))
 		}

 		res, err = s.blobStor.Put(ctx, putPrm)
@@ -390,7 +390,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
 	height, err = c.rpcActor.GetBlockCount()
 	if err != nil {
 		c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return nil
 	}
 
@@ -404,7 +404,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
 	newHeight, err = c.rpcActor.GetBlockCount()
 	if err != nil {
 		c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
-			zap.String("error", err.Error()))
+			zap.Error(err))
 		return nil
 	}
 
@@ -136,7 +136,7 @@ func (l *listener) Listen(ctx context.Context) {
 	defer l.wg.Done()
 	if err := l.listen(ctx, nil); err != nil {
 		l.log.Error(ctx, logs.EventCouldNotStartListenToEvents,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 })
@@ -154,7 +154,7 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) {
 	defer l.wg.Done()
 	if err := l.listen(ctx, intError); err != nil {
 		l.log.Error(ctx, logs.EventCouldNotStartListenToEvents,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 		l.sendError(ctx, intError, err)
 	}
@@ -342,7 +342,7 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *
 	event, err := parser(notifyEvent)
 	if err != nil {
 		log.Warn(ctx, logs.EventCouldNotParseNotificationEvent,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

 		return
@@ -375,13 +375,13 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
 	case errors.Is(err, ErrTXAlreadyHandled):
 	case errors.As(err, &expErr):
 		l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 			zap.Uint32("current_block_height", expErr.CurrentBlockHeight),
 			zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight),
 		)
 	default:
 		l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 
@@ -413,7 +413,7 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe
 	event, err := parser(notaryEvent)
 	if err != nil {
 		log.Warn(ctx, logs.EventCouldNotParseNotaryEvent,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

 		return
@@ -77,7 +77,7 @@ func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handle
 	})
 	if err != nil {
 		log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 }
@@ -36,7 +36,7 @@ func (exec *execCtx) execute(ctx context.Context) error {
 	exec.log.Debug(ctx, logs.ServingRequest)

 	if err := exec.executeLocal(ctx); err != nil {
-		exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error()))
+		exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
 		return err
 	}
 
@@ -73,7 +73,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
 	select {
 	case <-ctx.Done():
 		exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext,
-			zap.String("error", ctx.Err().Error()))
+			zap.Error(ctx.Err()))
 		return
 	default:
 	}
@@ -86,14 +86,14 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
 
 	c, err := exec.svc.clientConstructor.get(info)
 	if err != nil {
-		exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error()))
+		exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err))
 		return
 	}

 	ids, err := c.searchObjects(ctx, exec, info)
 	if err != nil {
 		exec.log.Debug(ctx, logs.SearchRemoteOperationFailed,
-			zap.String("error", err.Error()))
+			zap.Error(err))

 		return
 	}
@@ -102,7 +102,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
 	err = exec.writeIDList(ids)
 	mtx.Unlock()
 	if err != nil {
-		exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error()))
+		exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err))
 		return
 	}
 }(i)
@@ -11,7 +11,7 @@ import (
 func (exec *execCtx) executeLocal(ctx context.Context) error {
 	ids, err := exec.svc.localStorage.search(ctx, exec)
 	if err != nil {
-		exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.String("error", err.Error()))
+		exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err))
 		return err
 	}
 
@@ -38,7 +38,7 @@ func (exec *execCtx) execute(ctx context.Context) error {
 func (exec *execCtx) logResult(ctx context.Context, err error) {
 	switch {
 	default:
-		exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error()))
+		exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err))
 	case err == nil:
 		exec.log.Debug(ctx, logs.OperationFinishedSuccessfully)
 	}
@@ -14,7 +14,7 @@ func LogServiceError(ctx context.Context, l *logger.Logger, req string, node net
 	l.Error(ctx, logs.UtilObjectServiceError,
 		zap.String("node", network.StringifyGroup(node)),
 		zap.String("request", req),
-		zap.String("error", err.Error()),
+		zap.Error(err),
 	)
 }
 
@@ -22,6 +22,6 @@ func LogServiceError(ctx context.Context, l *logger.Logger, req string, node net
 func LogWorkerPoolError(ctx context.Context, l *logger.Logger, req string, err error) {
 	l.Error(ctx, logs.UtilCouldNotPushTaskToWorkerPool,
 		zap.String("request", req),
-		zap.String("error", err.Error()),
+		zap.Error(err),
 	)
 }
@@ -153,7 +153,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
 	} else {
 		p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance,
 			zap.Stringer("object", addr),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 }
@@ -131,7 +131,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
 		// consider maintenance mode has object, but do not drop local copy
 		p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
 	} else {
-		p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error()))
+		p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err))
 	}

 	return ecChunkProcessResult{
@@ -61,7 +61,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
 	if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) {
 		p.log.Error(ctx, logs.PolicerUnableToProcessObj,
 			zap.Stringer("object", addr.Address),
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	}
 	p.cache.Add(addr.Address, time.Now())
 	p.objsInWork.remove(addr.Address)
@@ -76,7 +76,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
 
 	if err != nil {
 		log.Error(ctx, logs.ReplicatorCouldNotReplicateObject,
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	} else {
 		log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated)
@@ -59,7 +59,7 @@ func (s *Service) localReplicationWorker(ctx context.Context) {
 	err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false)
 	if err != nil {
 		s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation,
-			zap.String("err", err.Error()))
+			zap.Error(err))
 	}
 	span.End()
 }
@@ -155,7 +155,7 @@ func (s *Service) replicateLoop(ctx context.Context) {
 	err := s.replicate(op)
 	if err != nil {
 		s.log.Error(ctx, logs.TreeErrorDuringReplication,
-			zap.String("err", err.Error()),
+			zap.Error(err),
 			zap.Stringer("cid", op.cid),
 			zap.String("treeID", op.treeID))
 	}
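One footnote on the expressions the commit deliberately leaves untouched, such as `zap.String("last_error", lastErr.Error())`: when an error needs a non-default field name, zap also offers `zap.NamedError`, which keeps the value typed as an error instead of pre-rendering it to a string; `zap.Error(err)` is itself shorthand for `zap.NamedError("error", err)`. A hedged sketch (the message and variable here are illustrative, not taken from this diff):

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	lastErr := errors.New("connection refused")

	// Keeps the field structured under a custom key; also nil-safe,
	// unlike calling lastErr.Error() directly.
	logger.Warn("retries exhausted", zap.NamedError("last_error", lastErr))
}
```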