forked from TrueCloudLab/frostfs-node
[#240] logs: Factor out common service log messages
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
parent 0e31c12e63
commit cffcc7745e
15 changed files with 53 additions and 64 deletions
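The change is mechanical: message constants that were duplicated per service (SessionServingRequest, DeleteServingRequest, GetServingRequest, and the various *Notification constants) are collapsed into a single shared block in package logs, and call sites keep their identity in structured fields rather than in the message text. Below is a minimal, self-contained sketch of the idea; plain zap stands in for frostfs-node's logger wrapper, and the hex bytes are invented for illustration, while the message and field values are taken from the diff.

// Sketch only: shared message constants as introduced by this commit,
// logged through plain zap instead of frostfs-node's logger type.
package main

import (
	"encoding/hex"

	"go.uber.org/zap"
)

// Shared service log messages (same values as in the diff).
const (
	ServingRequest                = "serving request..."
	OperationFinishedSuccessfully = "operation finished successfully"
	Notification                  = "notification"
)

func main() {
	log, _ := zap.NewDevelopment()
	defer func() { _ = log.Sync() }()

	// Different services reuse one message; structured fields tell the
	// entries apart (compare the session and object service hunks below).
	log.Debug(ServingRequest, zap.String("component", "SessionService"), zap.String("request", "Create"))
	log.Debug(ServingRequest)

	// BalanceNotification and ContainerNotification carried identical text;
	// after the change the "type" field is the discriminator.
	log.Info(Notification, zap.String("type", "lock"),
		zap.String("value", hex.EncodeToString([]byte{0xde, 0xad})))
	log.Info(Notification, zap.String("type", "container put"))

	log.Debug(OperationFinishedSuccessfully)
}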
@@ -1,5 +1,21 @@
 package logs

+// Common service logs.
+const (
+	ServingRequest = "serving request..."
+	OperationFinishedSuccessfully = "operation finished successfully"
+	OperationFinishedWithError = "operation finished with error"
+
+	TryingToExecuteInContainer = "trying to execute in container..."
+	CouldNotGetCurrentEpochNumber = "could not get current epoch number"
+	ProcessEpoch = "process epoch"
+	ProcessingNode = "processing node..."
+	NoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration"
+	InterruptPlacementIterationByContext = "interrupt placement iteration by context"
+
+	Notification = "notification"
+)
+
 const (
 	InnerringAmountCanNotBeRepresentedAsAnInt64 = "amount can not be represented as an int64" // Error in ../node/pkg/innerring/settlement.go
 	InnerringCantGetUsedSpaceEstimation = "can't get used space estimation" // Warn in ../node/pkg/innerring/settlement.go
@@ -56,7 +72,6 @@ const (
 	ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go
 	ReplicatorCouldNotReplicateObject = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go
 	ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" // Debug in ../node/pkg/services/replicator/process.go
-	SessionServingRequest = "serving request..." // Debug in ../node/pkg/services/session/executor.go
 	TreeRedirectingTreeServiceQuery = "redirecting tree service query" // Debug in ../node/pkg/services/tree/redirect.go
 	TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL" // Debug in ../node/pkg/services/tree/signature.go
 	TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
@@ -115,9 +130,6 @@ const (
 	TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
 	TombstoneCouldNotParseTombstoneExpirationEpoch = "tombstone getter: could not parse tombstone expiration epoch" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
 	DeleteRequestIsNotRolledOverToTheContainer = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go
-	DeleteServingRequest = "serving request..." // Debug in ../node/pkg/services/object/delete/delete.go
-	DeleteOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/delete/delete.go
-	DeleteOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/delete/delete.go
 	DeleteCouldNotComposeSplitInfo = "could not compose split info" // Debug in ../node/pkg/services/object/delete/exec.go
 	DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY" // Debug in ../node/pkg/services/object/delete/exec.go
 	DeleteAssemblingChain = "assembling chain..." // Debug in ../node/pkg/services/object/delete/exec.go
@@ -134,47 +146,29 @@ const (
 	DeleteFormingSplitInfo = "forming split info..." // Debug in ../node/pkg/services/object/delete/local.go
 	DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..." // Debug in ../node/pkg/services/object/delete/local.go
 	DeleteMembersSuccessfullyCollected = "members successfully collected" // Debug in ../node/pkg/services/object/delete/local.go
-	GetProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/get/remote.go
 	GetRemoteCallFailed = "remote call failed" // Debug in ../node/pkg/services/object/get/remote.go
 	GetCanNotAssembleTheObject = "can not assemble the object" // Debug in ../node/pkg/services/object/get/assemble.go
 	GetTryingToAssembleTheObject = "trying to assemble the object..." // Debug in ../node/pkg/services/object/get/assemble.go
 	GetAssemblingSplittedObject = "assembling splitted object..." // Debug in ../node/pkg/services/object/get/assemble.go
 	GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" // Debug in ../node/pkg/services/object/get/assemble.go
 	GetFailedToAssembleSplittedObject = "failed to assemble splitted object" // Warn in ../node/pkg/services/object/get/assemble.go
-	GetCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/get/exec.go
 	GetCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/get/exec.go
 	GetCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/get/exec.go
 	GetCouldNotWriteHeader = "could not write header" // Debug in ../node/pkg/services/object/get/exec.go
 	GetCouldNotWritePayloadChunk = "could not write payload chunk" // Debug in ../node/pkg/services/object/get/exec.go
 	GetLocalGetFailed = "local get failed" // Debug in ../node/pkg/services/object/get/local.go
 	GetReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/get/container.go
-	GetTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/get/container.go
-	GetProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/get/container.go
-	GetNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/get/container.go
-	GetInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/get/container.go
 	GetCompletingTheOperation = "completing the operation" // Debug in ../node/pkg/services/object/get/container.go
-	GetServingRequest = "serving request..." // Debug in ../node/pkg/services/object/get/get.go
-	GetOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/get/get.go
 	GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed" // Debug in ../node/pkg/services/object/get/get.go
 	GetRequestedObjectIsVirtual = "requested object is virtual" // Debug in ../node/pkg/services/object/get/get.go
 	GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds" // Debug in ../node/pkg/services/object/get/get.go
-	GetOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/get/get.go
 	PutAdditionalContainerBroadcastFailure = "additional container broadcast failure" // Error in ../node/pkg/services/object/put/distributed.go
 	SearchReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/search/container.go
-	SearchTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/search/container.go
-	SearchProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/search/container.go
-	SearchNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/search/container.go
-	SearchInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/search/container.go
-	SearchProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/search/container.go
 	SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/search/container.go
 	SearchRemoteOperationFailed = "remote operation failed" // Debug in ../node/pkg/services/object/search/container.go
-	SearchCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/search/exec.go
 	SearchCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/search/exec.go
 	SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" // Debug in ../node/pkg/services/object/search/exec.go
 	SearchLocalOperationFailed = "local operation failed" // Debug in ../node/pkg/services/object/search/local.go
-	SearchServingRequest = "serving request..." // Debug in ../node/pkg/services/object/search/search.go
-	SearchOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/search/search.go
-	SearchOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/search/search.go
 	UtilObjectServiceError = "object service error" // Error in ../node/pkg/services/object/util/log.go
 	UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" // Error in ../node/pkg/services/object/util/log.go
 	V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
@@ -419,13 +413,11 @@ const (
 	AuditParseClientNodeInfo = "parse client node info" // Warn in ../node/pkg/innerring/processors/audit/process.go
 	AuditErrorInStorageGroupSearch = "error in storage group search" // Warn in ../node/pkg/innerring/processors/audit/process.go
 	AuditCouldNotGetStorageGroupObjectForAuditSkipping = "could not get storage group object for audit, skipping" // Error in ../node/pkg/innerring/processors/audit/process.go
-	BalanceNotification = "notification" // Info in ../node/pkg/innerring/processors/balance/handlers.go
 	BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
 	BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
 	BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
 	BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
 	ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
-	ContainerNotification = "notification" // Info in ../node/pkg/innerring/processors/container/handlers.go
 	ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
 	ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
 	ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
@@ -442,7 +434,6 @@ const (
 	FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go
 	FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go
 	FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go
-	FrostFSNotification = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
 	FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
 	FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
 	FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
@@ -474,7 +465,6 @@ const (
 	NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go
 	NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
 	NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
-	NetmapNotification = "notification" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
 	NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go
 	NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
 	NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
@@ -505,7 +495,6 @@ const (
 	NetmapCouldNotIterateOverSubnetworksOfTheNode = "could not iterate over subnetworks of the node" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
 	NetmapCouldNotInvokeNetmapUpdateState = "could not invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
 	NetmapCouldNotInvokeNetmapAddPeer = "could not invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
-	ReputationNotification = "notification" // Info in ../node/pkg/innerring/processors/reputation/handlers.go
 	ReputationReputationWorkerPoolDrained = "reputation worker pool drained" // Warn in ../node/pkg/innerring/processors/reputation/handlers.go
 	ReputationNonAlphabetModeIgnoreReputationPutNotification = "non alphabet mode, ignore reputation put notification" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
 	ReputationIgnoreReputationValue = "ignore reputation value" // Info in ../node/pkg/innerring/processors/reputation/process_put.go

@@ -11,7 +11,7 @@ import (

 func (bp *Processor) handleLock(ev event.Event) {
 	lock := ev.(balanceEvent.Lock)
-	bp.log.Info(logs.BalanceNotification,
+	bp.log.Info(logs.Notification,
 		zap.String("type", "lock"),
 		zap.String("value", hex.EncodeToString(lock.ID())))

@@ -14,7 +14,7 @@ func (cp *Processor) handlePut(ev event.Event) {
 	put := ev.(putEvent)

 	id := sha256.Sum256(put.Container())
-	cp.log.Info(logs.ContainerNotification,
+	cp.log.Info(logs.Notification,
 		zap.String("type", "container put"),
 		zap.String("id", base58.Encode(id[:])))

@@ -30,7 +30,7 @@ func (cp *Processor) handlePut(ev event.Event) {

 func (cp *Processor) handleDelete(ev event.Event) {
 	del := ev.(containerEvent.Delete)
-	cp.log.Info(logs.ContainerNotification,
+	cp.log.Info(logs.Notification,
 		zap.String("type", "container delete"),
 		zap.String("id", base58.Encode(del.ContainerID())))

@@ -47,7 +47,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
 func (cp *Processor) handleSetEACL(ev event.Event) {
 	e := ev.(containerEvent.SetEACL)

-	cp.log.Info(logs.ContainerNotification,
+	cp.log.Info(logs.Notification,
 		zap.String("type", "set EACL"),
 	)

@@ -12,7 +12,7 @@ import (

 func (np *Processor) handleDeposit(ev event.Event) {
 	deposit := ev.(frostfsEvent.Deposit)
-	np.log.Info(logs.FrostFSNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "deposit"),
 		zap.String("id", hex.EncodeToString(slice.CopyReverse(deposit.ID()))))

@@ -28,7 +28,7 @@ func (np *Processor) handleDeposit(ev event.Event) {

 func (np *Processor) handleWithdraw(ev event.Event) {
 	withdraw := ev.(frostfsEvent.Withdraw)
-	np.log.Info(logs.FrostFSNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "withdraw"),
 		zap.String("id", hex.EncodeToString(slice.CopyReverse(withdraw.ID()))))

@@ -44,7 +44,7 @@ func (np *Processor) handleWithdraw(ev event.Event) {

 func (np *Processor) handleCheque(ev event.Event) {
 	cheque := ev.(frostfsEvent.Cheque)
-	np.log.Info(logs.FrostFSNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "cheque"),
 		zap.String("id", hex.EncodeToString(cheque.ID())))

@@ -60,7 +60,7 @@ func (np *Processor) handleCheque(ev event.Event) {

 func (np *Processor) handleConfig(ev event.Event) {
 	cfg := ev.(frostfsEvent.Config)
-	np.log.Info(logs.FrostFSNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "set config"),
 		zap.String("key", hex.EncodeToString(cfg.Key())),
 		zap.String("value", hex.EncodeToString(cfg.Value())))
@@ -77,7 +77,7 @@ func (np *Processor) handleConfig(ev event.Event) {

 func (np *Processor) handleBind(ev event.Event) {
 	e := ev.(frostfsEvent.Bind)
-	np.log.Info(logs.FrostFSNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "bind"),
 	)

@@ -93,7 +93,7 @@ func (np *Processor) handleBind(ev event.Event) {

 func (np *Processor) handleUnbind(ev event.Event) {
 	e := ev.(frostfsEvent.Unbind)
-	np.log.Info(logs.FrostFSNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "unbind"),
 	)

@@ -27,7 +27,7 @@ func (np *Processor) HandleNewEpochTick(ev event.Event) {

 func (np *Processor) handleNewEpoch(ev event.Event) {
 	epochEvent := ev.(netmapEvent.NewEpoch)
-	np.log.Info(logs.NetmapNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "new epoch"),
 		zap.Uint64("value", epochEvent.EpochNumber()))

@@ -46,7 +46,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
 func (np *Processor) handleAddPeer(ev event.Event) {
 	newPeer := ev.(netmapEvent.AddPeer)

-	np.log.Info(logs.NetmapNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "add peer"),
 	)

@@ -64,7 +64,7 @@ func (np *Processor) handleAddPeer(ev event.Event) {

 func (np *Processor) handleUpdateState(ev event.Event) {
 	updPeer := ev.(netmapEvent.UpdatePeer)
-	np.log.Info(logs.NetmapNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "update peer state"),
 		zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))

@@ -105,7 +105,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
 func (np *Processor) handleRemoveNode(ev event.Event) {
 	removeNode := ev.(subnetevents.RemoveNode)

-	np.log.Info(logs.NetmapNotification,
+	np.log.Info(logs.Notification,
 		zap.String("type", "remove node from subnet"),
 		zap.String("subnetID", hex.EncodeToString(removeNode.SubnetworkID())),
 		zap.String("key", hex.EncodeToString(removeNode.Node())),

@@ -14,7 +14,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) {
 	peerID := put.PeerID()

 	// FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
-	rp.log.Info(logs.ReputationNotification,
+	rp.log.Info(logs.Notification,
 		zap.String("type", "reputation put"),
 		zap.String("peer_id", hex.EncodeToString(peerID.PublicKey())))

@@ -35,7 +35,7 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error {
 }

 func (exec *execCtx) execute(ctx context.Context) {
-	exec.log.Debug(logs.DeleteServingRequest)
+	exec.log.Debug(logs.ServingRequest)

 	// perform local operation
 	exec.executeLocal(ctx)
@@ -47,9 +47,9 @@ func (exec *execCtx) analyzeStatus(execCnr bool) {
 	// analyze local result
 	switch exec.status {
 	case statusOK:
-		exec.log.Debug(logs.DeleteOperationFinishedSuccessfully)
+		exec.log.Debug(logs.OperationFinishedSuccessfully)
 	default:
-		exec.log.Debug(logs.DeleteOperationFinishedWithError,
+		exec.log.Debug(logs.OperationFinishedWithError,
 			zap.String("error", exec.err.Error()),
 		)

@@ -16,7 +16,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {

 	lookupDepth := exec.netmapLookupDepth()

-	exec.log.Debug(logs.GetTryingToExecuteInContainer,
+	exec.log.Debug(logs.TryingToExecuteInContainer,
 		zap.Uint64("netmap lookup depth", lookupDepth),
 	)

@@ -44,7 +44,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
 }

 func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
-	exec.log.Debug(logs.GetProcessEpoch,
+	exec.log.Debug(logs.ProcessEpoch,
 		zap.Uint64("number", exec.curProcEpoch),
 	)

@@ -61,7 +61,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
 	for {
 		addrs := traverser.Next()
 		if len(addrs) == 0 {
-			exec.log.Debug(logs.GetNoMoreNodesAbortPlacementIteration)
+			exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)

 			return false
 		}
@@ -69,7 +69,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
 		for i := range addrs {
 			select {
 			case <-ctx.Done():
-				exec.log.Debug(logs.GetInterruptPlacementIterationByContext,
+				exec.log.Debug(logs.InterruptPlacementIterationByContext,
 					zap.String("error", ctx.Err().Error()),
 				)

@@ -150,7 +150,7 @@ func (exec *execCtx) initEpoch() bool {
 		exec.status = statusUndefined
 		exec.err = err

-		exec.log.Debug(logs.GetCouldNotGetCurrentEpochNumber,
+		exec.log.Debug(logs.CouldNotGetCurrentEpochNumber,
 			zap.String("error", err.Error()),
 		)

@@ -84,7 +84,7 @@ func (s *Service) get(ctx context.Context, prm commonPrm, opts ...execOption) st
 }

 func (exec *execCtx) execute(ctx context.Context) {
-	exec.log.Debug(logs.GetServingRequest)
+	exec.log.Debug(logs.ServingRequest)

 	// perform local operation
 	exec.executeLocal(ctx)
@@ -96,7 +96,7 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
 	// analyze local result
 	switch exec.status {
 	case statusOK:
-		exec.log.Debug(logs.GetOperationFinishedSuccessfully)
+		exec.log.Debug(logs.OperationFinishedSuccessfully)
 	case statusINHUMED:
 		exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved)
 	case statusVIRTUAL:
@@ -105,7 +105,7 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
 	case statusOutOfRange:
 		exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds)
 	default:
-		exec.log.Debug(logs.GetOperationFinishedWithError,
+		exec.log.Debug(logs.OperationFinishedWithError,
 			zap.String("error", exec.err.Error()),
 		)

@@ -16,7 +16,7 @@ func (exec *execCtx) processNode(ctx context.Context, info client.NodeInfo) bool
 	ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode")
 	defer span.End()

-	exec.log.Debug(logs.GetProcessingNode)
+	exec.log.Debug(logs.ProcessingNode)

 	client, ok := exec.remoteClient(info)
 	if !ok {

@@ -18,7 +18,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {

 	lookupDepth := exec.netmapLookupDepth()

-	exec.log.Debug(logs.SearchTryingToExecuteInContainer,
+	exec.log.Debug(logs.TryingToExecuteInContainer,
 		zap.Uint64("netmap lookup depth", lookupDepth),
 	)

@@ -49,7 +49,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) {
 }

 func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
-	exec.log.Debug(logs.SearchProcessEpoch,
+	exec.log.Debug(logs.ProcessEpoch,
 		zap.Uint64("number", exec.curProcEpoch),
 	)

@@ -64,7 +64,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
 	for {
 		addrs := traverser.Next()
 		if len(addrs) == 0 {
-			exec.log.Debug(logs.SearchNoMoreNodesAbortPlacementIteration)
+			exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration)
 			break
 		}

@@ -77,7 +77,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {
 			defer wg.Done()
 			select {
 			case <-ctx.Done():
-				exec.log.Debug(logs.SearchInterruptPlacementIterationByContext,
+				exec.log.Debug(logs.InterruptPlacementIterationByContext,
 					zap.String("error", ctx.Err().Error()))
 				return
 			default:
@@ -87,7 +87,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) bool {

 			client.NodeInfoFromNetmapElement(&info, addrs[i])

-			exec.log.Debug(logs.SearchProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))
+			exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey())))

 			c, err := exec.svc.clientConstructor.get(info)
 			if err != nil {

@@ -81,7 +81,7 @@ func (exec *execCtx) initEpoch() bool {
 		exec.status = statusUndefined
 		exec.err = err

-		exec.log.Debug(logs.SearchCouldNotGetCurrentEpochNumber,
+		exec.log.Debug(logs.CouldNotGetCurrentEpochNumber,
 			zap.String("error", err.Error()),
 		)

@@ -24,7 +24,7 @@ func (s *Service) Search(ctx context.Context, prm Prm) error {
 }

 func (exec *execCtx) execute(ctx context.Context) {
-	exec.log.Debug(logs.SearchServingRequest)
+	exec.log.Debug(logs.ServingRequest)

 	// perform local operation
 	exec.executeLocal()
@@ -36,11 +36,11 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
 	// analyze local result
 	switch exec.status {
 	default:
-		exec.log.Debug(logs.SearchOperationFinishedWithError,
+		exec.log.Debug(logs.OperationFinishedWithError,
 			zap.String("error", exec.err.Error()),
 		)
 	case statusOK:
-		exec.log.Debug(logs.SearchOperationFinishedSuccessfully)
+		exec.log.Debug(logs.OperationFinishedSuccessfully)
 	}

 	if execCnr {

@@ -29,7 +29,7 @@ func NewExecutionService(exec ServiceExecutor, l *logger.Logger) Server {
 }

 func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) {
-	s.log.Debug(logs.SessionServingRequest,
+	s.log.Debug(logs.ServingRequest,
 		zap.String("component", "SessionService"),
 		zap.String("request", "Create"),
 	)
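All of the call-site hunks follow the shape shown just above for the session executor: the shared message, plus fields that identify the component and the operation being served. As a rough, self-contained illustration of that shape (plain zap in place of frostfs-node's logger.Logger wrapper; the executorSvc below is a stripped-down stand-in, not the real type):

// Sketch only: reproduces the logging pattern from the session service hunk.
package main

import "go.uber.org/zap"

// Shared message from package logs (value taken from the diff).
const ServingRequest = "serving request..."

// executorSvc is a hypothetical, stripped-down stand-in for the session
// service executor used in the diff.
type executorSvc struct {
	log *zap.Logger
}

// Create logs the shared message and identifies itself through fields,
// mirroring the pattern introduced by this commit.
func (s *executorSvc) Create() {
	s.log.Debug(ServingRequest,
		zap.String("component", "SessionService"),
		zap.String("request", "Create"),
	)
}

func main() {
	log, _ := zap.NewDevelopment()
	defer func() { _ = log.Sync() }()

	(&executorSvc{log: log}).Create()
}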