forked from TrueCloudLab/frostfs-node

commit 0e31c12e63 (parent d29b13454f)

[#240] logs: Move log messages to constants

Drop duplicate entities. Format entities.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>

149 changed files with 1481 additions and 687 deletions
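The pattern applied throughout the hunks below: a string literal passed to the zap-based logger is replaced by a named constant declared once in the new internal/logs package. A minimal sketch of the idea, not code from the repository (the version string is a placeholder; CommonApplicationStarted mirrors the real constant):

	package main

	import "go.uber.org/zap"

	// Stand-in for internal/logs/logs.go: every message is declared once,
	// and a trailing comment can record where and at which level it is used.
	const CommonApplicationStarted = "application started"

	func main() {
		log, _ := zap.NewProduction()
		defer log.Sync()

		// Call sites reference the constant instead of repeating the literal,
		// so rewording or deduplicating a message is a single-line change.
		log.Info(CommonApplicationStarted, zap.String("version", "dev"))
	}

Centralizing the messages makes duplicates visible at declaration time, which is what lets this commit drop them.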
@@ -9,6 +9,7 @@ import (
 	"os/signal"
 	"syscall"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
 	httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http"
@@ -80,13 +81,13 @@ func main() {
 	err = innerRing.Start(ctx, intErr)
 	exitErr(err)
 
-	log.Info("application started",
+	log.Info(logs.CommonApplicationStarted,
 		zap.String("version", misc.Version))
 
 	select {
 	case <-ctx.Done():
 	case err := <-intErr:
-		log.Info("internal error", zap.String("msg", err.Error()))
+		log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error()))
 	}
 
 	innerRing.Stop()
@@ -98,14 +99,14 @@ func main() {
 		go func() {
 			err := srv.Shutdown()
 			if err != nil {
-				log.Debug("could not shutdown HTTP server",
+				log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer,
 					zap.String("error", err.Error()),
 				)
 			}
 		}()
 	}
 
-	log.Info("application stopped")
+	log.Info(logs.FrostFSIRApplicationStopped)
 }
 
 func initHTTPServers(cfg *viper.Viper, log *logger.Logger) []*httputil.Server {
@@ -29,6 +29,7 @@ import (
 	objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
 	replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
 	tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -342,13 +343,13 @@ type internals struct {
 func (c *cfg) startMaintenance() {
 	c.isMaintenance.Store(true)
 	c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE)
-	c.log.Info("started local node's maintenance")
+	c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance)
 }
 
 // stops node's maintenance.
 func (c *internals) stopMaintenance() {
 	c.isMaintenance.Store(false)
-	c.log.Info("stopped local node's maintenance")
+	c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance)
 }
 
 // IsMaintenance checks if storage node is under maintenance.
@@ -881,10 +882,10 @@ func initLocalStorage(c *cfg) {
 	for _, optsWithMeta := range c.shardOpts() {
 		id, err := ls.AddShard(append(optsWithMeta.shOpts, shard.WithTombstoneSource(tombstoneSource))...)
 		if err != nil {
-			c.log.Error("failed to attach shard to engine", zap.Error(err))
+			c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
 		} else {
 			shardsAttached++
-			c.log.Info("shard attached to engine", zap.Stringer("id", id))
+			c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id))
 		}
 	}
 	if shardsAttached == 0 {
@@ -894,15 +895,15 @@ func initLocalStorage(c *cfg) {
 	c.cfgObject.cfgLocalStorage.localStorage = ls
 
 	c.onShutdown(func() {
-		c.log.Info("closing components of the storage engine...")
+		c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine)
 
 		err := ls.Close()
 		if err != nil {
-			c.log.Info("storage engine closing failure",
+			c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure,
 				zap.String("error", err.Error()),
 			)
 		} else {
-			c.log.Info("all components of the storage engine closed successfully")
+			c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
 		}
 	})
 }
@@ -976,11 +977,11 @@ func (c *cfg) bootstrap() error {
 	// switch to online except when under maintenance
 	st := c.cfgNetmap.state.controlNetmapStatus()
 	if st == control.NetmapStatus_MAINTENANCE {
-		c.log.Info("bootstrapping with the maintenance state")
+		c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
 		return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
 	}
 
-	c.log.Info("bootstrapping with online state",
+	c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
 		zap.Stringer("previous", st),
 	)
 
@@ -1015,32 +1016,32 @@ func (c *cfg) signalWatcher(ctx context.Context) {
 			case syscall.SIGHUP:
 				c.reloadConfig(ctx)
 			case syscall.SIGTERM, syscall.SIGINT:
-				c.log.Info("termination signal has been received, stopping...")
+				c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping)
 				// TODO (@acid-ant): #49 need to cover case when stuck at the middle(node health UNDEFINED or STARTING)
 
 				c.shutdown()
 
-				c.log.Info("termination signal processing is complete")
+				c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete)
 				return
 			}
 		case err := <-c.internalErr: // internal application error
-			c.log.Warn("internal application error",
+			c.log.Warn(logs.FrostFSNodeInternalApplicationError,
 				zap.String("message", err.Error()))
 
 			c.shutdown()
 
-			c.log.Info("internal error processing is complete")
+			c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete)
 			return
 		}
 	}
 }
 
 func (c *cfg) reloadConfig(ctx context.Context) {
-	c.log.Info("SIGHUP has been received, rereading configuration...")
+	c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration)
 
 	err := c.readConfig(c.appCfg)
 	if err != nil {
-		c.log.Error("configuration reading", zap.Error(err))
+		c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err))
 		return
 	}
 
@@ -1052,7 +1053,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 
 	logPrm, err := c.loggerPrm()
 	if err != nil {
-		c.log.Error("logger configuration preparation", zap.Error(err))
+		c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err))
 		return
 	}
 
@@ -1060,7 +1061,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 	components = append(components, dCmp{"tracing", func() error {
 		updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
 		if updated {
-			c.log.Info("tracing configation updated")
+			c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
 		}
 		return err
 	}})
@@ -1085,20 +1086,20 @@ func (c *cfg) reloadConfig(ctx context.Context) {
 
 	err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
 	if err != nil {
-		c.log.Error("storage engine configuration update", zap.Error(err))
+		c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
 		return
 	}
 
 	for _, component := range components {
 		err = component.reloadFunc()
 		if err != nil {
-			c.log.Error("updated configuration applying",
+			c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
 				zap.String("component", component.name),
 				zap.Error(err))
 		}
 	}
 
-	c.log.Info("configuration has been reloaded successfully")
+	c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
 }
 
 func (c *cfg) shutdown() {
@@ -11,6 +11,7 @@ import (
 	containerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
 	containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
 	containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -136,13 +137,13 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 		} else {
 			// unlike removal, we expect successful receive of the container
 			// after successful creation, so logging can be useful
-			c.log.Error("read newly created container after the notification",
+			c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
 				zap.Stringer("id", ev.ID),
 				zap.Error(err),
 			)
 		}
 
-		c.log.Debug("container creation event's receipt",
+		c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
 			zap.Stringer("id", ev.ID),
 		)
 	})
@@ -161,7 +162,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 
 		cachedContainerStorage.handleRemoval(ev.ID)
 
-		c.log.Debug("container removal event's receipt",
+		c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
 			zap.Stringer("id", ev.ID),
 		)
 	})
@@ -295,7 +296,7 @@ type morphLoadWriter struct {
 }
 
 func (w *morphLoadWriter) Put(a containerSDK.SizeEstimation) error {
-	w.log.Debug("save used space announcement in contract",
+	w.log.Debug(logs.FrostFSNodeSaveUsedSpaceAnnouncementInContract,
 		zap.Uint64("epoch", a.Epoch()),
 		zap.Stringer("cid", a.Container()),
 		zap.Uint64("size", a.Value()),
@@ -458,7 +459,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
 	for i := range idList {
 		sz, err := engine.ContainerSize(d.engine, idList[i])
 		if err != nil {
-			d.log.Debug("failed to calculate container size in storage engine",
+			d.log.Debug(logs.FrostFSNodeFailedToCalculateContainerSizeInStorageEngine,
 				zap.Stringer("cid", idList[i]),
 				zap.String("error", err.Error()),
 			)
@@ -466,7 +467,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr
 			continue
 		}
 
-		d.log.Debug("container size in storage engine calculated successfully",
+		d.log.Debug(logs.FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully,
 			zap.Uint64("size", sz),
 			zap.Stringer("cid", idList[i]),
 		)
@@ -5,6 +5,7 @@ import (
 	"net"
 
 	controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
 	controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
@@ -52,7 +53,7 @@ func initControlService(c *cfg) {
 
 	lis, err := net.Listen("tcp", endpoint)
 	if err != nil {
-		c.log.Error("can't listen gRPC endpoint (control)", zap.Error(err))
+		c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err))
 		return
 	}
 
@@ -9,6 +9,7 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
 	grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
@@ -33,7 +34,7 @@ func initGRPC(c *cfg) {
 		if tlsCfg != nil {
 			cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile())
 			if err != nil {
-				c.log.Error("could not read certificate from file", zap.Error(err))
+				c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err))
 				return
 			}
 
@@ -63,7 +64,7 @@ func initGRPC(c *cfg) {
 
 		lis, err := net.Listen("tcp", sc.Endpoint())
 		if err != nil {
-			c.log.Error("can't listen gRPC endpoint", zap.Error(err))
+			c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err))
 			return
 		}
 
@@ -93,14 +94,14 @@ func serveGRPC(c *cfg) {
 
 		go func() {
 			defer func() {
-				c.log.Info("stop listening gRPC endpoint",
+				c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint,
 					zap.String("endpoint", lis.Addr().String()),
 				)
 
 				c.wg.Done()
 			}()
 
-			c.log.Info("start listening gRPC endpoint",
+			c.log.Info(logs.FrostFSNodeStartListeningGRPCEndpoint,
 				zap.String("endpoint", lis.Addr().String()),
 			)
 
@@ -114,7 +115,7 @@ func serveGRPC(c *cfg) {
 func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
 	l = &logger.Logger{Logger: l.With(zap.String("name", name))}
 
-	l.Info("stopping gRPC server...")
+	l.Info(logs.FrostFSNodeStoppingGRPCServer)
 
 	// GracefulStop() may freeze forever, see #1270
 	done := make(chan struct{})
@@ -126,9 +127,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) {
 	select {
 	case <-done:
 	case <-time.After(1 * time.Minute):
-		l.Info("gRPC cannot shutdown gracefully, forcing stop")
+		l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop)
 		s.Stop()
 	}
 
-	l.Info("gRPC server stopped successfully")
+	l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully)
 }
@@ -8,6 +8,7 @@ import (
 	"os"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
 	"go.uber.org/zap"
@@ -142,14 +143,14 @@ func bootUp(ctx context.Context, c *cfg) {
 }
 
 func wait(c *cfg, cancel func()) {
-	c.log.Info("application started",
+	c.log.Info(logs.CommonApplicationStarted,
 		zap.String("version", misc.Version))
 
 	<-c.done // graceful shutdown
 
 	cancel()
 
-	c.log.Debug("waiting for all processes to stop")
+	c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop)
 
 	c.wg.Wait()
 }
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
 	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -49,7 +50,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 		client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
 	)
 	if err != nil {
-		c.log.Info("failed to create neo RPC client",
+		c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,
 			zap.Any("endpoints", addresses),
 			zap.String("error", err.Error()),
 		)
@@ -58,12 +59,12 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 	}
 
 	c.onShutdown(func() {
-		c.log.Info("closing morph components...")
+		c.log.Info(logs.FrostFSNodeClosingMorphComponents)
 		cli.Close()
 	})
 
 	if err := cli.SetGroupSignerScope(); err != nil {
-		c.log.Info("failed to set group signer scope, continue with Global", zap.Error(err))
+		c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err))
 	}
 
 	c.cfgMorph.client = cli
@@ -80,7 +81,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 		fatalOnErr(err)
 	}
 
-	c.log.Info("notary support",
+	c.log.Info(logs.FrostFSNodeNotarySupport,
 		zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled),
 	)
 
@@ -95,7 +96,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 		msPerBlock, err := c.cfgMorph.client.MsPerBlock()
 		fatalOnErr(err)
 		c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond
-		c.log.Debug("morph.cache_ttl fetched from network", zap.Duration("value", c.cfgMorph.cacheTTL))
+		c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL))
 	}
 
 	if c.cfgMorph.cacheTTL < 0 {
@@ -122,7 +123,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) {
 	// non-error deposit with an empty TX hash means
 	// that the deposit has already been made; no
 	// need to wait it.
-	c.log.Info("notary deposit has already been made")
+	c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade)
 	return
 	}
 
@@ -190,7 +191,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 	fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
 	if err != nil {
 		fromSideChainBlock = 0
-		c.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
+		c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
 	}
 
 	subs, err = subscriber.New(ctx, &subscriber.Params{
@@ -215,7 +216,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 	setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) {
 		res, err := netmapEvent.ParseNewEpoch(src)
 		if err == nil {
-			c.log.Info("new epoch event from sidechain",
+			c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain,
 				zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()),
 			)
 		}
@@ -226,11 +227,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
 	registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers)
 
 	registerBlockHandler(lis, func(block *block.Block) {
-		c.log.Debug("new block", zap.Uint32("index", block.Index))
+		c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index))
 
 		err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index)
 		if err != nil {
-			c.log.Warn("can't update persistent state",
+			c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState,
 				zap.String("chain", "side"),
 				zap.Uint32("block_index", block.Index))
 		}
@@ -8,6 +8,7 @@ import (
 
 	netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc"
 	nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/metrics"
 	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -193,7 +194,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 		if (n-c.cfgNetmap.startEpoch)%reBootstrapInterval == 0 {
 			err := c.bootstrap()
 			if err != nil {
-				c.log.Warn("can't send re-bootstrap tx", zap.Error(err))
+				c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err))
 			}
 		}
 	})
@@ -203,7 +204,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 
 		ni, err := c.netmapLocalNodeState(e)
 		if err != nil {
-			c.log.Error("could not update node state on new epoch",
+			c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
 				zap.Uint64("epoch", e),
 				zap.String("error", err.Error()),
 			)
@@ -218,7 +219,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
 	addNewEpochAsyncNotificationHandler(c, func(ev event.Event) {
 		_, err := makeNotaryDeposit(c)
 		if err != nil {
-			c.log.Error("could not make notary deposit",
+			c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit,
 				zap.String("error", err.Error()),
 			)
 		}
@@ -298,7 +299,7 @@ func initNetmapState(c *cfg) {
 		}
 	}
 
	c.log.Info(logs.FrostFSNodeInitialNetworkState,
 		zap.Uint64("epoch", epoch),
 		zap.String("state", stateWord),
 	)
@@ -6,6 +6,7 @@ import (
 	"fmt"
 
 	nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -28,7 +29,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
 
 	listRes, err := n.e.ListContainers(engine.ListContainersPrm{})
 	if err != nil {
-		log.Error("notificator: could not list containers", zap.Error(err))
+		log.Error(logs.FrostFSNodeNotificatorCouldNotListContainers, zap.Error(err))
 		return
 	}
 
@@ -43,7 +44,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
 
 		selectRes, err := n.e.Select(selectPrm)
 		if err != nil {
-			log.Error("notificator: could not select objects from container",
+			log.Error(logs.FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer,
 				zap.Stringer("cid", c),
 				zap.Error(err),
 			)
@@ -53,7 +54,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
 		for _, a := range selectRes.AddressList() {
 			err = n.processAddress(ctx, a, handler)
 			if err != nil {
-				log.Error("notificator: could not process object",
+				log.Error(logs.FrostFSNodeNotificatorCouldNotProcessObject,
 					zap.Stringer("address", a),
 					zap.Error(err),
 				)
@@ -62,7 +63,7 @@ func (n *notificationSource) Iterate(ctx context.Context, epoch uint64, handler
 		}
 	}
 
-	log.Debug("notificator: finished processing object notifications")
+	log.Debug(logs.FrostFSNodeNotificatorFinishedProcessingObjectNotifications)
 }
 
 func (n *notificationSource) processAddress(
@@ -101,7 +102,7 @@ type notificationWriter struct {
 
 func (n notificationWriter) Notify(topic string, address oid.Address) {
 	if err := n.w.Notify(topic, address); err != nil {
-		n.l.Warn("could not write object notification",
+		n.l.Warn(logs.FrostFSNodeCouldNotWriteObjectNotification,
 			zap.Stringer("address", address),
 			zap.String("topic", topic),
 			zap.Error(err),
@@ -11,6 +11,7 @@ import (
 	metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics"
 	policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer"
 	replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
 	containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -62,7 +63,7 @@ type objectSvc struct {
 func (c *cfg) MaxObjectSize() uint64 {
 	sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
 	if err != nil {
-		c.log.Error("could not get max object size value",
+		c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
 			zap.String("error", err.Error()),
 		)
 	}
@@ -259,7 +260,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputati
 
 		_, err := ls.Inhume(ctx, inhumePrm)
 		if err != nil {
-			c.log.Warn("could not inhume mark redundant copy as garbage",
+			c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
 				zap.String("error", err.Error()),
 			)
 		}
@@ -600,7 +601,7 @@ func (c *reputationClientConstructor) Get(info coreclient.NodeInfo) (coreclient.
 			}
 		}
 	} else {
-		c.log.Warn("could not get latest network map to overload the client",
+		c.log.Warn(logs.FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient,
 			zap.String("error", err.Error()),
 		)
 	}
@@ -11,6 +11,7 @@ import (
 	intermediatereputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/intermediate"
 	localreputation "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/local"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/ticker"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -105,7 +106,7 @@ func addReputationReportHandler(ctx context.Context, c *cfg) {
 	addNewEpochAsyncNotificationHandler(
 		c,
 		func(ev event.Event) {
-			c.log.Debug("start reporting reputation on new epoch event")
+			c.log.Debug(logs.FrostFSNodeStartReportingReputationOnNewEpochEvent)
 
 			var reportPrm localtrustcontroller.ReportPrm
 
@@ -127,13 +128,13 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController
 
 			duration, err := c.cfgNetmap.wrapper.EpochDuration()
 			if err != nil {
-				log.Debug("could not fetch epoch duration", zap.Error(err))
+				log.Debug(logs.FrostFSNodeCouldNotFetchEpochDuration, zap.Error(err))
 				return
 			}
 
 			iterations, err := c.cfgNetmap.wrapper.EigenTrustIterations()
 			if err != nil {
-				log.Debug("could not fetch iteration number", zap.Error(err))
+				log.Debug(logs.FrostFSNodeCouldNotFetchIterationNumber, zap.Error(err))
 				return
 			}
 
@@ -145,7 +146,7 @@ func addEigenTrustEpochHandler(ctx context.Context, c *cfg, eigenTrustController
 				)
 			})
 			if err != nil {
-				log.Debug("could not create fixed epoch timer", zap.Error(err))
+				log.Debug(logs.FrostFSNodeCouldNotCreateFixedEpochTimer, zap.Error(err))
 				return
 			}
 
@@ -3,6 +3,7 @@ package common
 import (
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -71,16 +72,16 @@ func NewRemoteTrustProvider(prm RemoteProviderPrm) *RemoteTrustProvider {
 }
 
 func (rtp *RemoteTrustProvider) InitRemote(srv reputationcommon.ServerInfo) (reputationcommon.WriterProvider, error) {
-	rtp.log.Debug("initializing remote writer provider")
+	rtp.log.Debug(logs.CommonInitializingRemoteWriterProvider)
 
 	if srv == nil {
-		rtp.log.Debug("route has reached dead-end provider")
+		rtp.log.Debug(logs.CommonRouteHasReachedDeadendProvider)
 		return rtp.deadEndProvider, nil
 	}
 
 	if rtp.netmapKeys.IsLocalKey(srv.PublicKey()) {
 		// if local => return no-op writer
-		rtp.log.Debug("initializing no-op writer provider")
+		rtp.log.Debug(logs.CommonInitializingNoopWriterProvider)
 		return trustcontroller.SimpleWriterProvider(new(NopReputationWriter)), nil
 	}
 
@@ -3,6 +3,7 @@ package intermediate
 import (
 	"context"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
@@ -31,7 +32,7 @@ type ConsumerTrustWriter struct {
 }
 
 func (w *ConsumerTrustWriter) Write(_ context.Context, t reputation.Trust) error {
-	w.log.Debug("writing received consumer's trusts",
+	w.log.Debug(logs.IntermediateWritingReceivedConsumersTrusts,
 		zap.Uint64("epoch", w.iterInfo.Epoch()),
 		zap.Uint32("iteration", w.iterInfo.I()),
 		zap.Stringer("trusting_peer", t.TrustingPeer()),
@@ -4,6 +4,7 @@ import (
 	"crypto/ecdsa"
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust"
 	eigentrustcalc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/calculator"
@@ -71,7 +72,7 @@ type FinalWriter struct {
 }
 
 func (fw FinalWriter) WriteIntermediateTrust(t eigentrust.IterationTrust) error {
-	fw.l.Debug("start writing global trusts to contract")
+	fw.l.Debug(logs.IntermediateStartWritingGlobalTrustsToContract)
 
 	args := repClient.PutPrm{}
 
@@ -3,6 +3,7 @@ package intermediate
 import (
 	"context"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/eigentrust/storage/daughters"
@@ -27,7 +28,7 @@ type DaughterTrustWriter struct {
 }
 
 func (w *DaughterTrustWriter) Write(_ context.Context, t reputation.Trust) error {
-	w.log.Debug("writing received daughter's trusts",
+	w.log.Debug(logs.IntermediateWritingReceivedDaughtersTrusts,
 		zap.Uint64("epoch", w.ep.Epoch()),
 		zap.Stringer("trusting_peer", t.TrustingPeer()),
 		zap.Stringer("trusted_peer", t.Peer()),
@@ -6,6 +6,7 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
 	internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -92,7 +93,7 @@ func (rtp *RemoteTrustWriter) Write(ctx context.Context, t reputation.Trust) err
 	epoch := rtp.iterInfo.Epoch()
 	i := rtp.iterInfo.I()
 
-	rtp.log.Debug("announcing trust",
+	rtp.log.Debug(logs.IntermediateAnnouncingTrust,
 		zap.Uint64("epoch", epoch),
 		zap.Uint32("iteration", i),
 		zap.Stringer("trusting_peer", t.TrustingPeer()),
@@ -6,6 +6,7 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/common"
 	internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/reputation/internal/client"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -96,7 +97,7 @@ func (rtp *RemoteTrustWriter) Write(_ context.Context, t reputation.Trust) error
 func (rtp *RemoteTrustWriter) Close(ctx context.Context) error {
 	epoch := rtp.ep.Epoch()
 
-	rtp.log.Debug("announcing trusts",
+	rtp.log.Debug(logs.LocalAnnouncingTrusts,
 		zap.Uint64("epoch", epoch),
 	)
 
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"errors"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation"
 	reputationcommon "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/reputation/common"
@@ -27,7 +28,7 @@ type TrustStorage struct {
 func (s *TrustStorage) InitIterator(ep reputationcommon.EpochProvider) (trustcontroller.Iterator, error) {
 	epoch := ep.Epoch()
 
-	s.Log.Debug("initializing iterator over trusts",
+	s.Log.Debug(logs.LocalInitializingIteratorOverTrusts,
 		zap.Uint64("epoch", epoch),
 	)
 
@@ -6,6 +6,7 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
 	tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"go.uber.org/zap"
 )
 
@@ -14,7 +15,7 @@ func initTracing(ctx context.Context, c *cfg) {
 
 	_, err := tracing.Setup(ctx, *conf)
 	if err != nil {
-		c.log.Error("failed init tracing", zap.Error(err))
+		c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
 	}
 
 	c.closers = append(c.closers, closer{
@@ -24,7 +25,7 @@ func initTracing(ctx context.Context, c *cfg) {
 			defer cancel()
 			err := tracing.Shutdown(ctx) //cfg context cancels before close
 			if err != nil {
-				c.log.Error("failed shutdown tracing", zap.Error(err))
+				c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err))
 			}
 		},
 	})
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
 	containerClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
@@ -37,7 +38,7 @@ func (c cnrSource) List() ([]cid.ID, error) {
 func initTreeService(c *cfg) {
 	treeConfig := treeconfig.Tree(c.appCfg)
 	if !treeConfig.Enabled() {
-		c.log.Info("tree service is not enabled, skip initialization")
+		c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization)
 		return
 	}
 
@@ -68,7 +69,7 @@ func initTreeService(c *cfg) {
 		addNewEpochNotificationHandler(c, func(_ event.Event) {
 			err := c.treeService.SynchronizeAll()
 			if err != nil {
-				c.log.Error("could not synchronize Tree Service", zap.Error(err))
+				c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
 			}
 		})
 	} else {
@@ -79,7 +80,7 @@ func initTreeService(c *cfg) {
 			for range tick.C {
 				err := c.treeService.SynchronizeAll()
 				if err != nil {
-					c.log.Error("could not synchronize Tree Service", zap.Error(err))
+					c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err))
 					if errors.Is(err, tree.ErrShuttingDown) {
 						return
 					}
@@ -92,11 +93,11 @@ func initTreeService(c *cfg) {
 		ev := e.(containerEvent.DeleteSuccess)
 
 		// This is executed asynchronously, so we don't care about the operation taking some time.
-		c.log.Debug("removing all trees for container", zap.Stringer("cid", ev.ID))
+		c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID))
 		err := c.treeService.DropTree(context.Background(), ev.ID, "")
 		if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) {
 			// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
-			c.log.Error("container removal event received, but trees weren't removed",
+			c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
 				zap.Stringer("cid", ev.ID),
 				zap.String("error", err.Error()))
 		}
643
internal/logs/logs.go
Normal file
643
internal/logs/logs.go
Normal file
|
@ -0,0 +1,643 @@
|
||||||
|
package logs
|
||||||
|
|
||||||
|
const (
|
||||||
|
InnerringAmountCanNotBeRepresentedAsAnInt64 = "amount can not be represented as an int64" // Error in ../node/pkg/innerring/settlement.go
	InnerringCantGetUsedSpaceEstimation = "can't get used space estimation" // Warn in ../node/pkg/innerring/settlement.go
	InnerringSubnetCreationQueueFailure = "subnet creation queue failure" // Error in ../node/pkg/innerring/subnet.go
	InnerringDiscardSubnetCreation = "discard subnet creation" // Info in ../node/pkg/innerring/subnet.go
	InnerringApproveSubnetCreation = "approve subnet creation" // Error in ../node/pkg/innerring/subnet.go
	InnerringSubnetRemovalHandlingFailure = "subnet removal handling failure" // Error in ../node/pkg/innerring/subnet.go
	InnerringGettingNetmapCandidates = "getting netmap candidates" // Error in ../node/pkg/innerring/subnet.go
	InnerringUnmarshallingRemovedSubnetID = "unmarshalling removed subnet ID" // Error in ../node/pkg/innerring/subnet.go
	InnerringIteratingNodesSubnets = "iterating node's subnets" // Error in ../node/pkg/innerring/subnet.go
	InnerringRemovingNodeFromNetmapCandidates = "removing node from netmap candidates" // Debug in ../node/pkg/innerring/subnet.go
	InnerringRemovingNodeFromCandidates = "removing node from candidates" // Error in ../node/pkg/innerring/subnet.go
	InnerringRemovingSubnetFromTheNode = "removing subnet from the node" // Debug in ../node/pkg/innerring/subnet.go
	InnerringUpdatingSubnetInfo = "updating subnet info" // Error in ../node/pkg/innerring/subnet.go
	InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations" // Debug in ../node/pkg/innerring/blocktimer.go
	InnerringCantStopEpochEstimation = "can't stop epoch estimation" // Warn in ../node/pkg/innerring/blocktimer.go
	InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain" // Error in ../node/pkg/innerring/notary.go
	InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain" // Error in ../node/pkg/innerring/notary.go
	InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/innerring/notary.go
	InnerringCantGetInnerRingIndex = "can't get inner ring index" // Error in ../node/pkg/innerring/state.go
	InnerringCantGetInnerRingSize = "can't get inner ring size" // Error in ../node/pkg/innerring/state.go
	InnerringCantGetAlphabetIndex = "can't get alphabet index" // Error in ../node/pkg/innerring/state.go
	InnerringIgnoreValidatorVoteNodeNotInAlphabetRange = "ignore validator vote: node not in alphabet range" // Info in ../node/pkg/innerring/state.go
	InnerringIgnoreValidatorVoteEmptyValidatorsList = "ignore validator vote: empty validators list" // Info in ../node/pkg/innerring/state.go
	InnerringCantInvokeVoteMethodInAlphabetContract = "can't invoke vote method in alphabet contract" // Warn in ../node/pkg/innerring/state.go
	InnerringCantGetLastProcessedMainChainBlockNumber = "can't get last processed main chain block number" // Warn in ../node/pkg/innerring/initialization.go
	InnerringNotarySupport = "notary support" // Info in ../node/pkg/innerring/initialization.go
	InnerringAlphabetKeysSyncIsDisabled = "alphabet keys sync is disabled" // Debug in ../node/pkg/innerring/initialization.go
	InnerringNoControlServerEndpointSpecified = "no Control server endpoint specified, service is disabled" // Info in ../node/pkg/innerring/initialization.go
	InnerringCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/pkg/innerring/initialization.go
	InnerringFailedToSetGroupSignerScope = "failed to set group signer scope, continue with Global" // Info in ../node/pkg/innerring/initialization.go
	InnerringCantVoteForPreparedValidators = "can't vote for prepared validators" // Warn in ../node/pkg/innerring/innerring.go
	InnerringNewBlock = "new block" // Debug in ../node/pkg/innerring/innerring.go
	InnerringCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/pkg/innerring/innerring.go
	InnerringCloserError = "closer error" // Warn in ../node/pkg/innerring/innerring.go
	InnerringReadConfigFromBlockchain = "read config from blockchain" // Debug in ../node/pkg/innerring/innerring.go
	InnerringCantSetupRemoteConnection = "can't setup remote connection" // Warn in ../node/pkg/innerring/rpc.go
	InnerringCantGetStorageGroupObject = "can't get storage group object" // Warn in ../node/pkg/innerring/rpc.go
	NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications" // Debug in ../node/pkg/services/notificator/service.go
	NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification" // Debug in ../node/pkg/services/notificator/service.go
	PolicerCouldNotGetContainer = "could not get container" // Error in ../node/pkg/services/policer/check.go
	PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container" // Error in ../node/pkg/services/policer/check.go
	PolicerCouldNotBuildPlacementVectorForObject = "could not build placement vector for object" // Error in ../node/pkg/services/policer/check.go
	PolicerRedundantLocalObjectCopyDetected = "redundant local object copy detected" // Info in ../node/pkg/services/policer/check.go
	PolicerReceiveObjectHeaderToCheckPolicyCompliance = "receive object header to check policy compliance" // Error in ../node/pkg/services/policer/check.go
	PolicerConsiderNodeUnderMaintenanceAsOK = "consider node under maintenance as OK" // Debug in ../node/pkg/services/policer/check.go
	PolicerShortageOfObjectCopiesDetected = "shortage of object copies detected" // Debug in ../node/pkg/services/policer/check.go
	PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance = "some of the copies are stored on nodes under maintenance, save local copy" // Debug in ../node/pkg/services/policer/check.go
	PolicerRoutineStopped = "routine stopped" // Info in ../node/pkg/services/policer/process.go
	PolicerFailureAtObjectSelectForReplication = "failure at object select for replication" // Warn in ../node/pkg/services/policer/process.go
	PolicerPoolSubmission = "pool submission" // Warn in ../node/pkg/services/policer/process.go
	PolicerTuneReplicationCapacity = "tune replication capacity" // Debug in ../node/pkg/services/policer/process.go
	ReplicatorFinishWork = "finish work" // Debug in ../node/pkg/services/replicator/process.go
	ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage" // Error in ../node/pkg/services/replicator/process.go
	ReplicatorCouldNotReplicateObject = "could not replicate object" // Error in ../node/pkg/services/replicator/process.go
	ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" // Debug in ../node/pkg/services/replicator/process.go
	SessionServingRequest = "serving request..." // Debug in ../node/pkg/services/session/executor.go
	TreeRedirectingTreeServiceQuery = "redirecting tree service query" // Debug in ../node/pkg/services/tree/redirect.go
	TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL" // Debug in ../node/pkg/services/tree/signature.go
	TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
	TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree" // Warn in ../node/pkg/services/tree/sync.go
	TreeSynchronizeTree = "synchronize tree" // Debug in ../node/pkg/services/tree/sync.go
	TreeFailedToRunTreeSynchronizationOverAllNodes = "failed to run tree synchronization over all nodes" // Warn in ../node/pkg/services/tree/sync.go
	TreeSyncingTrees = "syncing trees..." // Debug in ../node/pkg/services/tree/sync.go
	TreeCouldNotFetchContainers = "could not fetch containers" // Error in ../node/pkg/services/tree/sync.go
	TreeTreesHaveBeenSynchronized = "trees have been synchronized" // Debug in ../node/pkg/services/tree/sync.go
	TreeSyncingContainerTrees = "syncing container trees..." // Debug in ../node/pkg/services/tree/sync.go
	TreeCouldNotSyncTrees = "could not sync trees" // Error in ../node/pkg/services/tree/sync.go
	TreeContainerTreesHaveBeenSynced = "container trees have been synced" // Debug in ../node/pkg/services/tree/sync.go
	TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization" // Error in ../node/pkg/services/tree/sync.go
	TreeRemovingRedundantTrees = "removing redundant trees..." // Debug in ../node/pkg/services/tree/sync.go
	TreeCouldNotRemoveRedundantTree = "could not remove redundant tree" // Error in ../node/pkg/services/tree/sync.go
	TreeCouldNotCalculateContainerNodes = "could not calculate container nodes" // Error in ../node/pkg/services/tree/sync.go
	TreeFailedToApplyReplicatedOperation = "failed to apply replicated operation" // Error in ../node/pkg/services/tree/replicator.go
	TreeDoNotSendUpdateToTheNode = "do not send update to the node" // Debug in ../node/pkg/services/tree/replicator.go
	TreeFailedToSentUpdateToTheNode = "failed to send update to the node" // Warn in ../node/pkg/services/tree/replicator.go
	TreeErrorDuringReplication = "error during replication" // Error in ../node/pkg/services/tree/replicator.go
	PersistentCouldNotGetSessionFromPersistentStorage = "could not get session from persistent storage" // Error in ../node/pkg/services/session/storage/persistent/storage.go
	PersistentCouldNotDeleteSToken = "could not delete token" // Error in ../node/pkg/services/session/storage/persistent/storage.go
	PersistentCouldNotCleanUpExpiredTokens = "could not clean up expired tokens" // Error in ../node/pkg/services/session/storage/persistent/storage.go
	CommonStartBuildingManagers = "start building managers" // Debug in ../node/pkg/services/reputation/common/managers.go
	ControllerReportIsAlreadyStarted = "report is already started" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
	ControllerStartingToReportLocalTrustValues = "starting to report local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
	ControllerCouldNotInitializeIteratorOverLocalTrustValues = "could not initialize iterator over local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
	ControllerCouldNotInitializeLocalTrustTarget = "could not initialize local trust target" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
	ControllerIteratorOverLocalTrustFailed = "iterator over local trust failed" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
	ControllerCouldNotFinishWritingLocalTrustValues = "could not finish writing local trust values" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
	ControllerReportingSuccessfullyFinished = "reporting successfully finished" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
	ControllerReportingSuccessfullyInterrupted = "reporting successfully interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
	ControllerReportingIsNotStartedOrAlreadyInterrupted = "reporting is not started or already interrupted" // Debug in ../node/pkg/services/reputation/local/controller/calls.go
	RoutesBuildingNextStageForLocalTrustRoute = "building next stage for local trust route" // Debug in ../node/pkg/services/reputation/local/routes/calls.go
	CalculatorFailedToGetAlphaParam = "failed to get alpha param" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorConsumersTrustIteratorsInitFailure = "consumers trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorWorkerPoolSubmitFailure = "worker pool submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorIterateDaughtersConsumersFailed = "iterate daughter's consumers failed" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorGetInitialTrustFailure = "get initial trust failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorDaughterTrustIteratorsInitFailure = "daughter trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorIterateOverDaughtersTrustsFailure = "iterate over daughter's trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorInitWriterFailure = "init writer failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorWriteFinalResultFailure = "write final result failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorWriteValueFailure = "write value failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorIterateDaughterTrustsFailure = "iterate daughter trusts failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorCouldNotCloseWriter = "could not close writer" // Error in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorAllDaughtersTrustIteratorsInitFailure = "all daughters trust iterator's init failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	CalculatorIterateOverAllDaughtersFailure = "iterate over all daughters failure" // Debug in ../node/pkg/services/reputation/eigentrust/calculator/calls.go
	ControllerCouldNotGetEigenTrustIterationNumber = "could not get EigenTrust iteration number" // Error in ../node/pkg/services/reputation/eigentrust/controller/calls.go
	ControllerIterationSubmitFailure = "iteration submit failure" // Debug in ../node/pkg/services/reputation/eigentrust/controller/calls.go
	RoutesBuildingNextStageForTrustRoute = "building next stage for trust route" // Debug in ../node/pkg/services/reputation/eigentrust/routes/calls.go
	RouterCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/reputation/common/router/calls.go
	RouterCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go
	RouterCouldNotWriteTheValue = "could not write the value" // Debug in ../node/pkg/services/reputation/common/router/calls.go
	RouterCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/reputation/common/router/calls.go
	TombstoneCouldNotGetTheTombstoneTheSource = "tombstone getter: could not get the tombstone from the source" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
	TombstoneCouldNotParseTombstoneExpirationEpoch = "tombstone getter: could not parse tombstone expiration epoch" // Warn in ../node/pkg/services/object_manager/tombstone/checker.go
	DeleteRequestIsNotRolledOverToTheContainer = "request is not rolled over to the container" // Debug in ../node/pkg/services/object/delete/container.go
	DeleteServingRequest = "serving request..." // Debug in ../node/pkg/services/object/delete/delete.go
	DeleteOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/delete/delete.go
	DeleteOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/delete/delete.go
	DeleteCouldNotComposeSplitInfo = "could not compose split info" // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteNoSplitInfoObjectIsPHY = "no split info, object is PHY" // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteAssemblingChain = "assembling chain..." // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteCouldNotGetPreviousSplitElement = "could not get previous split element" // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteCollectingChildren = "collecting children..." // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteCouldNotCollectObjectChildren = "could not collect object children" // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteSupplementBySplitID = "supplement by split ID" // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteCouldNotSearchForSplitChainMembers = "could not search for split chain members" // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteCouldNotMarshalTombstoneStructure = "could not marshal tombstone structure" // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteCouldNotSaveTheTombstone = "could not save the tombstone" // Debug in ../node/pkg/services/object/delete/exec.go
	DeleteFormingTombstoneStructure = "forming tombstone structure..." // Debug in ../node/pkg/services/object/delete/local.go
	DeleteTombstoneStructureSuccessfullyFormedSaving = "tombstone structure successfully formed, saving..." // Debug in ../node/pkg/services/object/delete/local.go
	DeleteCouldNotReadTombstoneLifetimeConfig = "could not read tombstone lifetime config" // Debug in ../node/pkg/services/object/delete/local.go
	DeleteFormingSplitInfo = "forming split info..." // Debug in ../node/pkg/services/object/delete/local.go
	DeleteSplitInfoSuccessfullyFormedCollectingMembers = "split info successfully formed, collecting members..." // Debug in ../node/pkg/services/object/delete/local.go
	DeleteMembersSuccessfullyCollected = "members successfully collected" // Debug in ../node/pkg/services/object/delete/local.go
	GetProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/get/remote.go
	GetRemoteCallFailed = "remote call failed" // Debug in ../node/pkg/services/object/get/remote.go
	GetCanNotAssembleTheObject = "can not assemble the object" // Debug in ../node/pkg/services/object/get/assemble.go
	GetTryingToAssembleTheObject = "trying to assemble the object..." // Debug in ../node/pkg/services/object/get/assemble.go
	GetAssemblingSplittedObject = "assembling splitted object..." // Debug in ../node/pkg/services/object/get/assemble.go
	GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" // Debug in ../node/pkg/services/object/get/assemble.go
	GetFailedToAssembleSplittedObject = "failed to assemble splitted object" // Warn in ../node/pkg/services/object/get/assemble.go
	GetCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/get/exec.go
	GetCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/get/exec.go
	GetCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/get/exec.go
	GetCouldNotWriteHeader = "could not write header" // Debug in ../node/pkg/services/object/get/exec.go
	GetCouldNotWritePayloadChunk = "could not write payload chunk" // Debug in ../node/pkg/services/object/get/exec.go
	GetLocalGetFailed = "local get failed" // Debug in ../node/pkg/services/object/get/local.go
	GetReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/get/container.go
	GetTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/get/container.go
	GetProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/get/container.go
	GetNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/get/container.go
	GetInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/get/container.go
	GetCompletingTheOperation = "completing the operation" // Debug in ../node/pkg/services/object/get/container.go
	GetServingRequest = "serving request..." // Debug in ../node/pkg/services/object/get/get.go
	GetOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/get/get.go
	GetRequestedObjectWasMarkedAsRemoved = "requested object was marked as removed" // Debug in ../node/pkg/services/object/get/get.go
	GetRequestedObjectIsVirtual = "requested object is virtual" // Debug in ../node/pkg/services/object/get/get.go
	GetRequestedRangeIsOutOfObjectBounds = "requested range is out of object bounds" // Debug in ../node/pkg/services/object/get/get.go
	GetOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/get/get.go
	PutAdditionalContainerBroadcastFailure = "additional container broadcast failure" // Error in ../node/pkg/services/object/put/distributed.go
	SearchReturnResultDirectly = "return result directly" // Debug in ../node/pkg/services/object/search/container.go
	SearchTryingToExecuteInContainer = "trying to execute in container..." // Debug in ../node/pkg/services/object/search/container.go
	SearchProcessEpoch = "process epoch" // Debug in ../node/pkg/services/object/search/container.go
	SearchNoMoreNodesAbortPlacementIteration = "no more nodes, abort placement iteration" // Debug in ../node/pkg/services/object/search/container.go
	SearchInterruptPlacementIterationByContext = "interrupt placement iteration by context" // Debug in ../node/pkg/services/object/search/container.go
	SearchProcessingNode = "processing node..." // Debug in ../node/pkg/services/object/search/container.go
	SearchCouldNotConstructRemoteNodeClient = "could not construct remote node client" // Debug in ../node/pkg/services/object/search/container.go
	SearchRemoteOperationFailed = "remote operation failed" // Debug in ../node/pkg/services/object/search/container.go
	SearchCouldNotGetCurrentEpochNumber = "could not get current epoch number" // Debug in ../node/pkg/services/object/search/exec.go
	SearchCouldNotGenerateContainerTraverser = "could not generate container traverser" // Debug in ../node/pkg/services/object/search/exec.go
	SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" // Debug in ../node/pkg/services/object/search/exec.go
	SearchLocalOperationFailed = "local operation failed" // Debug in ../node/pkg/services/object/search/local.go
	SearchServingRequest = "serving request..." // Debug in ../node/pkg/services/object/search/search.go
	SearchOperationFinishedWithError = "operation finished with error" // Debug in ../node/pkg/services/object/search/search.go
	SearchOperationFinishedSuccessfully = "operation finished successfully" // Debug in ../node/pkg/services/object/search/search.go
	UtilObjectServiceError = "object service error" // Error in ../node/pkg/services/object/util/log.go
	UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" // Error in ../node/pkg/services/object/util/log.go
	V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
	V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" // Debug in ../node/pkg/services/object/acl/v2/classifier.go
	NatsNatsConnectionWasLost = "nats: connection was lost" // Error in ../node/pkg/services/notificator/nats/service.go
	NatsNatsReconnectedToTheServer = "nats: reconnected to the server" // Warn in ../node/pkg/services/notificator/nats/service.go
	NatsNatsClosingConnectionAsTheContextIsDone = "nats: closing connection as the context is done" // Info in ../node/pkg/services/notificator/nats/service.go
	ControllerStartingToAnnounceTheValuesOfTheMetrics = "starting to announce the values of the metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerCouldNotInitializeIteratorOverLocallyCollectedMetrics = "could not initialize iterator over locally collected metrics" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerCouldNotInitializeAnnouncementAccumulator = "could not initialize announcement accumulator" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerIteratorOverLocallyCollectedMetricsAborted = "iterator over locally collected metrics aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerCouldNotFinishWritingLocalAnnouncements = "could not finish writing local announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerTrustAnnouncementSuccessfullyFinished = "trust announcement successfully finished" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerAnnouncementIsAlreadyStarted = "announcement is already started" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerAnnouncementSuccessfullyInterrupted = "announcement successfully interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerAnnouncementIsNotStartedOrAlreadyInterrupted = "announcement is not started or already interrupted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerCouldNotInitializeIteratorOverLocallyAccumulatedAnnouncements = "could not initialize iterator over locally accumulated announcements" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerCouldNotInitializeResultTarget = "could not initialize result target" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerIteratorOverLocalAnnouncementsAborted = "iterator over local announcements aborted" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	ControllerCouldNotFinishWritingLoadEstimations = "could not finish writing load estimations" // Debug in ../node/pkg/services/container/announcement/load/controller/calls.go
	RouteCouldNotInitializeWriterProvider = "could not initialize writer provider" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
	RouteCouldNotInitializeWriter = "could not initialize writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
	RouteCouldNotPutTheValue = "could not put the value" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
	RouteCouldNotCloseRemoteServerWriter = "could not close remote server writer" // Debug in ../node/pkg/services/container/announcement/load/route/calls.go
	AuditorCouldNotGetObjectHeaderFromCandidate = "could not get object header from candidate" // Debug in ../node/pkg/services/audit/auditor/pop.go
	AuditorCouldNotBuildPlacementForObject = "could not build placement for object" // Debug in ../node/pkg/services/audit/auditor/pop.go
	AuditorCantHeadObject = "can't head object" // Debug in ../node/pkg/services/audit/auditor/por.go
	AuditorCantConcatenateTzHash = "can't concatenate tz hash" // Debug in ../node/pkg/services/audit/auditor/por.go
	AuditorStorageGroupSizeCheckFailed = "storage group size check failed" // Debug in ../node/pkg/services/audit/auditor/por.go
	AuditorStorageGroupTzHashCheckFailed = "storage group tz hash check failed" // Debug in ../node/pkg/services/audit/auditor/por.go
	AuditorCantBuildPlacementForStorageGroupMember = "can't build placement for storage group member" // Info in ../node/pkg/services/audit/auditor/por.go
	AuditorAuditContextIsDone = "audit context is done" // Debug in ../node/pkg/services/audit/auditor/context.go
	AuditorWritingAuditReport = "writing audit report..." // Debug in ../node/pkg/services/audit/auditor/context.go
	AuditorCouldNotWriteAuditReport = "could not write audit report" // Error in ../node/pkg/services/audit/auditor/context.go
	AuditorSleepBeforeGetRangeHash = "sleep before get range hash" // Debug in ../node/pkg/services/audit/auditor/pdp.go
	AuditorCouldNotGetPayloadRangeHash = "could not get payload range hash" // Debug in ../node/pkg/services/audit/auditor/pdp.go
	TaskmanagerProcessRoutine = "process routine" // Info in ../node/pkg/services/audit/taskmanager/listen.go
	TaskmanagerStopListenerByContext = "stop listener by context" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
	TaskmanagerQueueChannelIsClosed = "queue channel is closed" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
	TaskmanagerCouldNotGeneratePDPWorkerPool = "could not generate PDP worker pool" // Error in ../node/pkg/services/audit/taskmanager/listen.go
	TaskmanagerCouldNotGeneratePoRWorkerPool = "could not generate PoR worker pool" // Error in ../node/pkg/services/audit/taskmanager/listen.go
	TaskmanagerCouldNotSubmitAuditTask = "could not submit audit task" // Warn in ../node/pkg/services/audit/taskmanager/listen.go
	ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
	ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch = "could not restore notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
	ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch = "could not restore notary notification subscription after RPC switch" // Error in ../node/pkg/morph/client/notifications.go
	ClientCouldNotEstablishConnectionToTheSwitchedRPCNode = "could not establish connection to the switched RPC node" // Warn in ../node/pkg/morph/client/multi.go
	ClientConnectionToTheNewRPCNodeHasBeenEstablished = "connection to the new RPC node has been established" // Info in ../node/pkg/morph/client/multi.go
	ClientSwitchingToTheNextRPCNode = "switching to the next RPC node" // Warn in ../node/pkg/morph/client/multi.go
	ClientCouldNotEstablishConnectionToAnyRPCNode = "could not establish connection to any RPC node" // Error in ../node/pkg/morph/client/multi.go
	ClientCouldNotCreateClientToTheHigherPriorityNode = "could not create client to the higher priority node" // Warn in ../node/pkg/morph/client/multi.go
	ClientSwitchedToTheHigherPriorityRPC = "switched to the higher priority RPC" // Info in ../node/pkg/morph/client/multi.go
	ClientCouldNotRestoreSideChainSubscriptionsUsingNode = "could not restore side chain subscriptions using node" // Warn in ../node/pkg/morph/client/multi.go
	ClientNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/pkg/morph/client/notary.go
	ClientNotaryDepositInvoke = "notary deposit invoke" // Info in ../node/pkg/morph/client/notary.go
	ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked" // Debug in ../node/pkg/morph/client/notary.go
	ClientNotaryRequestInvoked = "notary request invoked" // Debug in ../node/pkg/morph/client/notary.go
	ClientNeoClientInvoke = "neo client invoke" // Debug in ../node/pkg/morph/client/client.go
	ClientNativeGasTransferInvoke = "native gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
	ClientBatchGasTransferInvoke = "batch gas transfer invoke" // Debug in ../node/pkg/morph/client/client.go
	ClientCantGetBlockchainHeight = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
	ClientCantGetBlockchainHeight243 = "can't get blockchain height" // Error in ../node/pkg/morph/client/client.go
	EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" // Warn in ../node/pkg/morph/event/utils.go
	EventCouldNotStartListenToEvents = "could not start listen to events" // Error in ../node/pkg/morph/event/listener.go
	EventStopEventListenerByError = "stop event listener by error" // Error in ../node/pkg/morph/event/listener.go
	EventStopEventListenerByContext = "stop event listener by context" // Info in ../node/pkg/morph/event/listener.go
	EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" // Warn in ../node/pkg/morph/event/listener.go
	EventNilNotificationEventWasCaught = "nil notification event was caught" // Warn in ../node/pkg/morph/event/listener.go
	EventStopEventListenerByNotaryChannel = "stop event listener by notary channel" // Warn in ../node/pkg/morph/event/listener.go
	EventNilNotaryEventWasCaught = "nil notary event was caught" // Warn in ../node/pkg/morph/event/listener.go
	EventStopEventListenerByBlockChannel = "stop event listener by block channel" // Warn in ../node/pkg/morph/event/listener.go
	EventNilBlockWasCaught = "nil block was caught" // Warn in ../node/pkg/morph/event/listener.go
	EventListenerWorkerPoolDrained = "listener worker pool drained" // Warn in ../node/pkg/morph/event/listener.go
	EventEventParserNotSet = "event parser not set" // Debug in ../node/pkg/morph/event/listener.go
	EventCouldNotParseNotificationEvent = "could not parse notification event" // Warn in ../node/pkg/morph/event/listener.go
	EventNotificationHandlersForParsedNotificationEventWereNotRegistered = "notification handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
	EventSkipExpiredMainTXNotaryEvent = "skip expired main TX notary event" // Warn in ../node/pkg/morph/event/listener.go
	EventCouldNotPrepareAndValidateNotaryEvent = "could not prepare and validate notary event" // Warn in ../node/pkg/morph/event/listener.go
	EventNotaryParserNotSet = "notary parser not set" // Debug in ../node/pkg/morph/event/listener.go
	EventCouldNotParseNotaryEvent = "could not parse notary event" // Warn in ../node/pkg/morph/event/listener.go
	EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" // Info in ../node/pkg/morph/event/listener.go
	EventIgnoreNilEventParser = "ignore nil event parser" // Info in ../node/pkg/morph/event/listener.go
	EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser" // Warn in ../node/pkg/morph/event/listener.go
	EventRegisteredNewEventParser = "registered new event parser" // Debug in ../node/pkg/morph/event/listener.go
	EventIgnoreNilEventHandler = "ignore nil event handler" // Warn in ../node/pkg/morph/event/listener.go
	EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
	EventRegisteredNewEventHandler = "registered new event handler" // Debug in ../node/pkg/morph/event/listener.go
	EventIgnoreNilNotaryEventParser = "ignore nil notary event parser" // Info in ../node/pkg/morph/event/listener.go
	EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser" // Warn in ../node/pkg/morph/event/listener.go
	EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler" // Warn in ../node/pkg/morph/event/listener.go
	EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" // Warn in ../node/pkg/morph/event/listener.go
	EventIgnoreNilBlockHandler = "ignore nil block handler" // Warn in ../node/pkg/morph/event/listener.go
	SubscriberUnsubscribeForNotification = "unsubscribe for notification" // Error in ../node/pkg/morph/subscriber/subscriber.go
	SubscriberRemoteNotificationChannelHasBeenClosed = "remote notification channel has been closed" // Warn in ../node/pkg/morph/subscriber/subscriber.go
	SubscriberCantCastNotifyEventValueToTheNotifyStruct = "can't cast notify event value to the notify struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
	SubscriberNewNotificationEventFromSidechain = "new notification event from sidechain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
	SubscriberCantCastBlockEventValueToBlock = "can't cast block event value to block" // Error in ../node/pkg/morph/subscriber/subscriber.go
	SubscriberCantCastNotifyEventValueToTheNotaryRequestStruct = "can't cast notify event value to the notary request struct" // Error in ../node/pkg/morph/subscriber/subscriber.go
	SubscriberUnsupportedNotificationFromTheChain = "unsupported notification from the chain" // Debug in ../node/pkg/morph/subscriber/subscriber.go
	BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
	BlobovniczaOpeningBoltDB = "opening BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
	BlobovniczaInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
	BlobovniczaAlreadyInitialized = "already initialized" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
	BlobovniczaCreatingBucketForSizeRange = "creating bucket for size range" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
	BlobovniczaClosingBoltDB = "closing BoltDB" // Debug in ../node/pkg/local_object_storage/blobovnicza/control.go
	BlobovniczaObjectWasRemovedFromBucket = "object was removed from bucket" // Debug in ../node/pkg/local_object_storage/blobovnicza/delete.go
	BlobstorOpening = "opening..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
	BlobstorInitializing = "initializing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
	BlobstorClosing = "closing..." // Debug in ../node/pkg/local_object_storage/blobstor/control.go
	BlobstorCouldntCloseStorage = "couldn't close storage" // Info in ../node/pkg/local_object_storage/blobstor/control.go
	BlobstorErrorOccurredDuringObjectExistenceChecking = "error occurred during object existence checking" // Warn in ../node/pkg/local_object_storage/blobstor/exists.go
	BlobstorErrorOccurredDuringTheIteration = "error occurred during the iteration" // Warn in ../node/pkg/local_object_storage/blobstor/iterate.go
	EngineShardHasBeenRemoved = "shard has been removed" // Info in ../node/pkg/local_object_storage/engine/shards.go
	EngineCouldNotCloseRemovedShard = "could not close removed shard" // Error in ../node/pkg/local_object_storage/engine/shards.go
	EngineCouldNotOpenShardClosingAndSkipping = "could not open shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
	EngineCouldNotClosePartiallyInitializedShard = "could not close partially initialized shard" // Error in ../node/pkg/local_object_storage/engine/control.go
	EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping" // Error in ../node/pkg/local_object_storage/engine/control.go
	EngineCouldNotCloseShard = "could not close shard" // Debug in ../node/pkg/local_object_storage/engine/control.go
	EngineCouldNotReloadAShard = "could not reload a shard" // Error in ../node/pkg/local_object_storage/engine/control.go
	EngineAddedNewShard = "added new shard" // Info in ../node/pkg/local_object_storage/engine/control.go
	EngineCouldNotMarkObjectForShardRelocation = "could not mark object for shard relocation" // Warn in ../node/pkg/local_object_storage/engine/put.go
	EngineCouldNotPutObjectToShard = "could not put object to shard" // Warn in ../node/pkg/local_object_storage/engine/put.go
	EngineErrorDuringSearchingForObjectChildren = "error during searching for object children" // Warn in ../node/pkg/local_object_storage/engine/delete.go
	EngineCouldNotInhumeObjectInShard = "could not inhume object in shard" // Debug in ../node/pkg/local_object_storage/engine/delete.go
	EngineStartingRemovalOfLocallyredundantCopies = "starting removal of locally-redundant copies" // Info in ../node/pkg/local_object_storage/engine/remove_copies.go
	EngineStartedDuplicatesRemovalRoutine = "started duplicates removal routine" // Debug in ../node/pkg/local_object_storage/engine/remove_copies.go
	EngineFinishedRemovalOfLocallyredundantCopies = "finished removal of locally-redundant copies" // Error in ../node/pkg/local_object_storage/engine/remove_copies.go
	EngineRemovingAnObjectWithoutFullLockingCheck = "removing an object without full locking check" // Warn in ../node/pkg/local_object_storage/engine/inhume.go
	EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
	EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" // Info in ../node/pkg/local_object_storage/engine/inhume.go
	EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" // Error in ../node/pkg/local_object_storage/engine/engine.go
	EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" // Error in ../node/pkg/local_object_storage/engine/engine.go
	EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
	EngineShardIsMovedInDegradedModeDueToErrorThreshold = "shard is moved in degraded mode due to error threshold" // Info in ../node/pkg/local_object_storage/engine/engine.go
	EngineModeChangeIsInProgressIgnoringSetmodeRequest = "mode change is in progress, ignoring set-mode request" // Debug in ../node/pkg/local_object_storage/engine/engine.go
	EngineStartedShardsEvacuation = "started shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
	EngineFinishedShardsEvacuation = "finished shards evacuation" // Info in ../node/pkg/local_object_storage/engine/evacuate.go
	EngineObjectIsMovedToAnotherShard = "object is moved to another shard" // Debug in ../node/pkg/local_object_storage/engine/evacuate.go
	MetabaseMissingMatcher = "missing matcher" // Debug in ../node/pkg/local_object_storage/metabase/select.go
	MetabaseErrorInFKBTSelection = "error in FKBT selection" // Debug in ../node/pkg/local_object_storage/metabase/select.go
	MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf" // Debug in ../node/pkg/local_object_storage/metabase/select.go
	MetabaseUnknownOperation = "unknown operation" // Debug in ../node/pkg/local_object_storage/metabase/select.go
	MetabaseCantIterateOverTheBucket = "can't iterate over the bucket" // Debug in ../node/pkg/local_object_storage/metabase/select.go
	MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets" // Debug in ../node/pkg/local_object_storage/metabase/select.go
	MetabaseCreatedDirectoryForMetabase = "created directory for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
	MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase" // Debug in ../node/pkg/local_object_storage/metabase/control.go
	MetabaseCheckingMetabaseVersion = "checking metabase version" // Debug in ../node/pkg/local_object_storage/metabase/control.go
	ShardCantSelectAllObjects = "can't select all objects" // Debug in ../node/pkg/local_object_storage/shard/list.go
	ShardSettingShardMode = "setting shard mode" // Info in ../node/pkg/local_object_storage/shard/mode.go
	ShardShardModeSetSuccessfully = "shard mode set successfully" // Info in ../node/pkg/local_object_storage/shard/mode.go
	ShardCouldNotMarkObjectForShardRelocationInMetabase = "could not mark object for shard relocation in metabase" // Debug in ../node/pkg/local_object_storage/shard/move.go
	ShardCantDeleteObjectFromWriteCache = "can't delete object from write cache" // Warn in ../node/pkg/local_object_storage/shard/delete.go
	ShardCantGetStorageIDFromMetabase = "can't get storage ID from metabase" // Debug in ../node/pkg/local_object_storage/shard/delete.go
	ShardCantRemoveObjectFromBlobStor = "can't remove object from blobStor" // Debug in ../node/pkg/local_object_storage/shard/delete.go
	ShardFetchingObjectWithoutMeta = "fetching object without meta" // Warn in ../node/pkg/local_object_storage/shard/get.go
	ShardObjectIsMissingInWritecache = "object is missing in write-cache" // Debug in ../node/pkg/local_object_storage/shard/get.go
	ShardFailedToFetchObjectFromWritecache = "failed to fetch object from write-cache" // Error in ../node/pkg/local_object_storage/shard/get.go
	ShardCantPutObjectToTheWritecacheTryingBlobstor = "can't put object to the write-cache, trying blobstor" // Debug in ../node/pkg/local_object_storage/shard/put.go
	ShardMetaObjectCounterRead = "meta: object counter read" // Warn in ../node/pkg/local_object_storage/shard/shard.go
	ShardMetaCantReadContainerList = "meta: can't read container list" // Warn in ../node/pkg/local_object_storage/shard/shard.go
	ShardMetaCantReadContainerSize = "meta: can't read container size" // Warn in ../node/pkg/local_object_storage/shard/shard.go
	ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode" // Error in ../node/pkg/local_object_storage/shard/control.go
	ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode" // Error in ../node/pkg/local_object_storage/shard/control.go
	ShardCouldNotUnmarshalObject = "could not unmarshal object" // Warn in ../node/pkg/local_object_storage/shard/control.go
	ShardCouldNotCloseShardComponent = "could not close shard component" // Error in ../node/pkg/local_object_storage/shard/control.go
	ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode" // Error in ../node/pkg/local_object_storage/shard/control.go
	ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode" // Error in ../node/pkg/local_object_storage/shard/control.go
	ShardTryingToRestoreReadwriteMode = "trying to restore read-write mode" // Info in ../node/pkg/local_object_storage/shard/control.go
	ShardStopEventListenerByClosedChannel = "stop event listener by closed channel" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardCouldNotSubmitGCJobToWorkerPool = "could not submit GC job to worker pool" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardGCIsStopped = "GC is stopped" // Debug in ../node/pkg/local_object_storage/shard/gc.go
	ShardWaitingForGCWorkersToStop = "waiting for GC workers to stop..." // Info in ../node/pkg/local_object_storage/shard/gc.go
	ShardIteratorOverMetabaseGraveyardFailed = "iterator over metabase graveyard failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardCouldNotDeleteTheObjects = "could not delete the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardIteratorOverExpiredObjectsFailed = "iterator over expired objects failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardCouldNotInhumeTheObjects = "could not inhume the objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardStartedExpiredTombstonesHandling = "started expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
	ShardIteratingTombstones = "iterating tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
	ShardShardIsInADegradedModeSkipCollectingExpiredTombstones = "shard is in a degraded mode, skip collecting expired tombstones" // Debug in ../node/pkg/local_object_storage/shard/gc.go
	ShardIteratorOverGraveyardFailed = "iterator over graveyard failed" // Error in ../node/pkg/local_object_storage/shard/gc.go
	ShardHandlingExpiredTombstonesBatch = "handling expired tombstones batch" // Debug in ../node/pkg/local_object_storage/shard/gc.go
	ShardFinishedExpiredTombstonesHandling = "finished expired tombstones handling" // Debug in ../node/pkg/local_object_storage/shard/gc.go
	ShardIteratorOverExpiredLocksFailed = "iterator over expired locks failed" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardCouldNotMarkTombstonesAsGarbage = "could not mark tombstones as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardCouldNotDropExpiredGraveRecords = "could not drop expired grave records" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardFailureToUnlockObjects = "failure to unlock objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" // Warn in ../node/pkg/local_object_storage/shard/gc.go
	ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" // Debug in ../node/pkg/local_object_storage/shard/inhume.go
	WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache" // Debug in ../node/pkg/local_object_storage/writecache/flush.go
	WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" // Info in ../node/pkg/local_object_storage/writecache/mode.go
	WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree" // Info in ../node/pkg/local_object_storage/writecache/init.go
	WritecacheFinishedUpdatingFSTreeFlushMarks = "finished updating FSTree flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
	WritecacheFillingFlushMarksForObjectsInDatabase = "filling flush marks for objects in database" // Info in ../node/pkg/local_object_storage/writecache/init.go
	WritecacheFinishedUpdatingFlushMarks = "finished updating flush marks" // Info in ../node/pkg/local_object_storage/writecache/init.go
	WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database" // Error in ../node/pkg/local_object_storage/writecache/storage.go
	WritecacheCantParseAddress = "can't parse address" // Error in ../node/pkg/local_object_storage/writecache/storage.go
	WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" // Error in ../node/pkg/local_object_storage/writecache/storage.go
	BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
	BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza = "could not read payload range from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
	BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza = "could not read payload range from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
	BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza" // Error in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
	BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict = "blobovnicza successfully closed on evict" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
	BlobovniczatreeUpdatingActiveBlobovnicza = "updating active blobovnicza..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
	BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated = "active blobovnicza successfully updated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
	BlobovniczatreeBlobovniczaSuccessfullyActivated = "blobovnicza successfully activated" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
	BlobovniczatreeCouldNotRemoveObjectFromLevel = "could not remove object from level" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
	BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza = "could not remove object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
	BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza = "could not remove object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/delete.go
	BlobovniczatreeCouldNotGetActiveBlobovnicza = "could not get active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
	BlobovniczatreeBlobovniczaOverflowed = "blobovnicza overflowed" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
	BlobovniczatreeCouldNotUpdateActiveBlobovnicza = "could not update active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
	BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza = "could not put object to active blobovnicza" // reportError in ../node/pkg/local_object_storage/blobstor/blobovniczatree/put.go
	BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza = "could not read object from opened blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
	BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza = "could not get object from active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/get.go
	BlobovniczatreeInitializingBlobovniczas = "initializing Blobovnicza's" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
	BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization = "read-only mode, skip blobovniczas initialization..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
	BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing = "blobovnicza successfully initialized, closing..." // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
	BlobovniczatreeCouldNotCloseActiveBlobovnicza = "could not close active blobovnicza" // Debug in ../node/pkg/local_object_storage/blobstor/blobovniczatree/control.go
	AlphabetTick = "tick" // Info in ../node/pkg/innerring/processors/alphabet/handlers.go
	AlphabetAlphabetProcessorWorkerPoolDrained = "alphabet processor worker pool drained" // Warn in ../node/pkg/innerring/processors/alphabet/handlers.go
	AlphabetNonAlphabetModeIgnoreGasEmissionEvent = "non alphabet mode, ignore gas emission event" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
	AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent = "node is out of alphabet range, ignore gas emission event" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
	AlphabetCantInvokeAlphabetEmitMethod = "can't invoke alphabet emit method" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
	AlphabetStorageNodeEmissionIsOff = "storage node emission is off" // Info in ../node/pkg/innerring/processors/alphabet/process_emit.go
	AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes = "can't get netmap snapshot to emit gas to storage nodes" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
	AlphabetGasEmission = "gas emission" // Debug in ../node/pkg/innerring/processors/alphabet/process_emit.go
	AlphabetCantParseNodePublicKey = "can't parse node public key" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
	AlphabetCantTransferGas = "can't transfer gas" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
	AlphabetCantTransferGasToWallet = "can't transfer gas to wallet" // Warn in ../node/pkg/innerring/processors/alphabet/process_emit.go
	AlphabetAlphabetWorkerPool = "alphabet worker pool" // Debug in ../node/pkg/innerring/processors/alphabet/processor.go
	AuditContainerListingFinished = "container listing finished" // Debug in ../node/pkg/innerring/processors/audit/scheduler.go
	AuditNewRoundOfAudit = "new round of audit" // Info in ../node/pkg/innerring/processors/audit/handlers.go
	AuditPreviousRoundOfAuditPrepareHasntFinishedYet = "previous round of audit prepare hasn't finished yet" // Warn in ../node/pkg/innerring/processors/audit/handlers.go
	AuditSomeTasksFromPreviousEpochAreSkipped = "some tasks from previous epoch are skipped" // Info in ../node/pkg/innerring/processors/audit/process.go
	AuditContainerSelectionFailure = "container selection failure" // Error in ../node/pkg/innerring/processors/audit/process.go
	AuditSelectContainersForAudit = "select containers for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
	AuditCantFetchNetworkMap = "can't fetch network map" // Error in ../node/pkg/innerring/processors/audit/process.go
	AuditCantGetContainerInfoIgnore = "can't get container info, ignore" // Error in ../node/pkg/innerring/processors/audit/process.go
	AuditCantBuildPlacementForContainerIgnore = "can't build placement for container, ignore" // Info in ../node/pkg/innerring/processors/audit/process.go
	AuditSelectStorageGroupsForAudit = "select storage groups for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
	AuditFilterExpiredStorageGroupsForAudit = "filter expired storage groups for audit" // Info in ../node/pkg/innerring/processors/audit/process.go
	AuditParseClientNodeInfo = "parse client node info" // Warn in ../node/pkg/innerring/processors/audit/process.go
	AuditErrorInStorageGroupSearch = "error in storage group search" // Warn in ../node/pkg/innerring/processors/audit/process.go
	AuditCouldNotGetStorageGroupObjectForAuditSkipping = "could not get storage group object for audit, skipping" // Error in ../node/pkg/innerring/processors/audit/process.go
|
||||||
|
BalanceNotification = "notification" // Info in ../node/pkg/innerring/processors/balance/handlers.go
|
||||||
|
BalanceBalanceWorkerPoolDrained = "balance worker pool drained" // Warn in ../node/pkg/innerring/processors/balance/handlers.go
|
||||||
|
BalanceNonAlphabetModeIgnoreBalanceLock = "non alphabet mode, ignore balance lock" // Info in ../node/pkg/innerring/processors/balance/process_assets.go
|
||||||
|
BalanceCantSendLockAssetTx = "can't send lock asset tx" // Error in ../node/pkg/innerring/processors/balance/process_assets.go
|
||||||
|
BalanceBalanceWorkerPool = "balance worker pool" // Debug in ../node/pkg/innerring/processors/balance/processor.go
|
||||||
|
ContainerContainerWorkerPool = "container worker pool" // Debug in ../node/pkg/innerring/processors/container/processor.go
|
||||||
|
ContainerNotification = "notification" // Info in ../node/pkg/innerring/processors/container/handlers.go
|
||||||
|
ContainerContainerProcessorWorkerPoolDrained = "container processor worker pool drained" // Warn in ../node/pkg/innerring/processors/container/handlers.go
|
||||||
|
ContainerNonAlphabetModeIgnoreContainerPut = "non alphabet mode, ignore container put" // Info in ../node/pkg/innerring/processors/container/process_container.go
|
||||||
|
ContainerPutContainerCheckFailed = "put container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
|
||||||
|
ContainerCouldNotApprovePutContainer = "could not approve put container" // Error in ../node/pkg/innerring/processors/container/process_container.go
|
||||||
|
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" // Info in ../node/pkg/innerring/processors/container/process_container.go
|
||||||
|
ContainerDeleteContainerCheckFailed = "delete container check failed" // Error in ../node/pkg/innerring/processors/container/process_container.go
|
||||||
|
ContainerCouldNotApproveDeleteContainer = "could not approve delete container" // Error in ../node/pkg/innerring/processors/container/process_container.go
|
||||||
|
ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL" // Info in ../node/pkg/innerring/processors/container/process_eacl.go
|
||||||
|
ContainerSetEACLCheckFailed = "set EACL check failed" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
|
||||||
|
ContainerCouldNotApproveSetEACL = "could not approve set EACL" // Error in ../node/pkg/innerring/processors/container/process_eacl.go
|
||||||
|
FrostFSNonAlphabetModeIgnoreBind = "non alphabet mode, ignore bind" // Info in ../node/pkg/innerring/processors/frostfs/process_bind.go
|
||||||
|
FrostFSInvalidManageKeyEvent = "invalid manage key event" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
|
||||||
|
FrostFSCouldNotDecodeScriptHashFromBytes = "could not decode script hash from bytes" // Error in ../node/pkg/innerring/processors/frostfs/process_bind.go
|
||||||
|
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" // Info in ../node/pkg/innerring/processors/frostfs/process_config.go
|
||||||
|
FrostFSCantRelaySetConfigEvent = "can't relay set config event" // Error in ../node/pkg/innerring/processors/frostfs/process_config.go
|
||||||
|
FrostFSFrostfsWorkerPool = "frostfs worker pool" // Debug in ../node/pkg/innerring/processors/frostfs/processor.go
|
||||||
|
FrostFSNotification = "notification" // Info in ../node/pkg/innerring/processors/frostfs/handlers.go
|
||||||
|
FrostFSFrostfsProcessorWorkerPoolDrained = "frostfs processor worker pool drained" // Warn in ../node/pkg/innerring/processors/frostfs/handlers.go
|
||||||
|
FrostFSNonAlphabetModeIgnoreDeposit = "non alphabet mode, ignore deposit" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSCantTransferAssetsToBalanceContract = "can't transfer assets to balance contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSDoubleMintEmissionDeclined = "double mint emission declined" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSCantGetGasBalanceOfTheNode = "can't get gas balance of the node" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSGasBalanceThresholdHasBeenReached = "gas balance threshold has been reached" // Warn in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSCantTransferNativeGasToReceiver = "can't transfer native gas to receiver" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSNonAlphabetModeIgnoreWithdraw = "non alphabet mode, ignore withdraw" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSCantCreateLockAccount = "can't create lock account" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSCantLockAssetsForWithdraw = "can't lock assets for withdraw" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSNonAlphabetModeIgnoreCheque = "non alphabet mode, ignore cheque" // Info in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
FrostFSCantTransferAssetsToFedContract = "can't transfer assets to fed contract" // Error in ../node/pkg/innerring/processors/frostfs/process_assets.go
|
||||||
|
GovernanceNewEvent = "new event" // Info in ../node/pkg/innerring/processors/governance/handlers.go
|
||||||
|
GovernanceGovernanceWorkerPoolDrained = "governance worker pool drained" // Warn in ../node/pkg/innerring/processors/governance/handlers.go
|
||||||
|
GovernanceNonAlphabetModeIgnoreAlphabetSync = "non alphabet mode, ignore alphabet sync" // Info in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceCantFetchAlphabetListFromMainNet = "can't fetch alphabet list from main net" // Error in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceCantFetchAlphabetListFromSideChain = "can't fetch alphabet list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceCantMergeAlphabetListsFromMainNetAndSideChain = "can't merge alphabet lists from main net and side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged = "no governance update, alphabet list has not been changed" // Info in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceAlphabetListHasBeenChangedStartingUpdate = "alphabet list has been changed, starting update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceCantVoteForSideChainCommittee = "can't vote for side chain committee" // Error in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceFinishedAlphabetListUpdate = "finished alphabet list update" // Info in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceCantFetchInnerRingListFromSideChain = "can't fetch inner ring list from side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys = "can't create new inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceUpdateOfTheInnerRingList = "update of the inner ring list" // Info in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceCantUpdateInnerRingListWithNewAlphabetKeys = "can't update inner ring list with new alphabet keys" // Error in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceCantUpdateListOfNotaryNodesInSideChain = "can't update list of notary nodes in side chain" // Error in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract = "can't update list of alphabet nodes in frostfs contract" // Error in ../node/pkg/innerring/processors/governance/process_update.go
|
||||||
|
NetmapNetmapWorkerPool = "netmap worker pool" // Debug in ../node/pkg/innerring/processors/netmap/processor.go
|
||||||
|
NetmapTick = "tick" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
|
||||||
|
NetmapNetmapWorkerPoolDrained = "netmap worker pool drained" // Warn in ../node/pkg/innerring/processors/netmap/handlers.go
|
||||||
|
NetmapNotification = "notification" // Info in ../node/pkg/innerring/processors/netmap/handlers.go
|
||||||
|
NetmapNetmapCleanUpRoutineIsDisabled518 = "netmap clean up routine is disabled" // Debug in ../node/pkg/innerring/processors/netmap/handlers.go
|
||||||
|
NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick = "non alphabet mode, ignore new netmap cleanup tick" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
|
||||||
|
NetmapCantDecodePublicKeyOfNetmapNode = "can't decode public key of netmap node" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
|
||||||
|
NetmapVoteToRemoveNodeFromNetmap = "vote to remove node from netmap" // Info in ../node/pkg/innerring/processors/netmap/process_cleanup.go
|
||||||
|
NetmapCantInvokeNetmapUpdateState = "can't invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_cleanup.go
|
||||||
|
NetmapCantIterateOnNetmapCleanerCache = "can't iterate on netmap cleaner cache" // Warn in ../node/pkg/innerring/processors/netmap/process_cleanup.go
|
||||||
|
NetmapCantGetEpochDuration = "can't get epoch duration" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
|
||||||
|
NetmapCantGetTransactionHeight = "can't get transaction height" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
|
||||||
|
NetmapCantResetEpochTimer = "can't reset epoch timer" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
|
||||||
|
NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
|
||||||
|
NetmapCantStartContainerSizeEstimation = "can't start container size estimation" // Warn in ../node/pkg/innerring/processors/netmap/process_epoch.go
|
||||||
|
NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" // Info in ../node/pkg/innerring/processors/netmap/process_epoch.go
|
||||||
|
NetmapNextEpoch = "next epoch" // Debug in ../node/pkg/innerring/processors/netmap/process_epoch.go
|
||||||
|
NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" // Error in ../node/pkg/innerring/processors/netmap/process_epoch.go
|
||||||
|
NetmapNonAlphabetModeIgnoreNewPeerNotification = "non alphabet mode, ignore new peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapNonhaltNotaryTransaction = "non-halt notary transaction" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapCantParseNetworkMapCandidate = "can't parse network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate = "could not verify and update information about network map candidate" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapApprovingNetworkMapCandidate = "approving network map candidate" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapCantInvokeNetmapAddPeer = "can't invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapNonAlphabetModeIgnoreUpdatePeerNotification = "non alphabet mode, ignore update peer notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapPreventSwitchingNodeToMaintenanceState = "prevent switching node to maintenance state" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapCantInvokeNetmapUpdatePeer = "can't invoke netmap.UpdatePeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification = "non alphabet mode, ignore remove node from subnet notification" // Info in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapCouldNotGetNetworkMapCandidates = "could not get network map candidates" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapCouldNotUnmarshalSubnetId = "could not unmarshal subnet id" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapGotZeroSubnetInRemoveNodeNotification = "got zero subnet in remove node notification" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapCouldNotIterateOverSubnetworksOfTheNode = "could not iterate over subnetworks of the node" // Warn in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapCouldNotInvokeNetmapUpdateState = "could not invoke netmap.UpdateState" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
NetmapCouldNotInvokeNetmapAddPeer = "could not invoke netmap.AddPeer" // Error in ../node/pkg/innerring/processors/netmap/process_peers.go
|
||||||
|
ReputationNotification = "notification" // Info in ../node/pkg/innerring/processors/reputation/handlers.go
|
||||||
|
ReputationReputationWorkerPoolDrained = "reputation worker pool drained" // Warn in ../node/pkg/innerring/processors/reputation/handlers.go
|
||||||
|
ReputationNonAlphabetModeIgnoreReputationPutNotification = "non alphabet mode, ignore reputation put notification" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
|
||||||
|
ReputationIgnoreReputationValue = "ignore reputation value" // Info in ../node/pkg/innerring/processors/reputation/process_put.go
|
||||||
|
ReputationCantSendApprovalTxForReputationValue = "can't send approval tx for reputation value" // Warn in ../node/pkg/innerring/processors/reputation/process_put.go
|
||||||
|
ReputationReputationWorkerPool = "reputation worker pool" // Debug in ../node/pkg/innerring/processors/reputation/processor.go
|
||||||
|
SettlementNonAlphabetModeIgnoreAuditPayments = "non alphabet mode, ignore audit payments" // Info in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementNewAuditSettlementEvent = "new audit settlement event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementIgnoreGenesisEpoch = "ignore genesis epoch" // Debug in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementCouldNotAddHandlerOfAuditEventToQueue = "could not add handler of AuditEvent to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementAuditEventHandlingSuccessfullyScheduled = "AuditEvent handling successfully scheduled" // Debug in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementNonAlphabetModeIgnoreIncomeCollectionEvent = "non alphabet mode, ignore income collection event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementStartBasicIncomeCollection = "start basic income collection" // Info in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementIncomeContextAlreadyExists = "income context already exists" // Error in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementCantCreateIncomeContext = "can't create income context" // Error in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementCouldNotAddHandlerOfBasicIncomeCollectionToQueue = "could not add handler of basic income collection to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementNonAlphabetModeIgnoreIncomeDistributionEvent = "non alphabet mode, ignore income distribution event" // Info in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementStartBasicIncomeDistribution = "start basic income distribution" // Info in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementIncomeContextDistributionDoesNotExists = "income context distribution does not exists" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementCouldNotAddHandlerOfBasicIncomeDistributionToQueue = "could not add handler of basic income distribution to queue" // Warn in ../node/pkg/innerring/processors/settlement/calls.go
|
||||||
|
SettlementProcessAuditSettlements = "process audit settlements" // Info in ../node/pkg/innerring/processors/settlement/handlers.go
|
||||||
|
SettlementAuditProcessingFinished = "audit processing finished" // Info in ../node/pkg/innerring/processors/settlement/handlers.go
|
||||||
|
SettlementWorkerPoolForSettlementProcessorSuccessfullyInitialized = "worker pool for settlement processor successfully initialized" // Debug in ../node/pkg/innerring/processors/settlement/processor.go
|
||||||
|
AuditSettlementsAreIgnoredForZeroEpoch = "settlements are ignored for zero epoch" // Info in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCalculateAuditSettlements = "calculate audit settlements" // Info in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditGettingResultsForThePreviousEpoch = "getting results for the previous epoch" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCouldNotCollectAuditResults = "could not collect audit results" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditNoAuditResultsInPreviousEpoch = "no audit results in previous epoch" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCantFetchAuditFeeFromNetworkConfig = "can't fetch audit fee from network config" // Warn in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditProcessingAuditResults = "processing audit results" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditProcessingTransfers = "processing transfers" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditReadingInformationAboutTheContainer = "reading information about the container" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditBuildingPlacement = "building placement" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCollectingPassedNodes = "collecting passed nodes" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCalculatingSumOfTheSizesOfAllStorageGroups = "calculating sum of the sizes of all storage groups" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditFillingTransferTable = "filling transfer table" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditMissingContainerInAuditResult = "missing container in audit result" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCouldNotGetContainerInfo = "could not get container info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCouldNotGetContainerNodes = "could not get container nodes" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditEmptyListOfContainerNodes = "empty list of container nodes" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditNoneOfTheContainerNodesPassedTheAudit = "none of the container nodes passed the audit" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCouldNotGetSGInfo = "could not get storage group info" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditZeroSumSGSize = "zero sum storage group size" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCouldNotResolvePublicKeyOfTheStorageNode = "could not resolve public key of the storage node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCalculatingStorageNodeSalaryForAudit = "calculating storage node salary for audit (GASe-12)" // Debug in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
AuditCouldNotParsePublicKeyOfTheInnerRingNode = "could not parse public key of the inner ring node" // Error in ../node/pkg/innerring/processors/settlement/audit/calculate.go
|
||||||
|
BasicCantGetBasicIncomeRate = "can't get basic income rate" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
|
||||||
|
BasicCantFetchContainerSizeEstimations = "can't fetch container size estimations" // Error in ../node/pkg/innerring/processors/settlement/basic/collect.go
|
||||||
|
BasicCantFetchContainerInfo = "can't fetch container info" // Warn in ../node/pkg/innerring/processors/settlement/basic/collect.go
|
||||||
|
BasicCantFetchBalanceOfBankingAccount = "can't fetch balance of banking account" // Error in ../node/pkg/innerring/processors/settlement/basic/distribute.go
|
||||||
|
BasicCantTransformPublicKeyToOwnerID = "can't transform public key to owner id" // Warn in ../node/pkg/innerring/processors/settlement/basic/distribute.go
|
||||||
|
FrostFSIRInternalError = "internal error" // Info in ../node/cmd/frostfs-ir/main.go
|
||||||
|
FrostFSIRCouldNotShutdownHTTPServer = "could not shutdown HTTP server" // Debug in ../node/cmd/frostfs-ir/main.go
|
||||||
|
FrostFSIRApplicationStopped = "application stopped" // Info in ../node/cmd/frostfs-ir/main.go
|
||||||
|
FrostFSNodeCouldNotReadCertificateFromFile = "could not read certificate from file" // Error in ../node/cmd/frostfs-node/grpc.go
|
||||||
|
FrostFSNodeCantListenGRPCEndpoint = "can't listen gRPC endpoint" // Error in ../node/cmd/frostfs-node/grpc.go
|
||||||
|
FrostFSNodeStopListeningGRPCEndpoint = "stop listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
|
||||||
|
FrostFSNodeStartListeningGRPCEndpoint = "start listening gRPC endpoint" // Info in ../node/cmd/frostfs-node/grpc.go
|
||||||
|
FrostFSNodeStoppingGRPCServer = "stopping gRPC server..." // Info in ../node/cmd/frostfs-node/grpc.go
|
||||||
|
FrostFSNodeGRPCCannotShutdownGracefullyForcingStop = "gRPC cannot shutdown gracefully, forcing stop" // Info in ../node/cmd/frostfs-node/grpc.go
|
||||||
|
FrostFSNodeGRPCServerStoppedSuccessfully = "gRPC server stopped successfully" // Info in ../node/cmd/frostfs-node/grpc.go
|
||||||
|
FrostFSNodeWaitingForAllProcessesToStop = "waiting for all processes to stop" // Debug in ../node/cmd/frostfs-node/main.go
|
||||||
|
FrostFSNodeStartReportingReputationOnNewEpochEvent = "start reporting reputation on new epoch event" // Debug in ../node/cmd/frostfs-node/reputation.go
|
||||||
|
FrostFSNodeCouldNotFetchEpochDuration = "could not fetch epoch duration" // Debug in ../node/cmd/frostfs-node/reputation.go
|
||||||
|
FrostFSNodeCouldNotFetchIterationNumber = "could not fetch iteration number" // Debug in ../node/cmd/frostfs-node/reputation.go
|
||||||
|
FrostFSNodeCouldNotCreateFixedEpochTimer = "could not create fixed epoch timer" // Debug in ../node/cmd/frostfs-node/reputation.go
|
||||||
|
FrostFSNodeStartedLocalNodesMaintenance = "started local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeStoppedLocalNodesMaintenance = "stopped local node's maintenance" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeFailedToAttachShardToEngine = "failed to attach shard to engine" // Error in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeShardAttachedToEngine = "shard attached to engine" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeClosingComponentsOfTheStorageEngine = "closing components of the storage engine..." // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeStorageEngineClosingFailure = "storage engine closing failure" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully = "all components of the storage engine closed successfully" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeBootstrappingWithTheMaintenanceState = "bootstrapping with the maintenance state" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeBootstrappingWithOnlineState = "bootstrapping with online state" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeTerminationSignalHasBeenReceivedStopping = "termination signal has been received, stopping..." // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeTerminationSignalProcessingIsComplete = "termination signal processing is complete" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeInternalApplicationError = "internal application error" // Warn in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeInternalErrorProcessingIsComplete = "internal error processing is complete" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration = "SIGHUP has been received, rereading configuration..." // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeConfigurationReading = "configuration reading" // Error in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" // Error in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeTracingConfigationUpdated = "tracing configation updated" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" // Error in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeUpdatedConfigurationApplying = "updated configuration applying" // Error in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeConfigurationHasBeenReloadedSuccessfully = "configuration has been reloaded successfully" // Info in ../node/cmd/frostfs-node/config.go
|
||||||
|
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" // Error in ../node/cmd/frostfs-node/container.go
|
||||||
|
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
|
||||||
|
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" // Debug in ../node/cmd/frostfs-node/container.go
|
||||||
|
FrostFSNodeSaveUsedSpaceAnnouncementInContract = "save used space announcement in contract" // Debug in ../node/cmd/frostfs-node/container.go
|
||||||
|
FrostFSNodeFailedToCalculateContainerSizeInStorageEngine = "failed to calculate container size in storage engine" // Debug in ../node/cmd/frostfs-node/container.go
|
||||||
|
FrostFSNodeContainerSizeInStorageEngineCalculatedSuccessfully = "container size in storage engine calculated successfully" // Debug in ../node/cmd/frostfs-node/container.go
|
||||||
|
FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" // Error in ../node/cmd/frostfs-node/notificator.go
|
||||||
|
FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" // Error in ../node/cmd/frostfs-node/notificator.go
|
||||||
|
FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" // Error in ../node/cmd/frostfs-node/notificator.go
|
||||||
|
FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" // Debug in ../node/cmd/frostfs-node/notificator.go
|
||||||
|
FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" // Warn in ../node/cmd/frostfs-node/notificator.go
|
||||||
|
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" // Error in ../node/cmd/frostfs-node/object.go
|
||||||
|
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" // Warn in ../node/cmd/frostfs-node/object.go
|
||||||
|
FrostFSNodeWritingLocalReputationValues = "writing local reputation values" // Debug in ../node/cmd/frostfs-node/object.go
|
||||||
|
FrostFSNodeCouldNotGetLatestNetworkMapToOverloadTheClient = "could not get latest network map to overload the client" // Warn in ../node/cmd/frostfs-node/object.go
|
||||||
|
FrostFSNodeFailedInitTracing = "failed init tracing" // Error in ../node/cmd/frostfs-node/tracing.go
|
||||||
|
FrostFSNodeFailedShutdownTracing = "failed shutdown tracing" // Error in ../node/cmd/frostfs-node/tracing.go
|
||||||
|
FrostFSNodeFailedToCreateNeoRPCClient = "failed to create neo RPC client" // Info in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeClosingMorphComponents = "closing morph components..." // Info in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal = "failed to set group signer scope, continue with Global" // Info in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeNotarySupport = "notary support" // Info in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeMorphcacheTTLFetchedFromNetwork = "morph.cache_ttl fetched from network" // Debug in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" // Info in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeCantGetLastProcessedSideChainBlockNumber = "can't get last processed side chain block number" // Warn in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeNewEpochEventFromSidechain = "new epoch event from sidechain" // Info in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeNewBlock = "new block" // Debug in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeCantUpdatePersistentState = "can't update persistent state" // Warn in ../node/cmd/frostfs-node/morph.go
|
||||||
|
FrostFSNodeCantSendRebootstrapTx = "can't send re-bootstrap tx" // Warn in ../node/cmd/frostfs-node/netmap.go
|
||||||
|
FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch = "could not update node state on new epoch" // Error in ../node/cmd/frostfs-node/netmap.go
|
||||||
|
FrostFSNodeCouldNotMakeNotaryDeposit = "could not make notary deposit" // Error in ../node/cmd/frostfs-node/netmap.go
|
||||||
|
FrostFSNodeInitialNetworkState = "initial network state" // Info in ../node/cmd/frostfs-node/netmap.go
|
||||||
|
FrostFSNodeTreeServiceIsNotEnabledSkipInitialization = "tree service is not enabled, skip initialization" // Info in ../node/cmd/frostfs-node/tree.go
|
||||||
|
FrostFSNodeCouldNotSynchronizeTreeService = "could not synchronize Tree Service" // Error in ../node/cmd/frostfs-node/tree.go
|
||||||
|
FrostFSNodeRemovingAllTreesForContainer = "removing all trees for container" // Debug in ../node/cmd/frostfs-node/tree.go
|
||||||
|
FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved = "container removal event received, but trees weren't removed" // Error in ../node/cmd/frostfs-node/tree.go
|
||||||
|
FrostFSNodeCantListenGRPCEndpointControl = "can't listen gRPC endpoint (control)" // Error in ../node/cmd/frostfs-node/control.go
|
||||||
|
CommonApplicationStarted = "application started" // Info in ../node/cmd/frostfs-ir/main.go
|
||||||
|
CommonInitializingRemoteWriterProvider = "initializing remote writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
|
||||||
|
CommonRouteHasReachedDeadendProvider = "route has reached dead-end provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
|
||||||
|
CommonInitializingNoopWriterProvider = "initializing no-op writer provider" // Debug in ../node/cmd/frostfs-node/reputation/common/remote.go
|
||||||
|
IntermediateWritingReceivedConsumersTrusts = "writing received consumer's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/consumers.go
|
||||||
|
IntermediateStartWritingGlobalTrustsToContract = "start writing global trusts to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
|
||||||
|
IntermediateFailedToSignGlobalTrust = "failed to sign global trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
|
||||||
|
IntermediateFailedToWriteGlobalTrustToContract = "failed to write global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
|
||||||
|
IntermediateSentGlobalTrustToContract = "sent global trust to contract" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/contract.go
|
||||||
|
IntermediateWritingReceivedDaughtersTrusts = "writing received daughter's trusts" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/daughters.go
|
||||||
|
IntermediateAnnouncingTrust = "announcing trust" // Debug in ../node/cmd/frostfs-node/reputation/intermediate/remote.go
|
||||||
|
LocalAnnouncingTrusts = "announcing trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/remote.go
|
||||||
|
LocalInitializingIteratorOverTrusts = "initializing iterator over trusts" // Debug in ../node/cmd/frostfs-node/reputation/local/storage.go
|
||||||
|
)
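
Taken together with the hunks below, the constants above show the whole shape of the change: every `log.<Level>("literal", fields...)` call becomes `log.<Level>(logs.<Constant>, fields...)`, leaving the structured zap fields untouched. A minimal, self-contained sketch of the pattern; the constant name here is illustrative and only stands in for the definitions above:

package main

import "go.uber.org/zap"

// ExampleApplicationStarted mirrors the style of the logs package above;
// the name is hypothetical and stands in for e.g. logs.CommonApplicationStarted.
const ExampleApplicationStarted = "application started"

func main() {
	log, _ := zap.NewProduction()
	defer log.Sync()

	// Before: the message literal is repeated at every call site.
	log.Info("application started", zap.String("version", "v0.0.1"))

	// After: call sites reference a single named constant, so duplicate
	// messages can be deduplicated and renamed in one place.
	log.Info(ExampleApplicationStarted, zap.String("version", "v0.0.1"))
}
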
@@ -3,6 +3,7 @@ package innerring
 import (
 	"context"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"
 	timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"

@@ -98,7 +99,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
 		args.stopEstimationDDiv,
 		func() {
 			if !args.alphabetState.IsAlphabet() {
-				args.l.Debug("non-alphabet mode, do not stop container estimations")
+				args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations)
 				return
 			}
 

@@ -112,7 +113,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer {
 
 			err := args.cnrWrapper.StopEstimation(prm)
 			if err != nil {
-				args.l.Warn("can't stop epoch estimation",
+				args.l.Warn(logs.InnerringCantStopEpochEstimation,
 					zap.Uint64("epoch", epochN),
 					zap.String("error", err.Error()))
 			}

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"net"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/balance"

@@ -129,7 +130,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
 	fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
 	if err != nil {
 		fromMainChainBlock = 0
-		s.log.Warn("can't get last processed main chain block number", zap.String("error", err.Error()))
+		s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
 	}
 	mainnetChain.from = fromMainChainBlock
 

@@ -177,7 +178,7 @@ func (s *Server) initNotaryConfig(cfg *viper.Viper) {
 		!s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too
 	)
 
-	s.log.Info("notary support",
+	s.log.Info(logs.InnerringNotarySupport,
 		zap.Bool("sidechain_enabled", !s.sideNotaryConfig.disabled),
 		zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled),
 	)

@@ -275,7 +276,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
 
 	if s.withoutMainNet || cfg.GetBool("governance.disable") {
 		alphaSync = func(event.Event) {
-			s.log.Debug("alphabet keys sync is disabled")
+			s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled)
 		}
 	} else {
 		// create governance processor

@@ -496,7 +497,7 @@ func (s *Server) initReputationProcessor(cfg *viper.Viper, sidechainFee fixedn.F
 func (s *Server) initGRPCServer(cfg *viper.Viper) error {
 	controlSvcEndpoint := cfg.GetString("control.grpc.endpoint")
 	if controlSvcEndpoint == "" {
-		s.log.Info("no Control server endpoint specified, service is disabled")
+		s.log.Info(logs.InnerringNoControlServerEndpointSpecified)
 		return nil
 	}
 

@@ -692,7 +693,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
 	fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
 	if err != nil {
 		fromSideChainBlock = 0
-		s.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error()))
+		s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
 	}
 
 	morphChain := &chainParams{

@@ -715,7 +716,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
 		return nil, err
 	}
 	if err := s.morphClient.SetGroupSignerScope(); err != nil {
-		morphChain.log.Info("failed to set group signer scope, continue with Global", zap.Error(err))
+		morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err))
 	}
 
 	return morphChain, nil

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap"

@@ -168,7 +169,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
 	err = s.voteForSidechainValidator(prm)
 	if err != nil {
 		// we don't stop inner ring execution on this error
-		s.log.Warn("can't vote for prepared validators",
+		s.log.Warn(logs.InnerringCantVoteForPreparedValidators,
 			zap.String("error", err.Error()))
 	}
 

@@ -210,13 +211,13 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
 
 func (s *Server) registerMorphNewBlockEventHandler() {
 	s.morphListener.RegisterBlockHandler(func(b *block.Block) {
-		s.log.Debug("new block",
+		s.log.Debug(logs.InnerringNewBlock,
 			zap.Uint32("index", b.Index),
 		)
 
 		err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index)
 		if err != nil {
-			s.log.Warn("can't update persistent state",
+			s.log.Warn(logs.InnerringCantUpdatePersistentState,
 				zap.String("chain", "side"),
 				zap.Uint32("block_index", b.Index))
 		}
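
The block-handler hunk above and its mainnet twin below share one checkpointing pattern: each new block's index is written to the persistent state store, so that after a restart initMainnet/initMorph can resume from the last processed block, falling back to 0 with a warning when the key is missing. A hedged sketch of that pattern, with persistentState as a toy stand-in for the node's s.persistate store:

package main

import (
	"fmt"
	"sync"
)

// persistentState is a toy stand-in for the node's persistate storage;
// the real implementation is backed by an on-disk key-value store.
type persistentState struct {
	mu sync.Mutex
	m  map[string]uint32
}

func (p *persistentState) SetUInt32(key string, v uint32) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.m[key] = v
	return nil
}

func (p *persistentState) UInt32(key string) (uint32, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	v, ok := p.m[key]
	if !ok {
		return 0, fmt.Errorf("key %q not found", key)
	}
	return v, nil
}

const lastBlockKey = "last_block" // illustrative; the node keeps one key per chain

func main() {
	state := &persistentState{m: make(map[string]uint32)}

	// On start: read the checkpoint, falling back to 0 with a warning,
	// as initMainnet/initMorph do in the hunks above.
	from, err := state.UInt32(lastBlockKey)
	if err != nil {
		from = 0
		fmt.Println("can't get last processed block number:", err)
	}

	// On every new block: persist the index; a failed write is only
	// logged and does not stop block processing.
	for index := from + 1; index <= from+3; index++ {
		if err := state.SetUInt32(lastBlockKey, index); err != nil {
			fmt.Println("can't update persistent state:", err)
		}
	}
}
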
@@ -230,7 +231,7 @@ func (s *Server) registerMainnetNewBlockEventHandler() {
 	s.mainnetListener.RegisterBlockHandler(func(b *block.Block) {
 		err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index)
 		if err != nil {
-			s.log.Warn("can't update persistent state",
+			s.log.Warn(logs.InnerringCantUpdatePersistentState,
 				zap.String("chain", "main"),
 				zap.Uint32("block_index", b.Index))
 		}

@@ -302,7 +303,7 @@ func (s *Server) Stop() {
 
 	for _, c := range s.closers {
 		if err := c(); err != nil {
-			s.log.Warn("closer error",
+			s.log.Warn(logs.InnerringCloserError,
 				zap.String("error", err.Error()),
 			)
 		}

@@ -547,7 +548,7 @@ func (s *Server) initConfigFromBlockchain() error {
 		return err
 	}
 
-	s.log.Debug("read config from blockchain",
+	s.log.Debug(logs.InnerringReadConfigFromBlockchain,
 		zap.Bool("active", s.IsActive()),
 		zap.Bool("alphabet", s.IsAlphabet()),
 		zap.Uint64("epoch", epoch),

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
 	"github.com/nspcc-dev/neo-go/pkg/util"

@@ -52,14 +53,14 @@ func (s *Server) notaryHandler(_ event.Event) {
 	if !s.mainNotaryConfig.disabled {
 		_, err := s.depositMainNotary()
 		if err != nil {
-			s.log.Error("can't make notary deposit in main chain", zap.Error(err))
+			s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err))
 		}
 	}
 
 	if !s.sideNotaryConfig.disabled {
 		_, err := s.depositSideNotary()
 		if err != nil {
-			s.log.Error("can't make notary deposit in side chain", zap.Error(err))
+			s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err))
 		}
 	}
 }

@@ -82,7 +83,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
 		// non-error deposit with an empty TX hash means
 		// that the deposit has already been made; no
 		// need to wait it.
-		s.log.Info("notary deposit has already been made")
+		s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade)
 		return nil
 	}
 

@@ -1,6 +1,7 @@
 package alphabet
 
 import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
 	"go.uber.org/zap"

@@ -8,14 +9,14 @@ import (
 
 func (ap *Processor) HandleGasEmission(ev event.Event) {
 	_ = ev.(timers.NewAlphabetEmitTick)
-	ap.log.Info("tick", zap.String("type", "alphabet gas emit"))
+	ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit"))
 
 	// send event to the worker pool
 
 	err := ap.pool.Submit(func() { ap.processEmit() })
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		ap.log.Warn("alphabet processor worker pool drained",
+		ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained,
 			zap.Int("capacity", ap.pool.Cap()))
 	}
 }
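
The handler above routes work through an ants pool created with ants.WithNonblocking(true) (see the New constructor further down): in nonblocking mode, Submit returns ants.ErrPoolOverload immediately when every worker is busy, which is precisely the condition the "worker pool drained" warnings report. A minimal sketch of that behaviour; the pool size and task body are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/panjf2000/ants/v2"
)

func main() {
	// Nonblocking pool, as in the alphabet/audit processors: when every
	// worker is busy, Submit fails right away instead of blocking.
	pool, err := ants.NewPool(1, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	task := func() { time.Sleep(100 * time.Millisecond) }

	// First submit occupies the single worker.
	if err := pool.Submit(task); err != nil {
		fmt.Println("unexpected:", err)
	}
	// Second submit while the worker is busy: the pool is "drained", and
	// the caller logs a warning with the pool capacity instead of waiting.
	if err := pool.Submit(task); err != nil {
		fmt.Println("worker pool drained, capacity:", pool.Cap(), "err:", err)
	}
}
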
@ -3,6 +3,7 @@ package alphabet
|
||||||
import (
|
import (
|
||||||
"crypto/elliptic"
|
"crypto/elliptic"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
|
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
|
||||||
|
@ -14,14 +15,14 @@ const emitMethod = "emit"
|
||||||
func (ap *Processor) processEmit() {
|
func (ap *Processor) processEmit() {
|
||||||
index := ap.irList.AlphabetIndex()
|
index := ap.irList.AlphabetIndex()
|
||||||
if index < 0 {
|
if index < 0 {
|
||||||
ap.log.Info("non alphabet mode, ignore gas emission event")
|
ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
contract, ok := ap.alphabetContracts.GetByIndex(index)
|
contract, ok := ap.alphabetContracts.GetByIndex(index)
|
||||||
if !ok {
|
if !ok {
|
||||||
ap.log.Debug("node is out of alphabet range, ignore gas emission event",
|
ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent,
|
||||||
zap.Int("index", index))
|
zap.Int("index", index))
|
||||||
|
|
||||||
return
|
return
|
||||||
|
@ -30,20 +31,20 @@ func (ap *Processor) processEmit() {
|
||||||
// there is no signature collecting, so we don't need extra fee
|
// there is no signature collecting, so we don't need extra fee
|
||||||
err := ap.morphClient.Invoke(contract, 0, emitMethod)
|
err := ap.morphClient.Invoke(contract, 0, emitMethod)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ap.log.Warn("can't invoke alphabet emit method", zap.String("error", err.Error()))
|
ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if ap.storageEmission == 0 {
|
if ap.storageEmission == 0 {
|
||||||
ap.log.Info("storage node emission is off")
|
ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
networkMap, err := ap.netmapClient.NetMap()
|
networkMap, err := ap.netmapClient.NetMap()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ap.log.Warn("can't get netmap snapshot to emit gas to storage nodes",
|
ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
|
||||||
zap.String("error", err.Error()))
|
zap.String("error", err.Error()))
|
||||||
|
|
||||||
return
|
return
|
||||||
|
@ -53,7 +54,7 @@ func (ap *Processor) processEmit() {
|
||||||
nmLen := len(nmNodes)
|
nmLen := len(nmNodes)
|
||||||
extraLen := len(ap.parsedWallets)
|
extraLen := len(ap.parsedWallets)
|
||||||
|
|
||||||
ap.log.Debug("gas emission",
|
ap.log.Debug(logs.AlphabetGasEmission,
|
||||||
zap.Int("network_map", nmLen),
|
zap.Int("network_map", nmLen),
|
||||||
zap.Int("extra_wallets", extraLen))
|
zap.Int("extra_wallets", extraLen))
|
||||||
|
|
||||||
|
@ -74,7 +75,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
|
||||||
|
|
||||||
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
|
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ap.log.Warn("can't parse node public key",
|
ap.log.Warn(logs.AlphabetCantParseNodePublicKey,
|
||||||
zap.String("error", err.Error()))
|
zap.String("error", err.Error()))
|
||||||
|
|
||||||
continue
|
continue
|
||||||
|
@ -82,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN
|
||||||
|
|
||||||
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
|
err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ap.log.Warn("can't transfer gas",
|
ap.log.Warn(logs.AlphabetCantTransferGas,
|
||||||
zap.String("receiver", key.Address()),
|
zap.String("receiver", key.Address()),
|
||||||
zap.Int64("amount", int64(gasPerNode)),
|
zap.Int64("amount", int64(gasPerNode)),
|
||||||
zap.String("error", err.Error()),
|
zap.String("error", err.Error()),
|
||||||
|
@ -99,7 +100,7 @@ func (ap *Processor) transferGasToExtraNodes(extraLen int, gasPerNode fixedn.Fix
|
||||||
for i, addr := range ap.parsedWallets {
|
for i, addr := range ap.parsedWallets {
|
||||||
receiversLog[i] = addr.StringLE()
|
receiversLog[i] = addr.StringLE()
|
||||||
}
|
}
|
||||||
ap.log.Warn("can't transfer gas to wallet",
|
ap.log.Warn(logs.AlphabetCantTransferGasToWallet,
|
||||||
zap.Strings("receivers", receiversLog),
|
zap.Strings("receivers", receiversLog),
|
||||||
zap.Int64("amount", int64(gasPerNode)),
|
zap.Int64("amount", int64(gasPerNode)),
|
||||||
zap.String("error", err.Error()),
|
zap.String("error", err.Error()),
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
|
||||||
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
|
||||||
|
@ -67,7 +68,7 @@ func New(p *Params) (*Processor, error) {
|
||||||
return nil, errors.New("ir/alphabet: global state is not set")
|
return nil, errors.New("ir/alphabet: global state is not set")
|
||||||
}
|
}
|
||||||
|
|
||||||
p.Log.Debug("alphabet worker pool", zap.Int("size", p.PoolSize))
|
p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize))
|
||||||
|
|
||||||
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
|
pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
package audit
|
package audit
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -10,12 +11,12 @@ func (ap *Processor) handleNewAuditRound(ev event.Event) {
|
||||||
|
|
||||||
epoch := auditEvent.Epoch()
|
epoch := auditEvent.Epoch()
|
||||||
|
|
||||||
ap.log.Info("new round of audit", zap.Uint64("epoch", epoch))
|
ap.log.Info(logs.AuditNewRoundOfAudit, zap.Uint64("epoch", epoch))
|
||||||
|
|
||||||
// send an event to the worker pool
|
// send an event to the worker pool
|
||||||
|
|
||||||
err := ap.pool.Submit(func() { ap.processStartAudit(epoch) })
|
err := ap.pool.Submit(func() { ap.processStartAudit(epoch) })
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ap.log.Warn("previous round of audit prepare hasn't finished yet")
|
ap.log.Warn(logs.AuditPreviousRoundOfAuditPrepareHasntFinishedYet)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -4,6 +4,7 @@ import (
    "context"
    "crypto/sha256"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
    netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"
@@ -24,23 +25,23 @@ func (ap *Processor) processStartAudit(epoch uint64) {

    skipped := ap.taskManager.Reset()
    if skipped > 0 {
-       ap.log.Info("some tasks from previous epoch are skipped",
+       ap.log.Info(logs.AuditSomeTasksFromPreviousEpochAreSkipped,
            zap.Int("amount", skipped),
        )
    }

    containers, err := ap.selectContainersToAudit(epoch)
    if err != nil {
-       log.Error("container selection failure", zap.String("error", err.Error()))
+       log.Error(logs.AuditContainerSelectionFailure, zap.String("error", err.Error()))

        return
    }

-   log.Info("select containers for audit", zap.Int("amount", len(containers)))
+   log.Info(logs.AuditSelectContainersForAudit, zap.Int("amount", len(containers)))

    nm, err := ap.netmapClient.GetNetMap(0)
    if err != nil {
-       ap.log.Error("can't fetch network map",
+       ap.log.Error(logs.AuditCantFetchNetworkMap,
            zap.String("error", err.Error()))

        return
@@ -64,7 +65,7 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{},
    for i := range containers {
        cnr, err := cntClient.Get(ap.containerClient, containers[i]) // get container structure
        if err != nil {
-           log.Error("can't get container info, ignore",
+           log.Error(logs.AuditCantGetContainerInfoIgnore,
                zap.Stringer("cid", containers[i]),
                zap.String("error", err.Error()))

@@ -76,7 +77,7 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{},
        // find all container nodes for current epoch
        nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), pivot)
        if err != nil {
-           log.Info("can't build placement for container, ignore",
+           log.Info(logs.AuditCantBuildPlacementForContainerIgnore,
                zap.Stringer("cid", containers[i]),
                zap.String("error", err.Error()))

@@ -92,13 +93,13 @@ func (ap *Processor) startAuditTasksOnContainers(cancelChannel <-chan struct{},

        // search storage groups
        storageGroupsIDs := ap.findStorageGroups(containers[i], n)
-       log.Info("select storage groups for audit",
+       log.Info(logs.AuditSelectStorageGroupsForAudit,
            zap.Stringer("cid", containers[i]),
            zap.Int("amount", len(storageGroupsIDs)))

        // filter expired storage groups
        storageGroups := ap.filterExpiredSG(containers[i], storageGroupsIDs, nodes, *nm)
-       log.Info("filter expired storage groups for audit",
+       log.Info(logs.AuditFilterExpiredStorageGroupsForAudit,
            zap.Stringer("cid", containers[i]),
            zap.Int("amount", len(storageGroups)))

@@ -146,7 +147,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []

        err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(shuffled[i]))
        if err != nil {
-           log.Warn("parse client node info", zap.String("error", err.Error()))
+           log.Warn(logs.AuditParseClientNodeInfo, zap.String("error", err.Error()))

            continue
        }
@@ -162,7 +163,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []
        cancel()

        if err != nil {
-           log.Warn("error in storage group search", zap.String("error", err.Error()))
+           log.Warn(logs.AuditErrorInStorageGroupSearch, zap.String("error", err.Error()))
            continue
        }

@@ -6,6 +6,7 @@ import (
    "sort"
    "strings"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "go.uber.org/zap"
)
@@ -20,7 +21,7 @@ func (ap *Processor) selectContainersToAudit(epoch uint64) ([]cid.ID, error) {

    // consider getting extra information about container complexity from
    // audit contract there
-   ap.log.Debug("container listing finished",
+   ap.log.Debug(logs.AuditContainerListingFinished,
        zap.Int("total amount", len(containers)),
    )

@@ -3,6 +3,7 @@ package balance
import (
    "encoding/hex"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
    balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
    "go.uber.org/zap"
@@ -10,7 +11,7 @@ import (

func (bp *Processor) handleLock(ev event.Event) {
    lock := ev.(balanceEvent.Lock)
-   bp.log.Info("notification",
+   bp.log.Info(logs.BalanceNotification,
        zap.String("type", "lock"),
        zap.String("value", hex.EncodeToString(lock.ID())))

@@ -19,7 +20,7 @@ func (bp *Processor) handleLock(ev event.Event) {
    err := bp.pool.Submit(func() { bp.processLock(&lock) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       bp.log.Warn("balance worker pool drained",
+       bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained,
            zap.Int("capacity", bp.pool.Cap()))
    }
}

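Every processor in this commit submits work through the same non-blocking ants pool seen in the constructors, so a saturated pool makes Submit fail immediately instead of blocking; that failure path is what emits the various "worker pool drained" warnings. A self-contained sketch of the mechanism, assuming github.com/panjf2000/ants/v2 (the pool size and task body here are made up for illustration):

package main

import (
    "errors"
    "fmt"
    "time"

    "github.com/panjf2000/ants/v2"
)

func main() {
    // One worker and non-blocking mode: the second Submit below
    // fails with ants.ErrPoolOverload instead of waiting.
    pool, err := ants.NewPool(1, ants.WithNonblocking(true))
    if err != nil {
        panic(err)
    }
    defer pool.Release()

    for i := 0; i < 2; i++ {
        err := pool.Submit(func() { time.Sleep(time.Second) })
        if errors.Is(err, ants.ErrPoolOverload) {
            // This is the moment the processors log "... worker pool drained".
            fmt.Println("worker pool drained, capacity:", pool.Cap())
        }
    }
}
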
@@ -1,6 +1,7 @@
package balance

import (
+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
    balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
    "go.uber.org/zap"
@@ -10,7 +11,7 @@ import (
// back to the withdraw issuer.
func (bp *Processor) processLock(lock *balanceEvent.Lock) {
    if !bp.alphabetState.IsAlphabet() {
-       bp.log.Info("non alphabet mode, ignore balance lock")
+       bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock)
        return
    }

@@ -24,6 +25,6 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) {

    err := bp.frostfsClient.Cheque(prm)
    if err != nil {
-       bp.log.Error("can't send lock asset tx", zap.Error(err))
+       bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err))
    }
}

@@ -4,6 +4,7 @@ import (
    "errors"
    "fmt"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
    balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance"
@@ -60,7 +61,7 @@ func New(p *Params) (*Processor, error) {
        return nil, errors.New("ir/balance: balance precision converter is not set")
    }

-   p.Log.Debug("balance worker pool", zap.Int("size", p.PoolSize))
+   p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize))

    pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
    if err != nil {

@@ -3,6 +3,7 @@ package container
import (
    "crypto/sha256"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
    containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
    "github.com/mr-tron/base58"
@@ -13,7 +14,7 @@ func (cp *Processor) handlePut(ev event.Event) {
    put := ev.(putEvent)

    id := sha256.Sum256(put.Container())
-   cp.log.Info("notification",
+   cp.log.Info(logs.ContainerNotification,
        zap.String("type", "container put"),
        zap.String("id", base58.Encode(id[:])))

@@ -22,14 +23,14 @@ func (cp *Processor) handlePut(ev event.Event) {
    err := cp.pool.Submit(func() { cp.processContainerPut(put) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       cp.log.Warn("container processor worker pool drained",
+       cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
            zap.Int("capacity", cp.pool.Cap()))
    }
}

func (cp *Processor) handleDelete(ev event.Event) {
    del := ev.(containerEvent.Delete)
-   cp.log.Info("notification",
+   cp.log.Info(logs.ContainerNotification,
        zap.String("type", "container delete"),
        zap.String("id", base58.Encode(del.ContainerID())))

@@ -38,7 +39,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
    err := cp.pool.Submit(func() { cp.processContainerDelete(&del) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       cp.log.Warn("container processor worker pool drained",
+       cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
            zap.Int("capacity", cp.pool.Cap()))
    }
}
@@ -46,7 +47,7 @@ func (cp *Processor) handleDelete(ev event.Event) {
func (cp *Processor) handleSetEACL(ev event.Event) {
    e := ev.(containerEvent.SetEACL)

-   cp.log.Info("notification",
+   cp.log.Info(logs.ContainerNotification,
        zap.String("type", "set EACL"),
    )

@@ -57,7 +58,7 @@ func (cp *Processor) handleSetEACL(ev event.Event) {
    })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       cp.log.Warn("container processor worker pool drained",
+       cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained,
            zap.Int("capacity", cp.pool.Cap()))
    }
}

@@ -3,6 +3,7 @@ package container
import (
    "fmt"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
    morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
@@ -35,7 +36,7 @@ type putContainerContext struct {
// and sending approve tx back to the morph.
func (cp *Processor) processContainerPut(put putEvent) {
    if !cp.alphabetState.IsAlphabet() {
-       cp.log.Info("non alphabet mode, ignore container put")
+       cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut)
        return
    }

@@ -45,7 +46,7 @@ func (cp *Processor) processContainerPut(put putEvent) {

    err := cp.checkPutContainer(ctx)
    if err != nil {
-       cp.log.Error("put container check failed",
+       cp.log.Error(logs.ContainerPutContainerCheckFailed,
            zap.String("error", err.Error()),
        )

@@ -119,7 +120,7 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) {
        err = cp.cnrClient.Put(prm)
    }
    if err != nil {
-       cp.log.Error("could not approve put container",
+       cp.log.Error(logs.ContainerCouldNotApprovePutContainer,
            zap.String("error", err.Error()),
        )
    }
@@ -129,13 +130,13 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) {
// and sending approve tx back to morph.
func (cp *Processor) processContainerDelete(e *containerEvent.Delete) {
    if !cp.alphabetState.IsAlphabet() {
-       cp.log.Info("non alphabet mode, ignore container delete")
+       cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete)
        return
    }

    err := cp.checkDeleteContainer(e)
    if err != nil {
-       cp.log.Error("delete container check failed",
+       cp.log.Error(logs.ContainerDeleteContainerCheckFailed,
            zap.String("error", err.Error()),
        )

@@ -194,7 +195,7 @@ func (cp *Processor) approveDeleteContainer(e *containerEvent.Delete) {
        err = cp.cnrClient.Delete(prm)
    }
    if err != nil {
-       cp.log.Error("could not approve delete container",
+       cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer,
            zap.String("error", err.Error()),
        )
    }

@@ -4,6 +4,7 @@ import (
    "errors"
    "fmt"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
@@ -13,13 +14,13 @@ import (

func (cp *Processor) processSetEACL(e container.SetEACL) {
    if !cp.alphabetState.IsAlphabet() {
-       cp.log.Info("non alphabet mode, ignore set EACL")
+       cp.log.Info(logs.ContainerNonAlphabetModeIgnoreSetEACL)
        return
    }

    err := cp.checkSetEACL(e)
    if err != nil {
-       cp.log.Error("set EACL check failed",
+       cp.log.Error(logs.ContainerSetEACLCheckFailed,
            zap.String("error", err.Error()),
        )

@@ -91,7 +92,7 @@ func (cp *Processor) approveSetEACL(e container.SetEACL) {
        err = cp.cnrClient.PutEACL(prm)
    }
    if err != nil {
-       cp.log.Error("could not approve set EACL",
+       cp.log.Error(logs.ContainerCouldNotApproveSetEACL,
            zap.String("error", err.Error()),
        )
    }

@@ -4,6 +4,7 @@ import (
    "errors"
    "fmt"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
    morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"
@@ -88,7 +89,7 @@ func New(p *Params) (*Processor, error) {
        return nil, errors.New("ir/container: subnet client is not set")
    }

-   p.Log.Debug("container worker pool", zap.Int("size", p.PoolSize))
+   p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize))

    pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
    if err != nil {

@@ -3,6 +3,7 @@ package frostfs
import (
    "encoding/hex"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
    frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
    "github.com/nspcc-dev/neo-go/pkg/util/slice"
@@ -11,7 +12,7 @@ import (

func (np *Processor) handleDeposit(ev event.Event) {
    deposit := ev.(frostfsEvent.Deposit)
-   np.log.Info("notification",
+   np.log.Info(logs.FrostFSNotification,
        zap.String("type", "deposit"),
        zap.String("id", hex.EncodeToString(slice.CopyReverse(deposit.ID()))))

@@ -20,14 +21,14 @@ func (np *Processor) handleDeposit(ev event.Event) {
    err := np.pool.Submit(func() { np.processDeposit(&deposit) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("frostfs processor worker pool drained",
+       np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

func (np *Processor) handleWithdraw(ev event.Event) {
    withdraw := ev.(frostfsEvent.Withdraw)
-   np.log.Info("notification",
+   np.log.Info(logs.FrostFSNotification,
        zap.String("type", "withdraw"),
        zap.String("id", hex.EncodeToString(slice.CopyReverse(withdraw.ID()))))

@@ -36,14 +37,14 @@ func (np *Processor) handleWithdraw(ev event.Event) {
    err := np.pool.Submit(func() { np.processWithdraw(&withdraw) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("frostfs processor worker pool drained",
+       np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

func (np *Processor) handleCheque(ev event.Event) {
    cheque := ev.(frostfsEvent.Cheque)
-   np.log.Info("notification",
+   np.log.Info(logs.FrostFSNotification,
        zap.String("type", "cheque"),
        zap.String("id", hex.EncodeToString(cheque.ID())))

@@ -52,14 +53,14 @@ func (np *Processor) handleCheque(ev event.Event) {
    err := np.pool.Submit(func() { np.processCheque(&cheque) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("frostfs processor worker pool drained",
+       np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

func (np *Processor) handleConfig(ev event.Event) {
    cfg := ev.(frostfsEvent.Config)
-   np.log.Info("notification",
+   np.log.Info(logs.FrostFSNotification,
        zap.String("type", "set config"),
        zap.String("key", hex.EncodeToString(cfg.Key())),
        zap.String("value", hex.EncodeToString(cfg.Value())))
@@ -69,14 +70,14 @@ func (np *Processor) handleConfig(ev event.Event) {
    err := np.pool.Submit(func() { np.processConfig(&cfg) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("frostfs processor worker pool drained",
+       np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

func (np *Processor) handleBind(ev event.Event) {
    e := ev.(frostfsEvent.Bind)
-   np.log.Info("notification",
+   np.log.Info(logs.FrostFSNotification,
        zap.String("type", "bind"),
    )

@@ -85,14 +86,14 @@ func (np *Processor) handleBind(ev event.Event) {
    err := np.pool.Submit(func() { np.processBind(e) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("frostfs processor worker pool drained",
+       np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

func (np *Processor) handleUnbind(ev event.Event) {
    e := ev.(frostfsEvent.Unbind)
-   np.log.Info("notification",
+   np.log.Info(logs.FrostFSNotification,
        zap.String("type", "unbind"),
    )

@@ -101,7 +102,7 @@ func (np *Processor) handleUnbind(ev event.Event) {
    err := np.pool.Submit(func() { np.processBind(e) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("frostfs processor worker pool drained",
+       np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

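The handlers above all keep the same call shape after this commit: a constant message plus inline zap fields, so messages stay greppable while field values vary per event. A self-contained zap sketch of that shape; the two constants are local stand-ins for the internal/logs ones, declared inline only so the example runs on its own:

package main

import "go.uber.org/zap"

// Local stand-ins for logs.FrostFSNotification and
// logs.FrostFSFrostfsProcessorWorkerPoolDrained.
const (
    frostFSNotification = "notification"
    poolDrained         = "frostfs processor worker pool drained"
)

func main() {
    log, _ := zap.NewDevelopment()
    defer log.Sync()

    // Constant message, variable structured fields.
    log.Info(frostFSNotification, zap.String("type", "deposit"))
    log.Warn(poolDrained, zap.Int("capacity", 10))
}
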
@@ -1,6 +1,7 @@
package frostfs

import (
+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
    frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
    "github.com/nspcc-dev/neo-go/pkg/util"
@@ -16,7 +17,7 @@ const (
// gas in the sidechain.
func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
    if !np.alphabetState.IsAlphabet() {
-       np.log.Info("non alphabet mode, ignore deposit")
+       np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit)
        return
    }

@@ -29,7 +30,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
    // send transferX to a balance contract
    err := np.balanceClient.Mint(prm)
    if err != nil {
-       np.log.Error("can't transfer assets to balance contract", zap.Error(err))
+       np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err))
    }

    curEpoch := np.epochState.EpochCounter()
@@ -43,7 +44,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {

    val, ok := np.mintEmitCache.Get(receiver.String())
    if ok && val+np.mintEmitThreshold >= curEpoch {
-       np.log.Warn("double mint emission declined",
+       np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined,
            zap.String("receiver", receiver.String()),
            zap.Uint64("last_emission", val),
            zap.Uint64("current_epoch", curEpoch))
@@ -55,12 +56,12 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
    // before gas transfer check if the balance is greater than the threshold
    balance, err := np.morphClient.GasBalance()
    if err != nil {
-       np.log.Error("can't get gas balance of the node", zap.Error(err))
+       np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err))
        return
    }

    if balance < np.gasBalanceThreshold {
-       np.log.Warn("gas balance threshold has been reached",
+       np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached,
            zap.Int64("balance", balance),
            zap.Int64("threshold", np.gasBalanceThreshold))

@@ -69,7 +70,7 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {

    err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
    if err != nil {
-       np.log.Error("can't transfer native gas to receiver",
+       np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver,
            zap.String("error", err.Error()))

        return
@@ -81,14 +82,14 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
// Process withdraw event by locking assets in the balance account.
func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
    if !np.alphabetState.IsAlphabet() {
-       np.log.Info("non alphabet mode, ignore withdraw")
+       np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw)
        return
    }

    // create lock account
    lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
    if err != nil {
-       np.log.Error("can't create lock account", zap.Error(err))
+       np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err))
        return
    }

@@ -104,7 +105,7 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {

    err = np.balanceClient.Lock(prm)
    if err != nil {
-       np.log.Error("can't lock assets for withdraw", zap.Error(err))
+       np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err))
    }
}

@@ -112,7 +113,7 @@ func (np *Processor) processWithdraw(withdraw *frostfsEvent.Withdraw) {
// the reserve account.
func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) {
    if !np.alphabetState.IsAlphabet() {
-       np.log.Info("non alphabet mode, ignore cheque")
+       np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque)
        return
    }

@@ -124,6 +125,6 @@ func (np *Processor) processCheque(cheque *frostfsEvent.Cheque) {

    err := np.balanceClient.Burn(prm)
    if err != nil {
-       np.log.Error("can't transfer assets to fed contract", zap.Error(err))
+       np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err))
    }
}

@@ -4,6 +4,7 @@ import (
    "crypto/elliptic"
    "fmt"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -20,7 +21,7 @@ type bindCommon interface {

func (np *Processor) processBind(e bindCommon) {
    if !np.alphabetState.IsAlphabet() {
-       np.log.Info("non alphabet mode, ignore bind")
+       np.log.Info(logs.FrostFSNonAlphabetModeIgnoreBind)
        return
    }

@@ -32,7 +33,7 @@ func (np *Processor) processBind(e bindCommon) {

    err := np.checkBindCommon(c)
    if err != nil {
-       np.log.Error("invalid manage key event",
+       np.log.Error(logs.FrostFSInvalidManageKeyEvent,
            zap.Bool("bind", c.bind),
            zap.String("error", err.Error()),
        )
@@ -77,7 +78,7 @@ func (np *Processor) approveBindCommon(e *bindCommonContext) {

    u160, err := util.Uint160DecodeBytesBE(scriptHash)
    if err != nil {
-       np.log.Error("could not decode script hash from bytes",
+       np.log.Error(logs.FrostFSCouldNotDecodeScriptHashFromBytes,
            zap.String("error", err.Error()),
        )

@@ -1,6 +1,7 @@
package frostfs

import (
+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
    frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs"
    "go.uber.org/zap"
@@ -10,7 +11,7 @@ import (
// the sidechain.
func (np *Processor) processConfig(config *frostfsEvent.Config) {
    if !np.alphabetState.IsAlphabet() {
-       np.log.Info("non alphabet mode, ignore config")
+       np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig)
        return
    }

@@ -23,6 +24,6 @@ func (np *Processor) processConfig(config *frostfsEvent.Config) {

    err := np.netmapClient.SetConfig(prm)
    if err != nil {
-       np.log.Error("can't relay set config event", zap.Error(err))
+       np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err))
    }
}

@@ -5,6 +5,7 @@ import (
    "fmt"
    "sync"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfsid"
@@ -98,7 +99,7 @@ func New(p *Params) (*Processor, error) {
        return nil, errors.New("ir/frostfs: balance precision converter is not set")
    }

-   p.Log.Debug("frostfs worker pool", zap.Int("size", p.PoolSize))
+   p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize))

    pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
    if err != nil {

@@ -1,6 +1,7 @@
package governance

import (
+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
    "github.com/nspcc-dev/neo-go/pkg/core/native"
@@ -30,14 +31,14 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) {
        return
    }

-   gp.log.Info("new event", zap.String("type", typ))
+   gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ))

    // send event to the worker pool

    err := gp.pool.Submit(func() { gp.processAlphabetSync(hash) })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       gp.log.Warn("governance worker pool drained",
+       gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained,
            zap.Int("capacity", gp.pool.Cap()))
    }
}

@@ -6,6 +6,7 @@ import (
    "sort"
    "strings"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
    frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
    nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
@@ -20,37 +21,37 @@ const (

func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
    if !gp.alphabetState.IsAlphabet() {
-       gp.log.Info("non alphabet mode, ignore alphabet sync")
+       gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync)
        return
    }

    mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
    if err != nil {
-       gp.log.Error("can't fetch alphabet list from main net",
+       gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet,
            zap.String("error", err.Error()))
        return
    }

    sidechainAlphabet, err := gp.morphClient.Committee()
    if err != nil {
-       gp.log.Error("can't fetch alphabet list from side chain",
+       gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain,
            zap.String("error", err.Error()))
        return
    }

    newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
    if err != nil {
-       gp.log.Error("can't merge alphabet lists from main net and side chain",
+       gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
            zap.String("error", err.Error()))
        return
    }

    if newAlphabet == nil {
-       gp.log.Info("no governance update, alphabet list has not been changed")
+       gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged)
        return
    }

-   gp.log.Info("alphabet list has been changed, starting update",
+   gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate,
        zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)),
        zap.String("new_alphabet", prettyKeys(newAlphabet)),
    )
@@ -63,7 +64,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
    // 1. Vote to sidechain committee via alphabet contracts.
    err = gp.voter.VoteForSidechainValidator(votePrm)
    if err != nil {
-       gp.log.Error("can't vote for side chain committee",
+       gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee,
            zap.String("error", err.Error()))
    }

@@ -76,7 +77,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) {
    // 4. Update FrostFS contract in the mainnet.
    gp.updateFrostFSContractInMainnet(newAlphabet)

-   gp.log.Info("finished alphabet list update")
+   gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate)
}

func prettyKeys(keys keys.PublicKeys) string {
@@ -94,21 +95,21 @@ func prettyKeys(keys keys.PublicKeys) string {
func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) {
    innerRing, err := gp.irFetcher.InnerRingKeys()
    if err != nil {
-       gp.log.Error("can't fetch inner ring list from side chain",
+       gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain,
            zap.String("error", err.Error()))
        return
    }

    newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
    if err != nil {
-       gp.log.Error("can't create new inner ring list with new alphabet keys",
+       gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
            zap.String("error", err.Error()))
        return
    }

    sort.Sort(newInnerRing)

-   gp.log.Info("update of the inner ring list",
+   gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList,
        zap.String("before", prettyKeys(innerRing)),
        zap.String("after", prettyKeys(newInnerRing)),
    )
@@ -130,7 +131,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl
    }

    if err != nil {
-       gp.log.Error("can't update inner ring list with new alphabet keys",
+       gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
            zap.String("error", err.Error()))
    }
}
@@ -147,7 +148,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, tx

    err := gp.morphClient.UpdateNotaryList(updPrm)
    if err != nil {
-       gp.log.Error("can't update list of notary nodes in side chain",
+       gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
            zap.String("error", err.Error()))
    }
}
@@ -167,7 +168,7 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys)

    err := gp.frostfsClient.AlphabetUpdate(prm)
    if err != nil {
-       gp.log.Error("can't update list of alphabet nodes in frostfs contract",
+       gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
            zap.String("error", err.Error()))
    }
}

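The governance updates above serialize whole key lists through prettyKeys before logging them. Its body is outside this hunk; the sketch below is a hypothetical implementation consistent with the call sites and the "strings" import shown in the file, not the node's actual code:

package main

import (
    "encoding/hex"
    "fmt"
    "strings"

    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

// prettyKeys renders a key list as one comma-separated hex string, the
// shape the "side_chain_alphabet" and "new_alphabet" log fields imply.
func prettyKeys(pks keys.PublicKeys) string {
    parts := make([]string, 0, len(pks))
    for _, k := range pks {
        parts = append(parts, hex.EncodeToString(k.Bytes()))
    }
    return strings.Join(parts, ",")
}

func main() {
    priv, err := keys.NewPrivateKey()
    if err != nil {
        panic(err)
    }
    fmt.Println(prettyKeys(keys.PublicKeys{priv.PublicKey()}))
}
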
@@ -3,6 +3,7 @@ package netmap
import (
    "encoding/hex"

+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
    netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
@@ -12,21 +13,21 @@ import (

func (np *Processor) HandleNewEpochTick(ev event.Event) {
    _ = ev.(timerEvent.NewEpochTick)
-   np.log.Info("tick", zap.String("type", "epoch"))
+   np.log.Info(logs.NetmapTick, zap.String("type", "epoch"))

    // send an event to the worker pool

    err := np.pool.Submit(func() { np.processNewEpochTick() })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("netmap worker pool drained",
+       np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

func (np *Processor) handleNewEpoch(ev event.Event) {
    epochEvent := ev.(netmapEvent.NewEpoch)
-   np.log.Info("notification",
+   np.log.Info(logs.NetmapNotification,
        zap.String("type", "new epoch"),
        zap.Uint64("value", epochEvent.EpochNumber()))

@@ -37,7 +38,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
    })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("netmap worker pool drained",
+       np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}
@@ -45,7 +46,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) {
func (np *Processor) handleAddPeer(ev event.Event) {
    newPeer := ev.(netmapEvent.AddPeer)

-   np.log.Info("notification",
+   np.log.Info(logs.NetmapNotification,
        zap.String("type", "add peer"),
    )

@@ -56,14 +57,14 @@ func (np *Processor) handleAddPeer(ev event.Event) {
    })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("netmap worker pool drained",
+       np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

func (np *Processor) handleUpdateState(ev event.Event) {
    updPeer := ev.(netmapEvent.UpdatePeer)
-   np.log.Info("notification",
+   np.log.Info(logs.NetmapNotification,
        zap.String("type", "update peer state"),
        zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes())))

@@ -74,21 +75,21 @@ func (np *Processor) handleUpdateState(ev event.Event) {
    })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("netmap worker pool drained",
+       np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

func (np *Processor) handleCleanupTick(ev event.Event) {
    if !np.netmapSnapshot.enabled {
-       np.log.Debug("netmap clean up routine is disabled")
+       np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518)

        return
    }

    cleanup := ev.(netmapCleanupTick)

-   np.log.Info("tick", zap.String("type", "netmap cleaner"))
+   np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner"))

    // send event to the worker pool
    err := np.pool.Submit(func() {
@@ -96,7 +97,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
    })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("netmap worker pool drained",
+       np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}
@@ -104,7 +105,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) {
func (np *Processor) handleRemoveNode(ev event.Event) {
    removeNode := ev.(subnetevents.RemoveNode)

-   np.log.Info("notification",
+   np.log.Info(logs.NetmapNotification,
        zap.String("type", "remove node from subnet"),
        zap.String("subnetID", hex.EncodeToString(removeNode.SubnetworkID())),
        zap.String("key", hex.EncodeToString(removeNode.Node())),
@@ -115,7 +116,7 @@ func (np *Processor) handleRemoveNode(ev event.Event) {
    })
    if err != nil {
        // there system can be moved into controlled degradation stage
-       np.log.Warn("netmap worker pool drained",
+       np.log.Warn(logs.NetmapNetmapWorkerPoolDrained,
            zap.Int("capacity", np.pool.Cap()))
    }
}

@@ -2,6 +2,7 @@ package netmap

import (
    v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap"
+   "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
    netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "go.uber.org/zap"
@@ -9,7 +10,7 @@ import (

func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
    if !np.alphabetState.IsAlphabet() {
-       np.log.Info("non alphabet mode, ignore new netmap cleanup tick")
+       np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick)

        return
    }
@@ -17,13 +18,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
    err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error {
        key, err := keys.NewPublicKeyFromString(s)
        if err != nil {
-           np.log.Warn("can't decode public key of netmap node",
+           np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode,
                zap.String("key", s))

            return nil
        }

-       np.log.Info("vote to remove node from netmap", zap.String("key", s))
+       np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s))

        // In notary environments we call UpdateStateIR method instead of UpdateState.
        // It differs from UpdateState only by name, so we can do this in the same form.
@@ -48,13 +49,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
            )
        }
        if err != nil {
-           np.log.Error("can't invoke netmap.UpdateState", zap.Error(err))
+           np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err))
        }

        return nil
    })
    if err != nil {
-       np.log.Warn("can't iterate on netmap cleaner cache",
+       np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache,
            zap.String("error", err.Error()))
    }
}

@@ -1,6 +1,7 @@
 package netmap

 import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/audit"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement"

@@ -16,7 +17,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {

 	epochDuration, err := np.netmapClient.EpochDuration()
 	if err != nil {
-		np.log.Warn("can't get epoch duration",
+		np.log.Warn(logs.NetmapCantGetEpochDuration,
 			zap.String("error", err.Error()))
 	} else {
 		np.epochState.SetEpochDuration(epochDuration)

@@ -26,20 +27,20 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {

 	h, err := np.netmapClient.Morph().TxHeight(ev.TxHash())
 	if err != nil {
-		np.log.Warn("can't get transaction height",
+		np.log.Warn(logs.NetmapCantGetTransactionHeight,
 			zap.String("hash", ev.TxHash().StringLE()),
 			zap.String("error", err.Error()))
 	}

 	if err := np.epochTimer.ResetEpochTimer(h); err != nil {
-		np.log.Warn("can't reset epoch timer",
+		np.log.Warn(logs.NetmapCantResetEpochTimer,
 			zap.String("error", err.Error()))
 	}

 	// get new netmap snapshot
 	networkMap, err := np.netmapClient.NetMap()
 	if err != nil {
-		np.log.Warn("can't get netmap snapshot to perform cleanup",
+		np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
 			zap.String("error", err.Error()))

 		return

@@ -54,7 +55,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
 	err = np.containerWrp.StartEstimation(prm)

 	if err != nil {
-		np.log.Warn("can't start container size estimation",
+		np.log.Warn(logs.NetmapCantStartContainerSizeEstimation,
 			zap.Uint64("epoch", epoch),
 			zap.String("error", err.Error()))
 	}

@@ -71,15 +72,15 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
 // Process new epoch tick by invoking new epoch method in network map contract.
 func (np *Processor) processNewEpochTick() {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info("non alphabet mode, ignore new epoch tick")
+		np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick)
 		return
 	}

 	nextEpoch := np.epochState.EpochCounter() + 1
-	np.log.Debug("next epoch", zap.Uint64("value", nextEpoch))
+	np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch))

 	err := np.netmapClient.NewEpoch(nextEpoch)
 	if err != nil {
-		np.log.Error("can't invoke netmap.NewEpoch", zap.Error(err))
+		np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err))
 	}
 }
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/hex"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
 	netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap"
 	subnetEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/subnet"

@@ -16,7 +17,7 @@ import (
 // local epoch timer.
 func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info("non alphabet mode, ignore new peer notification")
+		np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification)
 		return
 	}

@@ -25,7 +26,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
 		tx := originalRequest.MainTransaction
 		ok, err := np.netmapClient.Morph().IsValidScript(tx.Script, tx.Signers)
 		if err != nil || !ok {
-			np.log.Warn("non-halt notary transaction",
+			np.log.Warn(logs.NetmapNonhaltNotaryTransaction,
 				zap.String("method", "netmap.AddPeer"),
 				zap.String("hash", tx.Hash().StringLE()),
 				zap.Error(err))

@@ -37,14 +38,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
 	var nodeInfo netmap.NodeInfo
 	if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
 		// it will be nice to have tx id at event structure to log it
-		np.log.Warn("can't parse network map candidate")
+		np.log.Warn(logs.NetmapCantParseNetworkMapCandidate)
 		return
 	}

 	// validate and update node info
 	err := np.nodeValidator.VerifyAndUpdate(&nodeInfo)
 	if err != nil {
-		np.log.Warn("could not verify and update information about network map candidate",
+		np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
 			zap.String("error", err.Error()),
 		)

@@ -62,7 +63,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
 	updated := np.netmapSnapshot.touch(keyString, np.epochState.EpochCounter(), nodeInfoBinary)

 	if updated {
-		np.log.Info("approving network map candidate",
+		np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
 			zap.String("key", keyString))

 		prm := netmapclient.AddPeerPrm{}

@@ -89,7 +90,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
 		}

 		if err != nil {
-			np.log.Error("can't invoke netmap.AddPeer", zap.Error(err))
+			np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err))
 		}
 	}
 }

@@ -97,7 +98,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
 // Process update peer notification by sending approval tx to the smart contract.
 func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info("non alphabet mode, ignore update peer notification")
+		np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification)
 		return
 	}

@@ -110,7 +111,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
 	if ev.Maintenance() {
 		err = np.nodeStateSettings.MaintenanceModeAllowed()
 		if err != nil {
-			np.log.Info("prevent switching node to maintenance state",
+			np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState,
 				zap.Error(err),
 			)

@@ -135,19 +136,19 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
 		err = np.netmapClient.UpdatePeerState(prm)
 	}
 	if err != nil {
-		np.log.Error("can't invoke netmap.UpdatePeer", zap.Error(err))
+		np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err))
 	}
 }

 func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
 	if !np.alphabetState.IsAlphabet() {
-		np.log.Info("non alphabet mode, ignore remove node from subnet notification")
+		np.log.Info(logs.NetmapNonAlphabetModeIgnoreRemoveNodeFromSubnetNotification)
 		return
 	}

 	candidates, err := np.netmapClient.GetCandidates()
 	if err != nil {
-		np.log.Warn("could not get network map candidates",
+		np.log.Warn(logs.NetmapCouldNotGetNetworkMapCandidates,
 			zap.Error(err),
 		)
 		return

@@ -158,14 +159,14 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {

 	err = subnetToRemoveFrom.Unmarshal(rawSubnet)
 	if err != nil {
-		np.log.Warn("could not unmarshal subnet id",
+		np.log.Warn(logs.NetmapCouldNotUnmarshalSubnetId,
 			zap.Error(err),
 		)
 		return
 	}

 	if subnetid.IsZero(subnetToRemoveFrom) {
-		np.log.Warn("got zero subnet in remove node notification")
+		np.log.Warn(logs.NetmapGotZeroSubnetInRemoveNodeNotification)
 		return
 	}

@@ -182,8 +183,8 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
 			return nil
 		})
 		if err != nil {
-			np.log.Warn("could not iterate over subnetworks of the node", zap.Error(err))
-			np.log.Info("vote to remove node from netmap", zap.String("key", hex.EncodeToString(ev.Node())))
+			np.log.Warn(logs.NetmapCouldNotIterateOverSubnetworksOfTheNode, zap.Error(err))
+			np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", hex.EncodeToString(ev.Node())))

 			prm := netmapclient.UpdatePeerPrm{}
 			prm.SetKey(ev.Node())

@@ -191,7 +192,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {

 			err = np.netmapClient.UpdatePeerState(prm)
 			if err != nil {
-				np.log.Error("could not invoke netmap.UpdateState", zap.Error(err))
+				np.log.Error(logs.NetmapCouldNotInvokeNetmapUpdateState, zap.Error(err))
 				return
 			}
 		} else {

@@ -201,7 +202,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {

 			err = np.netmapClient.AddPeer(prm)
 			if err != nil {
-				np.log.Error("could not invoke netmap.AddPeer", zap.Error(err))
+				np.log.Error(logs.NetmapCouldNotInvokeNetmapAddPeer, zap.Error(err))
 				return
 			}
 		}
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
 	nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"

@@ -142,7 +143,7 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/netmap: node state settings is not set")
 	}

-	p.Log.Debug("netmap worker pool", zap.Int("size", p.PoolSize))
+	p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize))

 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
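`New` above sizes a nonblocking ants pool and logs its capacity before construction. With `ants.WithNonblocking(true)`, `Submit` never queues work: once every worker is busy it fails immediately with `ants.ErrPoolOverload`, which is exactly the condition the processors log as "worker pool drained". A minimal, self-contained sketch of that behavior (pool size and tasks here are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/panjf2000/ants/v2"
)

func main() {
	// One worker, nonblocking: a second Submit cannot queue and fails fast.
	pool, err := ants.NewPool(1, ants.WithNonblocking(true))
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	_ = pool.Submit(func() { time.Sleep(time.Second) }) // occupies the only worker
	if err := pool.Submit(func() {}); err != nil {
		// Mirrors the "worker pool drained" warning in the processors above.
		fmt.Println("pool drained:", err, "capacity:", pool.Cap())
	}
}
```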
@@ -3,6 +3,7 @@ package reputation
 import (
 	"encoding/hex"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
 	reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
 	"go.uber.org/zap"

@@ -13,7 +14,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) {
 	peerID := put.PeerID()

 	// FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
-	rp.log.Info("notification",
+	rp.log.Info(logs.ReputationNotification,
 		zap.String("type", "reputation put"),
 		zap.String("peer_id", hex.EncodeToString(peerID.PublicKey())))

@@ -22,7 +23,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) {
 	err := rp.pool.Submit(func() { rp.processPut(&put) })
 	if err != nil {
 		// there system can be moved into controlled degradation stage
-		rp.log.Warn("reputation worker pool drained",
+		rp.log.Warn(logs.ReputationReputationWorkerPoolDrained,
 			zap.Int("capacity", rp.pool.Cap()))
 	}
 }
@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
 	reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"
 	apireputation "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/reputation"

@@ -16,7 +17,7 @@ var errWrongManager = errors.New("got manager that is incorrect for peer")

 func (rp *Processor) processPut(e *reputationEvent.Put) {
 	if !rp.alphabetState.IsAlphabet() {
-		rp.log.Info("non alphabet mode, ignore reputation put notification")
+		rp.log.Info(logs.ReputationNonAlphabetModeIgnoreReputationPutNotification)
 		return
 	}

@@ -27,7 +28,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) {
 	// check if epoch is valid
 	currentEpoch := rp.epochState.EpochCounter()
 	if epoch >= currentEpoch {
-		rp.log.Info("ignore reputation value",
+		rp.log.Info(logs.ReputationIgnoreReputationValue,
 			zap.String("reason", "invalid epoch number"),
 			zap.Uint64("trust_epoch", epoch),
 			zap.Uint64("local_epoch", currentEpoch))

@@ -37,7 +38,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) {

 	// check signature
 	if !value.VerifySignature() {
-		rp.log.Info("ignore reputation value",
+		rp.log.Info(logs.ReputationIgnoreReputationValue,
 			zap.String("reason", "invalid signature"),
 		)

@@ -46,7 +47,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) {

 	// check if manager is correct
 	if err := rp.checkManagers(epoch, value.Manager(), id); err != nil {
-		rp.log.Info("ignore reputation value",
+		rp.log.Info(logs.ReputationIgnoreReputationValue,
 			zap.String("reason", "wrong manager"),
 			zap.String("error", err.Error()))

@@ -91,7 +92,7 @@ func (rp *Processor) approvePutReputation(e *reputationEvent.Put) {
 	}
 	if err != nil {
 		// FIXME: #1147 do not use `ToV2` method outside frostfs-api-go library
-		rp.log.Warn("can't send approval tx for reputation value",
+		rp.log.Warn(logs.ReputationCantSendApprovalTxForReputationValue,
 			zap.String("peer_id", hex.EncodeToString(id.PublicKey())),
 			zap.String("error", err.Error()))
 	}
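Note how `logs.ReputationIgnoreReputationValue` is reused for three distinct rejection paths, with the difference carried by the structured `reason` field. Keeping the message constant is what makes this refactoring safe for log aggregation: all rejections group under one message key. A small sketch of that pattern (the constant's value is a stand-in matching the removed literal):

```go
package main

import "go.uber.org/zap"

// Stand-in for logs.ReputationIgnoreReputationValue from this diff.
const reputationIgnoreReputationValue = "ignore reputation value"

func main() {
	log, _ := zap.NewProduction()
	defer log.Sync()

	// One constant message, varying structured reasons, as in processPut.
	log.Info(reputationIgnoreReputationValue, zap.String("reason", "invalid epoch number"))
	log.Info(reputationIgnoreReputationValue, zap.String("reason", "invalid signature"))
	log.Info(reputationIgnoreReputationValue, zap.String("reason", "wrong manager"))
}
```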
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	repClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/reputation"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
 	reputationEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/reputation"

@@ -71,7 +72,7 @@ func New(p *Params) (*Processor, error) {
 		return nil, errors.New("ir/reputation: manager builder is not set")
 	}

-	p.Log.Debug("reputation worker pool", zap.Int("size", p.PoolSize))
+	p.Log.Debug(logs.ReputationReputationWorkerPool, zap.Int("size", p.PoolSize))

 	pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true))
 	if err != nil {
@@ -7,6 +7,7 @@ import (
 	"encoding/hex"
 	"math/big"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"

@@ -58,32 +59,32 @@ func (c *Calculator) Calculate(p *CalculatePrm) {
 	)}

 	if p.Epoch == 0 {
-		log.Info("settlements are ignored for zero epoch")
+		log.Info(logs.AuditSettlementsAreIgnoredForZeroEpoch)
 		return
 	}

-	log.Info("calculate audit settlements")
+	log.Info(logs.AuditCalculateAuditSettlements)

-	log.Debug("getting results for the previous epoch")
+	log.Debug(logs.AuditGettingResultsForThePreviousEpoch)
 	prevEpoch := p.Epoch - 1

 	auditResults, err := c.prm.ResultStorage.AuditResultsForEpoch(prevEpoch)
 	if err != nil {
-		log.Error("could not collect audit results")
+		log.Error(logs.AuditCouldNotCollectAuditResults)
 		return
 	} else if len(auditResults) == 0 {
-		log.Debug("no audit results in previous epoch")
+		log.Debug(logs.AuditNoAuditResultsInPreviousEpoch)
 		return
 	}

 	auditFee, err := c.prm.AuditFeeFetcher.AuditFee()
 	if err != nil {
-		log.Warn("can't fetch audit fee from network config",
+		log.Warn(logs.AuditCantFetchAuditFeeFromNetworkConfig,
 			zap.String("error", err.Error()))
 		auditFee = 0
 	}

-	log.Debug("processing audit results",
+	log.Debug(logs.AuditProcessingAuditResults,
 		zap.Int("number", len(auditResults)),
 	)

@@ -98,7 +99,7 @@ func (c *Calculator) Calculate(p *CalculatePrm) {
 		})
 	}

-	log.Debug("processing transfers")
+	log.Debug(logs.AuditProcessingTransfers)

 	common.TransferAssets(c.prm.Exchanger, table, common.AuditSettlementDetails(prevEpoch))
 }

@@ -109,35 +110,35 @@ func (c *Calculator) processResult(ctx *singleResultCtx) {
 		zap.Uint64("audit epoch", ctx.auditResult.Epoch()),
 	)}

-	ctx.log.Debug("reading information about the container")
+	ctx.log.Debug(logs.AuditReadingInformationAboutTheContainer)

 	ok := c.readContainerInfo(ctx)
 	if !ok {
 		return
 	}

-	ctx.log.Debug("building placement")
+	ctx.log.Debug(logs.AuditBuildingPlacement)

 	ok = c.buildPlacement(ctx)
 	if !ok {
 		return
 	}

-	ctx.log.Debug("collecting passed nodes")
+	ctx.log.Debug(logs.AuditCollectingPassedNodes)

 	ok = c.collectPassNodes(ctx)
 	if !ok {
 		return
 	}

-	ctx.log.Debug("calculating sum of the sizes of all storage groups")
+	ctx.log.Debug(logs.AuditCalculatingSumOfTheSizesOfAllStorageGroups)

 	ok = c.sumSGSizes(ctx)
 	if !ok {
 		return
 	}

-	ctx.log.Debug("filling transfer table")
+	ctx.log.Debug(logs.AuditFillingTransferTable)

 	c.fillTransferTable(ctx)
 }

@@ -145,7 +146,7 @@ func (c *Calculator) processResult(ctx *singleResultCtx) {
 func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool {
 	cnr, ok := ctx.auditResult.Container()
 	if !ok {
-		ctx.log.Error("missing container in audit result")
+		ctx.log.Error(logs.AuditMissingContainerInAuditResult)
 		return false
 	}

@@ -153,7 +154,7 @@ func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool {

 	ctx.cnrInfo, err = c.prm.ContainerStorage.ContainerInfo(cnr)
 	if err != nil {
-		ctx.log.Error("could not get container info",
+		ctx.log.Error(logs.AuditCouldNotGetContainerInfo,
 			zap.String("error", err.Error()),
 		)
 	}

@@ -166,14 +167,14 @@ func (c *Calculator) buildPlacement(ctx *singleResultCtx) bool {

 	ctx.cnrNodes, err = c.prm.PlacementCalculator.ContainerNodes(ctx.auditEpoch(), ctx.containerID())
 	if err != nil {
-		ctx.log.Error("could not get container nodes",
+		ctx.log.Error(logs.AuditCouldNotGetContainerNodes,
 			zap.String("error", err.Error()),
 		)
 	}

 	empty := len(ctx.cnrNodes) == 0
 	if empty {
-		ctx.log.Debug("empty list of container nodes")
+		ctx.log.Debug(logs.AuditEmptyListOfContainerNodes)
 	}

 	return err == nil && !empty

@@ -206,7 +207,7 @@ func (c *Calculator) collectPassNodes(ctx *singleResultCtx) bool {

 	empty := len(ctx.passNodes) == 0
 	if empty {
-		ctx.log.Debug("none of the container nodes passed the audit")
+		ctx.log.Debug(logs.AuditNoneOfTheContainerNodesPassedTheAudit)
 	}

 	return !empty

@@ -224,7 +225,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {

 		sgInfo, err := c.prm.SGStorage.SGInfo(addr)
 		if err != nil {
-			ctx.log.Error("could not get SG info",
+			ctx.log.Error(logs.AuditCouldNotGetSGInfo,
 				zap.String("id", id.String()),
 				zap.String("error", err.Error()),
 			)

@@ -244,7 +245,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {
 	}

 	if sumPassSGSize == 0 {
-		ctx.log.Debug("zero sum SG size")
+		ctx.log.Debug(logs.AuditZeroSumSGSize)
 		return false
 	}

@@ -260,7 +261,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
 	for k, info := range ctx.passNodes {
 		ownerID, err := c.prm.AccountStorage.ResolveKey(info)
 		if err != nil {
-			ctx.log.Error("could not resolve public key of the storage node",
+			ctx.log.Error(logs.AuditCouldNotResolvePublicKeyOfTheStorageNode,
 				zap.String("error", err.Error()),
 				zap.String("key", k),
 			)

@@ -270,7 +271,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {

 		price := info.Price()

-		ctx.log.Debug("calculating storage node salary for audit (GASe-12)",
+		ctx.log.Debug(logs.AuditCalculatingStorageNodeSalaryForAudit,
 			zap.Stringer("sum SG size", ctx.sumSGSize),
 			zap.Stringer("price", price),
 		)

@@ -292,7 +293,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
 	// add txs to pay inner ring node for audit result
 	auditIR, err := ownerFromKey(ctx.auditResult.AuditorKey())
 	if err != nil {
-		ctx.log.Error("could not parse public key of the inner ring node",
+		ctx.log.Error(logs.AuditCouldNotParsePublicKeyOfTheInnerRingNode,
 			zap.String("error", err.Error()),
 			zap.String("key", hex.EncodeToString(ctx.auditResult.AuditorKey())),
 		)
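`Calculate` and `processResult` both build scoped loggers up front (the `)}` context lines close those constructors), so every later `Info`/`Debug` call automatically carries the epoch fields. zap's `With` is presumably the mechanism; a sketch under that assumption, with field names taken from the hunks above:

```go
package main

import "go.uber.org/zap"

func main() {
	base, _ := zap.NewProduction()
	defer base.Sync()

	// Per-calculation logger: every message below carries epoch=42.
	log := base.With(zap.Uint64("epoch", 42))
	log.Info("calculate audit settlements")

	// Per-result logger: adds the audit epoch on top, as processResult does.
	resultLog := log.With(zap.Uint64("audit epoch", 41))
	resultLog.Debug("reading information about the container")
}
```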
@@ -3,6 +3,7 @@ package basic
 import (
 	"math/big"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
 	cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
 	"go.uber.org/zap"

@@ -20,7 +21,7 @@ func (inc *IncomeSettlementContext) Collect() {

 	cachedRate, err := inc.rate.BasicRate()
 	if err != nil {
-		inc.log.Error("can't get basic income rate",
+		inc.log.Error(logs.BasicCantGetBasicIncomeRate,
 			zap.String("error", err.Error()))

 		return

@@ -33,7 +34,7 @@ func (inc *IncomeSettlementContext) Collect() {

 	cnrEstimations, err := inc.estimations.Estimations(inc.epoch)
 	if err != nil {
-		inc.log.Error("can't fetch container size estimations",
+		inc.log.Error(logs.BasicCantFetchContainerSizeEstimations,
 			zap.Uint64("epoch", inc.epoch),
 			zap.String("error", err.Error()))

@@ -45,7 +46,7 @@ func (inc *IncomeSettlementContext) Collect() {
 	for i := range cnrEstimations {
 		owner, err := inc.container.ContainerInfo(cnrEstimations[i].ContainerID)
 		if err != nil {
-			inc.log.Warn("can't fetch container info",
+			inc.log.Warn(logs.BasicCantFetchContainerInfo,
 				zap.Uint64("epoch", inc.epoch),
 				zap.Stringer("container_id", cnrEstimations[i].ContainerID),
 				zap.String("error", err.Error()))

@@ -55,7 +56,7 @@ func (inc *IncomeSettlementContext) Collect() {

 		cnrNodes, err := inc.placement.ContainerNodes(inc.epoch, cnrEstimations[i].ContainerID)
 		if err != nil {
-			inc.log.Debug("can't fetch container info",
+			inc.log.Debug(logs.BasicCantFetchContainerInfo,
 				zap.Uint64("epoch", inc.epoch),
 				zap.Stringer("container_id", cnrEstimations[i].ContainerID),
 				zap.String("error", err.Error()))
@@ -4,6 +4,7 @@ import (
 	"encoding/hex"
 	"math/big"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/common"
 	"go.uber.org/zap"
 )

@@ -20,7 +21,7 @@ func (inc *IncomeSettlementContext) Distribute() {

 	bankBalance, err := inc.balances.Balance(inc.bankOwner)
 	if err != nil {
-		inc.log.Error("can't fetch balance of banking account",
+		inc.log.Error(logs.BasicCantFetchBalanceOfBankingAccount,
 			zap.String("error", err.Error()))

 		return

@@ -31,7 +32,7 @@ func (inc *IncomeSettlementContext) Distribute() {
 	inc.distributeTable.Iterate(func(key []byte, n *big.Int) {
 		nodeOwner, err := inc.accounts.ResolveKey(nodeInfoWrapper(key))
 		if err != nil {
-			inc.log.Warn("can't transform public key to owner id",
+			inc.log.Warn(logs.BasicCantTransformPublicKeyToOwnerID,
 				zap.String("public_key", hex.EncodeToString(key)),
 				zap.String("error", err.Error()))
@@ -1,6 +1,7 @@
 package settlement

 import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	"go.uber.org/zap"

@@ -14,7 +15,7 @@ func (p *Processor) HandleAuditEvent(e event.Event) {
 	epoch := ev.Epoch()

 	if !p.state.IsAlphabet() {
-		p.log.Info("non alphabet mode, ignore audit payments")
+		p.log.Info(logs.SettlementNonAlphabetModeIgnoreAuditPayments)

 		return
 	}

@@ -23,10 +24,10 @@ func (p *Processor) HandleAuditEvent(e event.Event) {
 		zap.Uint64("epoch", epoch),
 	)}

-	log.Info("new audit settlement event")
+	log.Info(logs.SettlementNewAuditSettlementEvent)

 	if epoch == 0 {
-		log.Debug("ignore genesis epoch")
+		log.Debug(logs.SettlementIgnoreGenesisEpoch)
 		return
 	}

@@ -38,14 +39,14 @@ func (p *Processor) HandleAuditEvent(e event.Event) {

 	err := p.pool.Submit(handler.handle)
 	if err != nil {
-		log.Warn("could not add handler of AuditEvent to queue",
+		log.Warn(logs.SettlementCouldNotAddHandlerOfAuditEventToQueue,
 			zap.String("error", err.Error()),
 		)

 		return
 	}

-	log.Debug("AuditEvent handling successfully scheduled")
+	log.Debug(logs.SettlementAuditEventHandlingSuccessfullyScheduled)
 }

 func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {

@@ -53,19 +54,19 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
 	epoch := ev.Epoch()

 	if !p.state.IsAlphabet() {
-		p.log.Info("non alphabet mode, ignore income collection event")
+		p.log.Info(logs.SettlementNonAlphabetModeIgnoreIncomeCollectionEvent)

 		return
 	}

-	p.log.Info("start basic income collection",
+	p.log.Info(logs.SettlementStartBasicIncomeCollection,
 		zap.Uint64("epoch", epoch))

 	p.contextMu.Lock()
 	defer p.contextMu.Unlock()

 	if _, ok := p.incomeContexts[epoch]; ok {
-		p.log.Error("income context already exists",
+		p.log.Error(logs.SettlementIncomeContextAlreadyExists,
 			zap.Uint64("epoch", epoch))

 		return

@@ -73,7 +74,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {

 	incomeCtx, err := p.basicIncome.CreateContext(epoch)
 	if err != nil {
-		p.log.Error("can't create income context",
+		p.log.Error(logs.SettlementCantCreateIncomeContext,
 			zap.String("error", err.Error()))

 		return

@@ -85,7 +86,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) {
 		incomeCtx.Collect()
 	})
 	if err != nil {
-		p.log.Warn("could not add handler of basic income collection to queue",
+		p.log.Warn(logs.SettlementCouldNotAddHandlerOfBasicIncomeCollectionToQueue,
			zap.String("error", err.Error()),
 		)

@@ -98,12 +99,12 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
 	epoch := ev.Epoch()

 	if !p.state.IsAlphabet() {
-		p.log.Info("non alphabet mode, ignore income distribution event")
+		p.log.Info(logs.SettlementNonAlphabetModeIgnoreIncomeDistributionEvent)

 		return
 	}

-	p.log.Info("start basic income distribution",
+	p.log.Info(logs.SettlementStartBasicIncomeDistribution,
 		zap.Uint64("epoch", epoch))

 	p.contextMu.Lock()

@@ -113,7 +114,7 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
 	delete(p.incomeContexts, epoch)

 	if !ok {
-		p.log.Warn("income context distribution does not exists",
+		p.log.Warn(logs.SettlementIncomeContextDistributionDoesNotExists,
 			zap.Uint64("epoch", epoch))

 		return

@@ -123,7 +124,7 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) {
 		incomeCtx.Distribute()
 	})
 	if err != nil {
-		p.log.Warn("could not add handler of basic income distribution to queue",
+		p.log.Warn(logs.SettlementCouldNotAddHandlerOfBasicIncomeDistributionToQueue,
 			zap.String("error", err.Error()),
 		)
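The three settlement handlers share one piece of state: a mutex-guarded map of per-epoch income contexts, created on collection and consumed on distribution (hence the "income context already exists" and "income context distribution does not exists" branches). A simplified stand-in for that bookkeeping, with types reduced to the essentials:

```go
package main

import (
	"fmt"
	"sync"
)

type processor struct {
	mu       sync.Mutex
	contexts map[uint64]string // simplified stand-in for the real income context type
}

func (p *processor) collect(epoch uint64) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if _, ok := p.contexts[epoch]; ok {
		fmt.Println("income context already exists, epoch:", epoch)
		return
	}
	p.contexts[epoch] = "income context"
}

func (p *processor) distribute(epoch uint64) {
	p.mu.Lock()
	ctx, ok := p.contexts[epoch]
	delete(p.contexts, epoch) // claimed exactly once
	p.mu.Unlock()
	if !ok {
		fmt.Println("income context does not exist, epoch:", epoch)
		return
	}
	fmt.Println("distributing", ctx, "for epoch", epoch)
}

func main() {
	p := &processor{contexts: map[uint64]string{}}
	p.collect(42)
	p.distribute(42)
	p.distribute(42) // already consumed: logged and ignored
}
```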
@@ -1,6 +1,9 @@
 package settlement

-import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+)

 type auditEventHandler struct {
 	log *logger.Logger

@@ -11,9 +14,9 @@ type auditEventHandler struct {
 }

 func (p *auditEventHandler) handle() {
-	p.log.Info("process audit settlements")
+	p.log.Info(logs.SettlementProcessAuditSettlements)

 	p.proc.ProcessAuditSettlements(p.epoch)

-	p.log.Info("audit processing finished")
+	p.log.Info(logs.SettlementAuditProcessingFinished)
 }
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"sync"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/basic"
 	nodeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"

@@ -63,7 +64,7 @@ func New(prm Prm, opts ...Option) *Processor {
 		panic(fmt.Errorf("could not create worker pool: %w", err))
 	}

-	o.log.Debug("worker pool for settlement processor successfully initialized",
+	o.log.Debug(logs.SettlementWorkerPoolForSettlementProcessorSuccessfullyInitialized,
 		zap.Int("capacity", o.poolSize),
 	)
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"time"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
 	netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	storagegroup2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/storagegroup"

@@ -94,7 +95,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne

 		cli, err := c.getWrappedClient(info)
 		if err != nil {
-			c.log.Warn("can't setup remote connection",
+			c.log.Warn(logs.InnerringCantSetupRemoteConnection,
 				zap.String("error", err.Error()))

 			continue

@@ -109,7 +110,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne
 		cancel()

 		if err != nil {
-			c.log.Warn("can't get storage group object",
+			c.log.Warn(logs.InnerringCantGetStorageGroupObject,
 				zap.String("error", err.Error()))

 			continue
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"math/big"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/settlement/audit"

@@ -223,7 +224,7 @@ func (s settlementDeps) Transfer(sender, recipient user.ID, amount *big.Int, det
 	)

 	if !amount.IsInt64() {
-		s.log.Error("amount can not be represented as an int64")
+		s.log.Error(logs.InnerringAmountCanNotBeRepresentedAsAnInt64)

 		return
 	}

@@ -262,7 +263,7 @@ func (b basicIncomeSettlementDeps) Estimations(epoch uint64) ([]*containerClient
 	for i := range estimationIDs {
 		estimation, err := b.cnrClient.GetUsedSpaceEstimations(estimationIDs[i])
 		if err != nil {
-			b.log.Warn("can't get used space estimation",
+			b.log.Warn(logs.InnerringCantGetUsedSpaceEstimation,
 				zap.String("estimation_id", hex.EncodeToString(estimationIDs[i])),
 				zap.String("error", err.Error()))
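`Transfer` above rejects any amount that cannot be represented as an int64 before invoking the contract; `big.Int.IsInt64` is the guard doing that work. A short, self-contained illustration:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	amount := new(big.Int).Lsh(big.NewInt(1), 70) // 2^70, larger than any int64
	if !amount.IsInt64() {
		fmt.Println("amount can not be represented as an int64")
		return
	}
	fmt.Println(amount.Int64()) // safe only after the check above
}
```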
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"sort"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance"
 	auditClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/audit"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/audit"

@@ -61,7 +62,7 @@ func (s *Server) IsAlphabet() bool {
 func (s *Server) InnerRingIndex() int {
 	index, err := s.statusIndex.InnerRingIndex()
 	if err != nil {
-		s.log.Error("can't get inner ring index", zap.String("error", err.Error()))
+		s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
 		return -1
 	}

@@ -73,7 +74,7 @@ func (s *Server) InnerRingIndex() int {
 func (s *Server) InnerRingSize() int {
 	size, err := s.statusIndex.InnerRingSize()
 	if err != nil {
-		s.log.Error("can't get inner ring size", zap.String("error", err.Error()))
+		s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
 		return 0
 	}

@@ -85,7 +86,7 @@ func (s *Server) InnerRingSize() int {
 func (s *Server) AlphabetIndex() int {
 	index, err := s.statusIndex.AlphabetIndex()
 	if err != nil {
-		s.log.Error("can't get alphabet index", zap.String("error", err.Error()))
+		s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
 		return -1
 	}

@@ -97,13 +98,13 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro

 	index := s.InnerRingIndex()
 	if s.contracts.alphabet.indexOutOfRange(index) {
-		s.log.Info("ignore validator vote: node not in alphabet range")
+		s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange)

 		return nil
 	}

 	if len(validators) == 0 {
-		s.log.Info("ignore validator vote: empty validators list")
+		s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList)

 		return nil
 	}

@@ -128,7 +129,7 @@
 	s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) {
 		err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators)
 		if err != nil {
-			s.log.Warn("can't invoke vote method in alphabet contract",
+			s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract,
 				zap.Int8("alphabet_index", int8(letter)),
 				zap.Uint64("epoch", epoch),
 				zap.String("error", err.Error()))
@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	irsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/subnet"
 	netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
 	morphsubnet "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/subnet"

@@ -173,7 +174,7 @@ func (s *Server) catchSubnetCreation(e event.Event) {
 		s.handleSubnetCreation(e)
 	})
 	if err != nil {
-		s.log.Error("subnet creation queue failure",
+		s.log.Error(logs.InnerringSubnetCreationQueueFailure,
 			zap.String("error", err.Error()),
 		)
 	}

@@ -225,7 +226,7 @@ func (s *Server) handleSubnetCreation(e event.Event) {
 		ev: putEv,
 	})
 	if err != nil {
-		s.log.Info("discard subnet creation",
+		s.log.Info(logs.InnerringDiscardSubnetCreation,
 			zap.String("reason", err.Error()),
 		)

@@ -251,7 +252,7 @@
 	}

 	if err != nil {
-		s.log.Error("approve subnet creation",
+		s.log.Error(logs.InnerringApproveSubnetCreation,
 			zap.Bool("notary", isNotary),
 			zap.String("error", err.Error()),
 		)

@@ -266,7 +267,7 @@ func (s *Server) catchSubnetRemoval(e event.Event) {
 		s.handleSubnetRemoval(e)
 	})
 	if err != nil {
-		s.log.Error("subnet removal handling failure",
+		s.log.Error(logs.InnerringSubnetRemovalHandlingFailure,
 			zap.String("error", err.Error()),
 		)
 	}

@@ -280,7 +281,7 @@ func (s *Server) handleSubnetRemoval(e event.Event) {

 	candidates, err := s.netmapClient.GetCandidates()
 	if err != nil {
-		s.log.Error("getting netmap candidates",
+		s.log.Error(logs.InnerringGettingNetmapCandidates,
 			zap.Error(err),
 		)

@@ -290,7 +291,7 @@
 	var removedID subnetid.ID
 	err = removedID.Unmarshal(delEv.ID())
 	if err != nil {
-		s.log.Error("unmarshalling removed subnet ID",
+		s.log.Error(logs.InnerringUnmarshallingRemovedSubnetID,
 			zap.String("error", err.Error()),
 		)

@@ -318,8 +319,8 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
 		return nil
 	})
 	if err != nil {
-		log.Error("iterating node's subnets", zap.Error(err))
-		log.Debug("removing node from netmap candidates")
+		log.Error(logs.InnerringIteratingNodesSubnets, zap.Error(err))
+		log.Debug(logs.InnerringRemovingNodeFromNetmapCandidates)

 		var updateStatePrm netmapclient.UpdatePeerPrm
 		updateStatePrm.SetKey(c.PublicKey())

@@ -327,7 +328,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I

 		err = s.netmapClient.UpdatePeerState(updateStatePrm)
 		if err != nil {
-			log.Error("removing node from candidates",
+			log.Error(logs.InnerringRemovingNodeFromCandidates,
 				zap.Error(err),
 			)
 		}

@@ -338,7 +339,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
 	// remove subnet from node's information
 	// if it contains removed subnet
 	if removeSubnet {
-		log.Debug("removing subnet from the node")
+		log.Debug(logs.InnerringRemovingSubnetFromTheNode)

 		var addPeerPrm netmapclient.AddPeerPrm
 		addPeerPrm.SetNodeInfo(c)

@@ -346,7 +347,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I

 		err = s.netmapClient.AddPeer(addPeerPrm)
 		if err != nil {
-			log.Error("updating subnet info",
+			log.Error(logs.InnerringUpdatingSubnetInfo,
 				zap.Error(err),
 			)
 		}
@@ -5,6 +5,7 @@ import (
 	"os"
 	"path/filepath"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
 	"go.etcd.io/bbolt"
 	"go.uber.org/zap"

@@ -14,7 +15,7 @@ import (
 //
 // If the database file does not exist, it will be created automatically.
 func (b *Blobovnicza) Open() error {
-	b.log.Debug("creating directory for BoltDB",
+	b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB,
 		zap.String("path", b.path),
 		zap.Bool("ro", b.boltOptions.ReadOnly),
 	)

@@ -28,7 +29,7 @@ func (b *Blobovnicza) Open() error {
 		}
 	}

-	b.log.Debug("opening BoltDB",
+	b.log.Debug(logs.BlobovniczaOpeningBoltDB,
 		zap.String("path", b.path),
 		zap.Stringer("permissions", b.perm),
 	)

@@ -44,13 +45,13 @@ func (b *Blobovnicza) Open() error {
 //
 // Should not be called in read-only configuration.
 func (b *Blobovnicza) Init() error {
-	b.log.Debug("initializing...",
+	b.log.Debug(logs.BlobovniczaInitializing,
 		zap.Uint64("object size limit", b.objSizeLimit),
 		zap.Uint64("storage size limit", b.fullSizeLimit),
 	)

 	if size := b.filled.Load(); size != 0 {
-		b.log.Debug("already initialized", zap.Uint64("size", size))
+		b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size))
 		return nil
 	}

@@ -59,7 +60,7 @@ func (b *Blobovnicza) Init() error {
 		// create size range bucket

 		rangeStr := stringifyBounds(lower, upper)
-		b.log.Debug("creating bucket for size range",
+		b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange,
 			zap.String("range", rangeStr))

 		_, err := tx.CreateBucketIfNotExists(key)

@@ -86,7 +87,7 @@ func (b *Blobovnicza) Init() error {

 // Close releases all internal database resources.
 func (b *Blobovnicza) Close() error {
-	b.log.Debug("closing BoltDB",
+	b.log.Debug(logs.BlobovniczaClosingBoltDB,
 		zap.String("path", b.path),
 	)
@@ -1,6 +1,7 @@
 package blobovnicza

 import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"go.etcd.io/bbolt"

@@ -51,7 +52,7 @@ func (b *Blobovnicza) Delete(prm DeletePrm) (DeleteRes, error) {
 			err := buck.Delete(addrKey)

 			if err == nil {
-				b.log.Debug("object was removed from bucket",
+				b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket,
 					zap.String("binary size", stringifyByteSize(sz)),
 					zap.String("range", stringifyBounds(lower, upper)),
 				)
@@ -7,6 +7,7 @@ import (
 	"strconv"
 	"sync"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"

@@ -104,12 +105,12 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
 			// it from opened cache.
 			return
 		} else if err := value.Close(); err != nil {
-			blz.log.Error("could not close Blobovnicza",
+			blz.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza,
 				zap.String("id", p),
 				zap.String("error", err.Error()),
 			)
 		} else {
-			blz.log.Debug("blobovnicza successfully closed on evict",
+			blz.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyClosedOnEvict,
 				zap.String("id", p),
 			)
 		}

@@ -141,11 +142,11 @@ func (b *Blobovniczas) getActivated(lvlPath string) (blobovniczaWithIndex, error
 //
 // if current active blobovnicza's index is not old, it remains unchanged.
 func (b *Blobovniczas) updateActive(lvlPath string, old *uint64) error {
-	b.log.Debug("updating active blobovnicza...", zap.String("path", lvlPath))
+	b.log.Debug(logs.BlobovniczatreeUpdatingActiveBlobovnicza, zap.String("path", lvlPath))

 	_, err := b.updateAndGet(lvlPath, old)

-	b.log.Debug("active blobovnicza successfully updated", zap.String("path", lvlPath))
+	b.log.Debug(logs.BlobovniczatreeActiveBlobovniczaSuccessfullyUpdated, zap.String("path", lvlPath))

 	return err
 }

@@ -201,7 +202,7 @@ func (b *Blobovniczas) updateAndGet(lvlPath string, old *uint64) (blobovniczaWit
 	}
 	b.lruMtx.Unlock()

-	b.log.Debug("blobovnicza successfully activated",
+	b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyActivated,
 		zap.String("path", activePath))

 	return active, nil
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"path/filepath"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"go.uber.org/zap"
 )

@@ -18,10 +19,10 @@ func (b *Blobovniczas) Open(readOnly bool) error {
 //
 // Should be called exactly once.
 func (b *Blobovniczas) Init() error {
-	b.log.Debug("initializing Blobovnicza's")
+	b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas)

 	if b.readOnly {
-		b.log.Debug("read-only mode, skip blobovniczas initialization...")
+		b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization)
 		return nil
 	}

@@ -36,7 +37,7 @@ func (b *Blobovniczas) Init() error {
 			return true, fmt.Errorf("could not initialize blobovnicza structure %s: %w", p, err)
 		}

-		b.log.Debug("blobovnicza successfully initialized, closing...", zap.String("id", p))
+		b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p))
 		return false, nil
 	})
 }

@@ -49,7 +50,7 @@ func (b *Blobovniczas) Close() error {

 	for p, v := range b.active {
 		if err := v.blz.Close(); err != nil {
-			b.log.Debug("could not close active blobovnicza",
+			b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza,
 				zap.String("path", p),
 				zap.String("error", err.Error()),
 			)

@@ -59,7 +60,7 @@ func (b *Blobovniczas) Close() error {
 	for _, k := range b.opened.Keys() {
 		blz, _ := b.opened.Get(k)
 		if err := blz.Close(); err != nil {
-			b.log.Debug("could not close active blobovnicza",
+			b.log.Debug(logs.BlobovniczatreeCouldNotCloseActiveBlobovnicza,
 				zap.String("path", k),
 				zap.String("error", err.Error()),
 			)
@@ -3,6 +3,7 @@ package blobovniczatree
 import (
 	"path/filepath"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"

@@ -44,7 +45,7 @@ func (b *Blobovniczas) Delete(prm common.DeletePrm) (res common.DeleteRes, err e
 		res, err = b.deleteObjectFromLevel(bPrm, p, !ok, prm)
 		if err != nil {
 			if !blobovnicza.IsErrNotFound(err) {
-				b.log.Debug("could not remove object from level",
+				b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
 					zap.String("level", p),
 					zap.String("error", err.Error()),
 				)

@@ -83,7 +84,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
 		if res, err := b.deleteObject(v, prm, dp); err == nil {
 			return res, err
 		} else if !blobovnicza.IsErrNotFound(err) {
-			b.log.Debug("could not remove object from opened blobovnicza",
+			b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromOpenedBlobovnicza,
 				zap.String("path", blzPath),
 				zap.String("error", err.Error()),
 			)

@@ -102,7 +103,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
 		if res, err := b.deleteObject(active.blz, prm, dp); err == nil {
 			return res, err
 		} else if !blobovnicza.IsErrNotFound(err) {
-			b.log.Debug("could not remove object from active blobovnicza",
+			b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromActiveBlobovnicza,
 				zap.String("path", blzPath),
 				zap.String("error", err.Error()),
 			)
@@ -6,6 +6,7 @@ import (
 	"path/filepath"

 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"go.opentelemetry.io/otel/attribute"

@@ -47,7 +48,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
 		_, err := b.getObjectFromLevel(ctx, gPrm, p, !ok)
 		if err != nil {
 			if !blobovnicza.IsErrNotFound(err) {
-				b.log.Debug("could not get object from level",
+				b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 					zap.String("level", p),
 					zap.String("error", err.Error()))
 			}
@@ -7,6 +7,7 @@ import (
 	"path/filepath"

 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"

@@ -53,7 +54,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
 		res, err = b.getObjectFromLevel(ctx, bPrm, p, !ok)
 		if err != nil {
 			if !blobovnicza.IsErrNotFound(err) {
-				b.log.Debug("could not get object from level",
+				b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 					zap.String("level", p),
 					zap.String("error", err.Error()),
 				)

@@ -88,7 +89,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G
 		if res, err := b.getObject(ctx, v, prm); err == nil {
 			return res, err
 		} else if !blobovnicza.IsErrNotFound(err) {
-			b.log.Debug("could not read object from opened blobovnicza",
+			b.log.Debug(logs.BlobovniczatreeCouldNotReadObjectFromOpenedBlobovnicza,
 				zap.String("path", blzPath),
 				zap.String("error", err.Error()),
 			)

@@ -108,7 +109,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G
 		if res, err := b.getObject(ctx, active.blz, prm); err == nil {
 			return res, err
 		} else if !blobovnicza.IsErrNotFound(err) {
-			b.log.Debug("could not get object from active blobovnicza",
+			b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromActiveBlobovnicza,
 				zap.String("path", blzPath),
 				zap.String("error", err.Error()),
 			)
@@ -8,6 +8,7 @@ import (
 	"strconv"

 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"

@@ -54,7 +55,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
 		if err != nil {
 			outOfBounds := isErrOutOfRange(err)
 			if !outOfBounds && !blobovnicza.IsErrNotFound(err) {
-				b.log.Debug("could not get object from level",
+				b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel,
 					zap.String("level", p),
 					zap.String("error", err.Error()),
 				)

@@ -98,7 +99,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang
 			return res, err
 		default:
 			if !blobovnicza.IsErrNotFound(err) {
-				b.log.Debug("could not read payload range from opened blobovnicza",
+				b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromOpenedBlobovnicza,
 					zap.String("path", blzPath),
 					zap.String("error", err.Error()),
 				)

@@ -123,7 +124,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang
 			return res, err
 		default:
 			if !blobovnicza.IsErrNotFound(err) {
-				b.log.Debug("could not read payload range from active blobovnicza",
+				b.log.Debug(logs.BlobovniczatreeCouldNotReadPayloadRangeFromActiveBlobovnicza,
 					zap.String("path", blzPath),
 					zap.String("error", err.Error()),
 				)
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"path/filepath"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"go.etcd.io/bbolt"

@@ -56,9 +57,9 @@ func (i *putIterator) iterate(path string) (bool, error) {
 	active, err := i.B.getActivated(path)
 	if err != nil {
 		if !isLogical(err) {
-			i.B.reportError("could not get active blobovnicza", err)
+			i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
 		} else {
-			i.B.log.Debug("could not get active blobovnicza",
+			i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
 				zap.String("error", err.Error()))
 		}

@@ -71,15 +72,15 @@ func (i *putIterator) iterate(path string) (bool, error) {
 	// and `updateActive` takes care of not updating the active blobovnicza twice.
 	if isFull := errors.Is(err, blobovnicza.ErrFull); isFull || errors.Is(err, bbolt.ErrDatabaseNotOpen) {
 		if isFull {
-			i.B.log.Debug("blobovnicza overflowed",
+			i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed,
 				zap.String("path", filepath.Join(path, u64ToHexString(active.ind))))
 		}

 		if err := i.B.updateActive(path, &active.ind); err != nil {
 			if !isLogical(err) {
-				i.B.reportError("could not update active blobovnicza", err)
+				i.B.reportError(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza, err)
 			} else {
-				i.B.log.Debug("could not update active blobovnicza",
+				i.B.log.Debug(logs.BlobovniczatreeCouldNotUpdateActiveBlobovnicza,
 					zap.String("level", path),
 					zap.String("error", err.Error()))
 			}

@@ -92,9 +93,9 @@ func (i *putIterator) iterate(path string) (bool, error) {

 		i.AllFull = false
 		if !isLogical(err) {
-			i.B.reportError("could not put object to active blobovnicza", err)
+			i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err)
 		} else {
-			i.B.log.Debug("could not put object to active blobovnicza",
+			i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
 				zap.String("path", filepath.Join(path, u64ToHexString(active.ind))),
 				zap.String("error", err.Error()))
 		}
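In the hunks above, the same constant feeds two different sinks: `reportError`, which routes the message into the shard-level error reporting machinery, and the plain structured logger. A rough sketch of the helper's shape as the call sites suggest it; the method body and the `reportErrorFunc` field are hypothetical, inferred only from usage here:

// Hypothetical sketch: reportError as its call sites above imply.
// The real method on Blobovniczas may carry different fields.
func (b *Blobovniczas) reportError(msg string, err error) {
	if b.reportErrorFunc != nil {
		b.reportErrorFunc(msg, err) // forward the constant message and cause
	}
}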
@@ -4,12 +4,13 @@ import (
 	"errors"
 	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"go.uber.org/zap"
 )

 // Open opens BlobStor.
 func (b *BlobStor) Open(readOnly bool) error {
-	b.log.Debug("opening...")
+	b.log.Debug(logs.BlobstorOpening)

 	for i := range b.storage {
 		err := b.storage[i].Storage.Open(readOnly)

@@ -29,7 +30,7 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag
 //
 // Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure.
 func (b *BlobStor) Init() error {
-	b.log.Debug("initializing...")
+	b.log.Debug(logs.BlobstorInitializing)

 	if err := b.compression.Init(); err != nil {
 		return err

@@ -46,13 +47,13 @@ func (b *BlobStor) Init() error {

 // Close releases all internal resources of BlobStor.
 func (b *BlobStor) Close() error {
-	b.log.Debug("closing...")
+	b.log.Debug(logs.BlobstorClosing)

 	var firstErr error
 	for i := range b.storage {
 		err := b.storage[i].Storage.Close()
 		if err != nil {
-			b.log.Info("couldn't close storage", zap.String("error", err.Error()))
+			b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
 			if firstErr == nil {
 				firstErr = err
 			}
@@ -5,6 +5,7 @@ import (
 	"encoding/hex"

 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"

@@ -57,7 +58,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
 	}

 	for _, err := range errors[:len(errors)-1] {
-		b.log.Warn("error occurred during object existence checking",
+		b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
 			zap.Stringer("address", prm.Address),
 			zap.String("error", err.Error()))
 	}
@@ -3,6 +3,7 @@ package blobstor
 import (
 	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"go.uber.org/zap"

@@ -38,7 +39,7 @@ func IterateBinaryObjects(blz *BlobStor, f func(addr oid.Address, data []byte, d
 	}
 	prm.IgnoreErrors = true
 	prm.ErrorHandler = func(addr oid.Address, err error) error {
-		blz.log.Warn("error occurred during the iteration",
+		blz.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration,
 			zap.Stringer("address", addr),
 			zap.String("err", err.Error()))
 		return nil
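One practical payoff of constant messages is that tests and log tooling can match on them exactly instead of repeating string literals. A hedged sketch, not part of this commit, using zap's observer core against one of the constants introduced above:

// Hypothetical test sketch; it could live as an external test of the
// internal/logs package. Only logs.BlobstorOpening is taken from this
// diff, the rest is illustrative.
package logs_test

import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest/observer"
)

func TestMessageIsStable(t *testing.T) {
	core, recorded := observer.New(zap.DebugLevel)
	log := zap.New(core)

	log.Debug(logs.BlobstorOpening) // as BlobStor.Open now logs

	if n := len(recorded.FilterMessage(logs.BlobstorOpening).All()); n != 1 {
		t.Fatalf("expected 1 entry with message %q, got %d", logs.BlobstorOpening, n)
	}
}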
@@ -8,6 +8,7 @@ import (
 	"strings"
 	"sync"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	"go.uber.org/zap"

@@ -47,7 +48,7 @@ func (e *StorageEngine) open() error {

 	for res := range errCh {
 		if res.err != nil {
-			e.log.Error("could not open shard, closing and skipping",
+			e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping,
 				zap.String("id", res.id),
 				zap.Error(res.err))

@@ -56,7 +57,7 @@ func (e *StorageEngine) open() error {

 			err := sh.Close()
 			if err != nil {
-				e.log.Error("could not close partially initialized shard",
+				e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
 					zap.String("id", res.id),
 					zap.Error(res.err))
 			}

@@ -94,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 	for res := range errCh {
 		if res.err != nil {
 			if errors.Is(res.err, blobstor.ErrInitBlobovniczas) {
-				e.log.Error("could not initialize shard, closing and skipping",
+				e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping,
 					zap.String("id", res.id),
 					zap.Error(res.err))

@@ -103,7 +104,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {

 				err := sh.Close()
 				if err != nil {
-					e.log.Error("could not close partially initialized shard",
+					e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard,
 						zap.String("id", res.id),
 						zap.Error(res.err))
 				}

@@ -149,7 +150,7 @@ func (e *StorageEngine) close(releasePools bool) error {

 	for id, sh := range e.shards {
 		if err := sh.Close(); err != nil {
-			e.log.Debug("could not close shard",
+			e.log.Debug(logs.EngineCouldNotCloseShard,
 				zap.String("id", id),
 				zap.String("error", err.Error()),
 			)

@@ -309,7 +310,7 @@ loop:
 	for _, p := range shardsToReload {
 		err := p.sh.Reload(p.opts...)
 		if err != nil {
-			e.log.Error("could not reload a shard",
+			e.log.Error(logs.EngineCouldNotReloadAShard,
 				zap.Stringer("shard id", p.sh.ID()),
 				zap.Error(err))
 		}

@@ -338,7 +339,7 @@ loop:
 			return fmt.Errorf("could not add %s shard: %w", idStr, err)
 		}

-		e.log.Info("added new shard", zap.String("id", idStr))
+		e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr))
 	}

 	return nil
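Note the deduplication above: both `open` and `Init` now log the close failure through the single `logs.EngineCouldNotClosePartiallyInitializedShard` constant. Judging by the names and the replaced literals, the engine group of constants presumably reads like this sketch; grouping and order are assumed:

// Sketch of the engine message group; values are copied from the
// string literals replaced in this file, the layout is assumed.
const (
	EngineCouldNotOpenShardClosingAndSkipping       = "could not open shard, closing and skipping"
	EngineCouldNotClosePartiallyInitializedShard    = "could not close partially initialized shard"
	EngineCouldNotInitializeShardClosingAndSkipping = "could not initialize shard, closing and skipping"
	EngineCouldNotCloseShard                        = "could not close shard"
	EngineCouldNotReloadAShard                      = "could not reload a shard"
	EngineAddedNewShard                             = "added new shard"
)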
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

@@ -136,7 +137,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
 	e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
 		res, err := sh.Select(selectPrm)
 		if err != nil {
-			e.log.Warn("error during searching for object children",
+			e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren,
 				zap.Stringer("addr", addr),
 				zap.String("error", err.Error()))
 			return false

@@ -147,7 +148,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo

 			_, err = sh.Inhume(ctx, inhumePrm)
 			if err != nil {
-				e.log.Debug("could not inhume object in shard",
+				e.log.Debug(logs.EngineCouldNotInhumeObjectInShard,
 					zap.Stringer("addr", addr),
 					zap.String("err", err.Error()))
 				continue
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"sync"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"

@@ -87,24 +88,24 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32) {
 	sid := sh.ID()
 	err := sh.SetMode(mode.DegradedReadOnly)
 	if err != nil {
-		e.log.Error("failed to move shard in degraded-read-only mode, moving to read-only",
+		e.log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly,
 			zap.Stringer("shard_id", sid),
 			zap.Uint32("error count", errCount),
 			zap.Error(err))

 		err = sh.SetMode(mode.ReadOnly)
 		if err != nil {
-			e.log.Error("failed to move shard in read-only mode",
+			e.log.Error(logs.EngineFailedToMoveShardInReadonlyMode,
 				zap.Stringer("shard_id", sid),
 				zap.Uint32("error count", errCount),
 				zap.Error(err))
 		} else {
-			e.log.Info("shard is moved in read-only mode due to error threshold",
+			e.log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold,
 				zap.Stringer("shard_id", sid),
 				zap.Uint32("error count", errCount))
 		}
 	} else {
-		e.log.Info("shard is moved in degraded mode due to error threshold",
+		e.log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold,
 			zap.Stringer("shard_id", sid),
 			zap.Uint32("error count", errCount))
 	}

@@ -182,7 +183,7 @@ func (e *StorageEngine) reportShardErrorWithFlags(
 	default:
 		// For background workers we can have a lot of such errors,
 		// thus logging is done with DEBUG level.
-		e.log.Debug("mode change is in progress, ignoring set-mode request",
+		e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest,
 			zap.Stringer("shard_id", sid),
 			zap.Uint32("error_count", errCount))
 	}
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"

@@ -79,7 +80,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (Eva
 		}
 	}

-	e.log.Info("started shards evacuation", zap.Strings("shard_ids", shardIDs))
+	e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs))

 	var res EvacuateShardRes

@@ -89,7 +90,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (Eva
 		}
 	}

-	e.log.Info("finished shards evacuation", zap.Strings("shard_ids", shardIDs))
+	e.log.Info(logs.EngineFinishedShardsEvacuation, zap.Strings("shard_ids", shardIDs))
 	return res, nil
 }

@@ -206,7 +207,7 @@ func (e *StorageEngine) tryEvacuateObject(ctx context.Context, addr oid.Address,
 		putDone, exists := e.putToShard(ctx, shards[j].hashedShard, j, shards[j].pool, addr, object)
 		if putDone || exists {
 			if putDone {
-				e.log.Debug("object is moved to another shard",
+				e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
 					zap.Stringer("from", sh.ID()),
 					zap.Stringer("to", shards[j].ID()),
 					zap.Stringer("addr", addr))
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"

@@ -83,7 +84,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e
 		if !prm.forceRemoval {
 			locked, err := e.IsLocked(prm.addrs[i])
 			if err != nil {
-				e.log.Warn("removing an object without full locking check",
+				e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck,
 					zap.Error(err),
 					zap.Stringer("addr", prm.addrs[i]))
 			} else if locked {

@@ -222,7 +223,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l

 		select {
 		case <-ctx.Done():
-			e.log.Info("interrupt processing the expired locks", zap.Error(ctx.Err()))
+			e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
 			return true
 		default:
 			return false

@@ -236,7 +237,7 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A

 		select {
 		case <-ctx.Done():
-			e.log.Info("interrupt processing the deleted locks", zap.Error(ctx.Err()))
+			e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
 			return true
 		default:
 			return false
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"

@@ -118,7 +119,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,

 		_, err = sh.ToMoveIt(toMoveItPrm)
 		if err != nil {
-			e.log.Warn("could not mark object for shard relocation",
+			e.log.Warn(logs.EngineCouldNotMarkObjectForShardRelocation,
 				zap.Stringer("shard", sh.ID()),
 				zap.String("error", err.Error()),
 			)

@@ -135,7 +136,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, ind int,
 	if err != nil {
 		if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) ||
 			errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
-			e.log.Warn("could not put object to shard",
+			e.log.Warn(logs.EngineCouldNotPutObjectToShard,
 				zap.Stringer("shard_id", sh.ID()),
 				zap.String("error", err.Error()))
 			return
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@@ -42,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
 		prm.Concurrency = defaultRemoveDuplicatesConcurrency
 	}

-	e.log.Info("starting removal of locally-redundant copies",
+	e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies,
 		zap.Int("concurrency", prm.Concurrency))

 	// The mutext must be taken for the whole duration to avoid target shard being removed

@@ -54,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
 	// This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0.
 	// However we could change weights in future and easily forget this function.
 	for _, sh := range e.shards {
-		e.log.Debug("started duplicates removal routine", zap.String("shard_id", sh.ID().String()))
+		e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.String("shard_id", sh.ID().String()))
 		ch := make(chan oid.Address)

 		errG, ctx := errgroup.WithContext(ctx)

@@ -92,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
 			})
 		}
 		if err := errG.Wait(); err != nil {
-			e.log.Error("finished removal of locally-redundant copies", zap.Error(err))
+			e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err))
 			return err
 		}
 	}

-	e.log.Info("finished removal of locally-redundant copies")
+	e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies)
 	return nil
 }
@@ -3,6 +3,7 @@ package engine
 import (
 	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"

@@ -168,7 +169,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
 			delete(e.shardPools, id)
 		}

-		e.log.Info("shard has been removed",
+		e.log.Info(logs.EngineShardHasBeenRemoved,
 			zap.String("id", id))
 	}
 	e.mtx.Unlock()

@@ -176,7 +177,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
 	for _, sh := range ss {
 		err := sh.Close()
 		if err != nil {
-			e.log.Error("could not close removed shard",
+			e.log.Error(logs.EngineCouldNotCloseRemovedShard,
 				zap.Stringer("id", sh.ID()),
 				zap.Error(err),
 			)
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"path/filepath"

+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"

@@ -25,7 +26,7 @@ func (db *DB) Open(readOnly bool) error {
 		return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
 	}

-	db.log.Debug("created directory for Metabase", zap.String("path", db.info.Path))
+	db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))

 	if db.boltOptions == nil {
 		opts := *bbolt.DefaultOptions

@@ -46,9 +47,9 @@ func (db *DB) openBolt() error {
 	db.boltDB.MaxBatchDelay = db.boltBatchDelay
 	db.boltDB.MaxBatchSize = db.boltBatchSize

-	db.log.Debug("opened boltDB instance for Metabase")
+	db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase)

-	db.log.Debug("checking metabase version")
+	db.log.Debug(logs.MetabaseCheckingMetabaseVersion)
 	return db.boltDB.View(func(tx *bbolt.Tx) error {
 		// The safest way to check if the metabase is fresh is to check if it has no buckets.
 		// However, shard info can be present. So here we check that the number of buckets is
@@ -7,6 +7,7 @@ import (
 	"strings"

 	v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@@ -267,7 +268,7 @@ func (db *DB) selectFromFKBT(
 ) { //
 	matchFunc, ok := db.matchers[f.Operation()]
 	if !ok {
-		db.log.Debug("missing matcher", zap.Uint32("operation", uint32(f.Operation())))
+		db.log.Debug(logs.MetabaseMissingMatcher, zap.Uint32("operation", uint32(f.Operation())))

 		return
 	}

@@ -290,7 +291,7 @@ func (db *DB) selectFromFKBT(
 		})
 	})
 	if err != nil {
-		db.log.Debug("error in FKBT selection", zap.String("error", err.Error()))
+		db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error()))
 	}
 }

@@ -360,13 +361,13 @@ func (db *DB) selectFromList(
 	case object.MatchStringEqual:
 		lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value())))
 		if err != nil {
-			db.log.Debug("can't decode list bucket leaf", zap.String("error", err.Error()))
+			db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()))
 			return
 		}
 	default:
 		fMatch, ok := db.matchers[op]
 		if !ok {
-			db.log.Debug("unknown operation", zap.Uint32("operation", uint32(op)))
+			db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op)))

 			return
 		}

@@ -374,7 +375,7 @@ func (db *DB) selectFromList(
 		if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(key, val []byte) error {
 			l, err := decodeList(val)
 			if err != nil {
-				db.log.Debug("can't decode list bucket leaf",
+				db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf,
 					zap.String("error", err.Error()),
 				)

@@ -385,7 +386,7 @@ func (db *DB) selectFromList(

 			return nil
 		}); err != nil {
-			db.log.Debug("can't iterate over the bucket",
+			db.log.Debug(logs.MetabaseCantIterateOverTheBucket,
 				zap.String("error", err.Error()),
 			)

@@ -429,7 +430,7 @@ func (db *DB) selectObjectID(
 	default:
 		fMatch, ok := db.matchers[op]
 		if !ok {
-			db.log.Debug("unknown operation",
+			db.log.Debug(logs.MetabaseUnknownOperation,
 				zap.Uint32("operation", uint32(f.Operation())),
 			)

@@ -451,7 +452,7 @@ func (db *DB) selectObjectID(
 			return nil
 		})
 		if err != nil {
-			db.log.Debug("could not iterate over the buckets",
+			db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets,
 				zap.String("error", err.Error()),
 			)
 		}
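`logs.MetabaseCantDecodeListBucketLeaf` and `logs.MetabaseUnknownOperation` each appear at several call sites above, so the constants also deduplicate messages within a single file. Judging by the names and the replaced literals, the metabase group presumably reads like this sketch; values are copied from the diff, the grouping is assumed:

// Sketch of the metabase message group; names and values match
// the replacements in this file, the layout is assumed.
const (
	MetabaseMissingMatcher                = "missing matcher"
	MetabaseErrorInFKBTSelection          = "error in FKBT selection"
	MetabaseCantDecodeListBucketLeaf      = "can't decode list bucket leaf"
	MetabaseUnknownOperation              = "unknown operation"
	MetabaseCantIterateOverTheBucket      = "can't iterate over the bucket"
	MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets"
)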
@ -5,6 +5,7 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
|
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -15,7 +16,7 @@ import (
 )
 
 func (s *Shard) handleMetabaseFailure(stage string, err error) error {
-	s.log.Error("metabase failure, switching mode",
+	s.log.Error(logs.ShardMetabaseFailureSwitchingMode,
 		zap.String("stage", stage),
 		zap.Stringer("mode", mode.ReadOnly),
 		zap.Error(err))
@@ -25,7 +26,7 @@ func (s *Shard) handleMetabaseFailure(stage string, err error) error {
 		return nil
 	}
 
-	s.log.Error("can't move shard to readonly, switch mode",
+	s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode,
 		zap.String("stage", stage),
 		zap.Stringer("mode", mode.DegradedReadOnly),
 		zap.Error(err))
@@ -167,7 +168,7 @@ func (s *Shard) refillMetabase() error {
 
 	err = blobstor.IterateBinaryObjects(s.blobStor, func(addr oid.Address, data []byte, descriptor []byte) error {
 		if err := obj.Unmarshal(data); err != nil {
-			s.log.Warn("could not unmarshal object",
+			s.log.Warn(logs.ShardCouldNotUnmarshalObject,
 				zap.Stringer("address", addr),
 				zap.String("err", err.Error()))
 			return nil
@@ -274,7 +275,7 @@ func (s *Shard) Close() error {
 	for _, component := range components {
 		if err := component.Close(); err != nil {
 			lastErr = err
-			s.log.Error("could not close shard component", zap.Error(err))
+			s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err))
 		}
 	}
 
@@ -302,7 +303,7 @@ func (s *Shard) Reload(opts ...Option) error {
 	ok, err := s.metaBase.Reload(c.metaOpts...)
 	if err != nil {
 		if errors.Is(err, meta.ErrDegradedMode) {
-			s.log.Error("can't open metabase, move to a degraded mode", zap.Error(err))
+			s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err))
 			_ = s.setMode(mode.DegradedReadOnly)
 		}
 		return err
@@ -318,12 +319,12 @@ func (s *Shard) Reload(opts ...Option) error {
 			err = s.metaBase.Init()
 		}
 		if err != nil {
-			s.log.Error("can't initialize metabase, move to a degraded-read-only mode", zap.Error(err))
+			s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err))
 			_ = s.setMode(mode.DegradedReadOnly)
 			return err
 		}
 	}
 
-	s.log.Info("trying to restore read-write mode")
+	s.log.Info(logs.ShardTryingToRestoreReadwriteMode)
 	return s.setMode(mode.ReadWrite)
 }
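The constants referenced above live in the new internal/logs package, which this diff does not display. Below is a minimal sketch of what the relevant declarations presumably look like; each name/value pair is taken verbatim from the `-`/`+` lines in the hunks above, while the file name and layout are assumptions:

package logs

// Plausible declarations for the constants used above; each value is the
// literal that the corresponding `-` line previously passed to the logger.
const (
	ShardMetabaseFailureSwitchingMode                      = "metabase failure, switching mode"
	ShardCantMoveShardToReadonlySwitchMode                 = "can't move shard to readonly, switch mode"
	ShardCouldNotUnmarshalObject                           = "could not unmarshal object"
	ShardCouldNotCloseShardComponent                       = "could not close shard component"
	ShardCantOpenMetabaseMoveToADegradedMode               = "can't open metabase, move to a degraded mode"
	ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
	ShardTryingToRestoreReadwriteMode                      = "trying to restore read-write mode"
)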
@@ -3,6 +3,7 @@ package shard
 import (
 	"errors"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
@@ -49,7 +50,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
 		if s.hasWriteCache() {
 			err := s.writeCache.Delete(prm.addr[i])
 			if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) {
-				s.log.Warn("can't delete object from write cache", zap.String("error", err.Error()))
+				s.log.Warn(logs.ShardCantDeleteObjectFromWriteCache, zap.String("error", err.Error()))
 			}
 		}
 
@@ -58,7 +59,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
 
 		res, err := s.metaBase.StorageID(sPrm)
 		if err != nil {
-			s.log.Debug("can't get storage ID from metabase",
+			s.log.Debug(logs.ShardCantGetStorageIDFromMetabase,
 				zap.Stringer("object", prm.addr[i]),
 				zap.String("error", err.Error()))
 
@@ -100,7 +101,7 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) {
 
 		_, err = s.blobStor.Delete(delPrm)
 		if err != nil {
-			s.log.Debug("can't remove object from blobStor",
+			s.log.Debug(logs.ShardCantRemoveObjectFromBlobStor,
 				zap.Stringer("object_address", prm.addr[i]),
 				zap.String("error", err.Error()))
 		}
@@ -5,6 +5,7 @@ import (
 	"sync"
 	"time"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
@@ -124,7 +125,7 @@ func (gc *gc) listenEvents(ctx context.Context) {
 	for {
 		event, ok := <-gc.eventChan
 		if !ok {
-			gc.log.Warn("stop event listener by closed channel")
+			gc.log.Warn(logs.ShardStopEventListenerByClosedChannel)
 			return
 		}
 
@@ -149,7 +150,7 @@ func (gc *gc) listenEvents(ctx context.Context) {
 				v.prevGroup.Done()
 			})
 			if err != nil {
-				gc.log.Warn("could not submit GC job to worker pool",
+				gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool,
 					zap.String("error", err.Error()),
 				)
 
@@ -174,7 +175,7 @@ func (gc *gc) tickRemover() {
 
 			close(gc.eventChan)
 
-			gc.log.Debug("GC is stopped")
+			gc.log.Debug(logs.ShardGCIsStopped)
 			return
 		case <-timer.C:
 			gc.remover()
@@ -188,7 +189,7 @@ func (gc *gc) stop() {
 		gc.stopChannel <- struct{}{}
 	})
 
-	gc.log.Info("waiting for GC workers to stop...")
+	gc.log.Info(logs.ShardWaitingForGCWorkersToStop)
 	gc.wg.Wait()
 }
 
@@ -220,7 +221,7 @@ func (s *Shard) removeGarbage() {
 	// (no more than s.rmBatchSize objects)
 	err := s.metaBase.IterateOverGarbage(iterPrm)
 	if err != nil {
-		s.log.Warn("iterator over metabase graveyard failed",
+		s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed,
 			zap.String("error", err.Error()),
 		)
 
@@ -235,7 +236,7 @@ func (s *Shard) removeGarbage() {
 	// delete accumulated objects
 	_, err = s.delete(deletePrm)
 	if err != nil {
-		s.log.Warn("could not delete the objects",
+		s.log.Warn(logs.ShardCouldNotDeleteTheObjects,
 			zap.String("error", err.Error()),
 		)
 
@@ -295,7 +296,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
 	})
 
 	if err := errGroup.Wait(); err != nil {
-		s.log.Warn("iterator over expired objects failed", zap.String("error", err.Error()))
+		s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
 	}
 }
 
@@ -321,7 +322,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
 	// inhume the collected objects
 	res, err := s.metaBase.Inhume(inhumePrm)
 	if err != nil {
-		s.log.Warn("could not inhume the objects",
+		s.log.Warn(logs.ShardCouldNotInhumeTheObjects,
 			zap.String("error", err.Error()),
 		)
 
@@ -342,7 +343,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 	epoch := e.(newEpoch).epoch
 	log := s.log.With(zap.Uint64("epoch", epoch))
 
-	log.Debug("started expired tombstones handling")
+	log.Debug(logs.ShardStartedExpiredTombstonesHandling)
 
 	const tssDeleteBatch = 50
 	tss := make([]meta.TombstonedObject, 0, tssDeleteBatch)
@@ -360,12 +361,12 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 	})
 
 	for {
-		log.Debug("iterating tombstones")
+		log.Debug(logs.ShardIteratingTombstones)
 
 		s.m.RLock()
 
 		if s.info.Mode.NoMetabase() {
-			s.log.Debug("shard is in a degraded mode, skip collecting expired tombstones")
+			s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones)
 			s.m.RUnlock()
 
 			return
@@ -373,7 +374,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 
 		err := s.metaBase.IterateOverGraveyard(iterPrm)
 		if err != nil {
-			log.Error("iterator over graveyard failed", zap.Error(err))
+			log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err))
 			s.m.RUnlock()
 
 			return
@@ -392,7 +393,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 			}
 		}
 
-		log.Debug("handling expired tombstones batch", zap.Int("number", len(tssExp)))
+		log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp)))
 		s.expiredTombstonesCallback(ctx, tssExp)
 
 		iterPrm.SetOffset(tss[tssLen-1].Address())
@@ -400,7 +401,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) {
 		tssExp = tssExp[:0]
 	}
 
-	log.Debug("finished expired tombstones handling")
+	log.Debug(logs.ShardFinishedExpiredTombstonesHandling)
 }
 
 func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
@@ -442,7 +443,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
 	})
 
 	if err := errGroup.Wait(); err != nil {
-		s.log.Warn("iterator over expired locks failed", zap.String("error", err.Error()))
+		s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
 	}
 }
 
@@ -503,7 +504,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
 	// inhume tombstones
 	res, err := s.metaBase.Inhume(pInhume)
 	if err != nil {
-		s.log.Warn("could not mark tombstones as garbage",
+		s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage,
 			zap.String("error", err.Error()),
 		)
 
@@ -523,7 +524,7 @@ func (s *Shard) HandleExpiredTombstones(tss []meta.TombstonedObject) {
 	// from graveyard
 	err = s.metaBase.DropGraves(tss)
 	if err != nil {
-		s.log.Warn("could not drop expired grave records", zap.Error(err))
+		s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err))
 	}
 }
 
@@ -535,7 +536,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 	}
 	unlocked, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
-		s.log.Warn("failure to unlock objects",
+		s.log.Warn(logs.ShardFailureToUnlockObjects,
 			zap.String("error", err.Error()),
 		)
 
@@ -548,7 +549,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 
 	res, err := s.metaBase.Inhume(pInhume)
 	if err != nil {
-		s.log.Warn("failure to mark lockers as garbage",
+		s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage,
 			zap.String("error", err.Error()),
 		)
 
@@ -570,7 +571,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
 func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked []oid.Address) {
 	expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked)
 	if err != nil {
-		s.log.Warn("failure to get expired unlocked objects", zap.Error(err))
+		s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err))
 		return
 	}
 
@@ -589,7 +590,7 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) {
 
 	_, err := s.metaBase.FreeLockedBy(lockers)
 	if err != nil {
-		s.log.Warn("failure to unlock objects",
+		s.log.Warn(logs.ShardFailureToUnlockObjects,
 			zap.String("error", err.Error()),
 		)
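One practical effect of replacing literals with shared constants, sketched below: tests can assert on emitted messages through the same identifier the production code uses, so the assertion cannot drift from the log text. This test is hypothetical (not part of the diff) and assumes only the constants shown above plus zap's standard in-memory observer package:

package shard_test

import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest/observer"
)

// TestGCStoppedMessage records log output in memory and filters it by the
// shared constant instead of a copied string literal.
func TestGCStoppedMessage(t *testing.T) {
	core, recorded := observer.New(zap.DebugLevel)
	log := zap.New(core)

	log.Debug(logs.ShardGCIsStopped) // stands in for the gc.tickRemover call site above

	if recorded.FilterMessage(logs.ShardGCIsStopped).Len() != 1 {
		t.Fatalf("expected exactly one %q entry", logs.ShardGCIsStopped)
	}
}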
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/pkg/tracing"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -126,7 +127,7 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
 			return nil, false, logicerr.Wrap(apistatus.ObjectNotFound{})
 		}
 	} else {
-		s.log.Warn("fetching object without meta", zap.Stringer("addr", addr))
+		s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr))
 	}
 
 	if s.hasWriteCache() {
@@ -135,11 +136,11 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, cb storFetcher,
 			return res, false, err
 		}
 		if IsErrNotFound(err) {
-			s.log.Debug("object is missing in write-cache",
+			s.log.Debug(logs.ShardObjectIsMissingInWritecache,
 				zap.Stringer("addr", addr),
 				zap.Bool("skip_meta", skipMeta))
 		} else {
-			s.log.Error("failed to fetch object from write-cache",
+			s.log.Error(logs.ShardFailedToFetchObjectFromWritecache,
 				zap.Error(err),
 				zap.Stringer("addr", addr),
 				zap.Bool("skip_meta", skipMeta))
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"go.uber.org/zap"
@@ -98,7 +99,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
 			return InhumeRes{}, ErrLockObjectRemoval
 		}
 
		s.log.Debug("could not mark object to delete in metabase",
		s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase,
 			zap.String("error", err.Error()),
 		)
@@ -3,6 +3,7 @@ package shard
 import (
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -86,7 +87,7 @@ func (s *Shard) List() (res SelectRes, err error) {
 
 		sRes, err := s.metaBase.Select(sPrm) // consider making List in metabase
 		if err != nil {
-			s.log.Debug("can't select all objects",
+			s.log.Debug(logs.ShardCantSelectAllObjects,
 				zap.Stringer("cid", lst[i]),
 				zap.String("error", err.Error()))
@@ -1,6 +1,7 @@
 package shard
 
 import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
 	"go.uber.org/zap"
@@ -25,7 +26,7 @@ func (s *Shard) SetMode(m mode.Mode) error {
 }
 
 func (s *Shard) setMode(m mode.Mode) error {
-	s.log.Info("setting shard mode",
+	s.log.Info(logs.ShardSettingShardMode,
 		zap.Stringer("old_mode", s.info.Mode),
 		zap.Stringer("new_mode", m))
 
@@ -66,7 +67,7 @@ func (s *Shard) setMode(m mode.Mode) error {
 		s.metricsWriter.SetReadonly(s.info.Mode != mode.ReadWrite)
 	}
 
-	s.log.Info("shard mode set successfully",
+	s.log.Info(logs.ShardShardModeSetSuccessfully,
 		zap.Stringer("mode", s.info.Mode))
 	return nil
 }
@@ -1,6 +1,7 @@
 package shard
 
 import (
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"go.uber.org/zap"
@@ -38,7 +39,7 @@ func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) {
 
 	_, err := s.metaBase.ToMoveIt(toMovePrm)
 	if err != nil {
-		s.log.Debug("could not mark object for shard relocation in metabase",
+		s.log.Debug(logs.ShardCouldNotMarkObjectForShardRelocationInMetabase,
 			zap.String("error", err.Error()),
 		)
 	}
@@ -3,6 +3,7 @@ package shard
 import (
 	"fmt"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -58,7 +59,7 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
 	}
 	if err != nil || !tryCache {
 		if err != nil {
-			s.log.Debug("can't put object to the write-cache, trying blobstor",
+			s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
 				zap.String("err", err.Error()))
 		}
@@ -5,6 +5,7 @@ import (
 	"sync"
 	"time"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -349,7 +350,7 @@ func (s *Shard) updateMetrics() {
 	if s.cfg.metricsWriter != nil && !s.GetMode().NoMetabase() {
 		cc, err := s.metaBase.ObjectCounters()
 		if err != nil {
-			s.log.Warn("meta: object counter read",
+			s.log.Warn(logs.ShardMetaObjectCounterRead,
 				zap.Error(err),
 			)
 
@@ -361,7 +362,7 @@ func (s *Shard) updateMetrics() {
 
 		cnrList, err := s.metaBase.Containers()
 		if err != nil {
-			s.log.Warn("meta: can't read container list", zap.Error(err))
+			s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err))
 			return
 		}
 
@@ -370,7 +371,7 @@ func (s *Shard) updateMetrics() {
 		for i := range cnrList {
 			size, err := s.metaBase.ContainerSize(cnrList[i])
 			if err != nil {
-				s.log.Warn("meta: can't read container size",
+				s.log.Warn(logs.ShardMetaCantReadContainerSize,
 					zap.String("cid", cnrList[i].EncodeToString()),
 					zap.Error(err))
 				continue
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"time"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -134,7 +135,7 @@ func (c *cache) flushDB() {
 
 		c.modeMtx.RUnlock()
 
-		c.log.Debug("tried to flush items from write-cache",
+		c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
 			zap.Int("count", count),
 			zap.String("start", base58.Encode(lastKey)))
 	}
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"sync"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
@@ -54,7 +55,7 @@ func (c *cache) initFlushMarks() {
 var errStopIter = errors.New("stop iteration")
 
 func (c *cache) fsTreeFlushMarkUpdate() {
-	c.log.Info("filling flush marks for objects in FSTree")
+	c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInFSTree)
 
 	var prm common.IteratePrm
 	prm.LazyHandler = func(addr oid.Address, _ func() ([]byte, error)) error {
@@ -86,11 +87,11 @@ func (c *cache) fsTreeFlushMarkUpdate() {
 		return nil
 	}
 	_, _ = c.fsTree.Iterate(prm)
-	c.log.Info("finished updating FSTree flush marks")
+	c.log.Info(logs.WritecacheFinishedUpdatingFSTreeFlushMarks)
 }
 
 func (c *cache) dbFlushMarkUpdate() {
-	c.log.Info("filling flush marks for objects in database")
+	c.log.Info(logs.WritecacheFillingFlushMarksForObjectsInDatabase)
 
 	var m []string
 	var indices []int
@@ -158,7 +159,7 @@ func (c *cache) dbFlushMarkUpdate() {
 		lastKey = append([]byte(m[len(m)-1]), 0)
 	}
 
-	c.log.Info("finished updating flush marks")
+	c.log.Info(logs.WritecacheFinishedUpdatingFlushMarks)
 }
 
 // flushStatus returns info about the object state in the main storage.
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"time"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
 )
@@ -59,7 +60,7 @@ func (c *cache) setMode(m mode.Mode) error {
 	// flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty
 	// guarantees that there are no in-fly operations.
 	for len(c.flushCh) != 0 {
-		c.log.Info("waiting for channels to flush")
+		c.log.Info(logs.WritecacheWaitingForChannelsToFlush)
 		time.Sleep(time.Second)
 	}
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
 	storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
@@ -124,7 +125,7 @@ func (c *cache) deleteFromDB(keys []string) []string {
 		)
 	}
 	if err != nil {
-		c.log.Error("can't remove objects from the database", zap.Error(err))
+		c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err))
 	}
 
 	copy(keys, keys[errorIndex:])
@@ -141,13 +142,13 @@ func (c *cache) deleteFromDisk(keys []string) []string {
 
 	for i := range keys {
 		if err := addr.DecodeString(keys[i]); err != nil {
-			c.log.Error("can't parse address", zap.String("address", keys[i]))
+			c.log.Error(logs.WritecacheCantParseAddress, zap.String("address", keys[i]))
 			continue
 		}
 
 		_, err := c.fsTree.Delete(common.DeletePrm{Address: addr})
 		if err != nil && !errors.As(err, new(apistatus.ObjectNotFound)) {
-			c.log.Error("can't remove object from write-cache", zap.Error(err))
+			c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
 
 			// Save the key for the next iteration.
 			keys[copyIndex] = keys[i]
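The constant names carry a subsystem prefix (Shard, Writecache, and, below, Client), which appears to group them by the component that emits the log. The write-cache hunks above therefore presumably correspond to declarations like the following in internal/logs; names and values are verbatim from the hunks, while the grouping into one const block is an assumption:

// Presumed internal/logs declarations for the write-cache messages above.
const (
	WritecacheTriedToFlushItemsFromWritecache     = "tried to flush items from write-cache"
	WritecacheFillingFlushMarksForObjectsInFSTree = "filling flush marks for objects in FSTree"
	WritecacheWaitingForChannelsToFlush           = "waiting for channels to flush"
	WritecacheCantRemoveObjectsFromTheDatabase    = "can't remove objects from the database"
	WritecacheCantParseAddress                    = "can't parse address"
	WritecacheCantRemoveObjectFromWritecache      = "can't remove object from write-cache"
)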
@@ -8,6 +8,7 @@ import (
 	"sync"
 	"time"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 	lru "github.com/hashicorp/golang-lru/v2"
 	"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -186,7 +187,7 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string,
 		return fmt.Errorf("could not invoke %s: %w", method, err)
 	}
 
-	c.logger.Debug("neo client invoke",
+	c.logger.Debug(logs.ClientNeoClientInvoke,
 		zap.String("method", method),
 		zap.Uint32("vub", vub),
 		zap.Stringer("tx_hash", txHash.Reverse()))
@@ -269,7 +270,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error
 		return err
 	}
 
-	c.logger.Debug("native gas transfer invoke",
+	c.logger.Debug(logs.ClientNativeGasTransferInvoke,
 		zap.String("to", receiver.StringLE()),
 		zap.Stringer("tx_hash", txHash.Reverse()),
 		zap.Uint32("vub", vub))
@@ -303,7 +304,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8
 		return err
 	}
 
-	c.logger.Debug("batch gas transfer invoke",
+	c.logger.Debug(logs.ClientBatchGasTransferInvoke,
 		zap.Strings("to", receiversLog),
 		zap.Stringer("tx_hash", txHash.Reverse()),
 		zap.Uint32("vub", vub))
@@ -330,7 +331,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
 
 	height, err = c.rpcActor.GetBlockCount()
 	if err != nil {
-		c.logger.Error("can't get blockchain height",
+		c.logger.Error(logs.ClientCantGetBlockchainHeight,
 			zap.String("error", err.Error()))
 		return nil
 	}
@@ -344,7 +345,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
 
 	newHeight, err = c.rpcActor.GetBlockCount()
 	if err != nil {
-		c.logger.Error("can't get blockchain height",
+		c.logger.Error(logs.ClientCantGetBlockchainHeight243,
 			zap.String("error", err.Error()))
 		return nil
 	}
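Note that the two Wait() hunks replace the identical literal "can't get blockchain height" with two distinct constants, the second suffixed 243. Consistent with the commit's "Drop duplicate entities" note, the suffix presumably disambiguates duplicate message strings so that each call site keeps its own identifier; the declarations below are a sketch of that assumed shape, with both values taken verbatim from the removed lines:

// Assumed shape of the duplicate-message pair; the 243 suffix looks like an
// auto-generated disambiguator (e.g. an index or source line), not a semantic tag.
const (
	ClientCantGetBlockchainHeight    = "can't get blockchain height"
	ClientCantGetBlockchainHeight243 = "can't get blockchain height" // second call site in Client.Wait
)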