[#1689] linter: Fix staticcheck warning: 'embedded field can be simplified'

Change-Id: I8f454f7d09973cdea096495c3949b88cdd01102e
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
Alexander Chuprov authored on 2025-04-07 16:54:58 +03:00; committed by Aleksandr Chuprov
parent 923f0acf8f
commit 6f7b6b65f3
34 changed files with 121 additions and 121 deletions
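
For context, this is likely staticcheck's quickfix QF1008 ("Omit embedded fields from selector expression"): when a struct embeds another type, Go promotes the embedded type's fields and methods onto the outer type, so the intermediate selector step is redundant. A minimal sketch of the pattern, using hypothetical base/wrapper types that are not from this repository:

    package main

    import "fmt"

    type base struct{ name string }

    func (b *base) Hello() string { return "hello, " + b.name }

    // wrapper embeds base, so base's fields and methods are promoted:
    // w.name and w.Hello() resolve through the embedded field.
    type wrapper struct {
        base
    }

    func main() {
        w := wrapper{base: base{name: "world"}}

        fmt.Println(w.base.Hello()) // explicit embedded selector; staticcheck flags this
        fmt.Println(w.Hello())      // simplified form applied throughout this commit
    }

The qualified form remains necessary when the outer type declares a member with the same name, which is why f.InputField.InputHandler() below stays fully qualified while the neighboring f.InputField.SetText(...) calls become f.SetText(...).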

View file

@@ -242,7 +242,7 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
        script := sub.Bytes()
        emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
-       bw.BinWriter.WriteBytes(script)
+       bw.WriteBytes(script)
        emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
        emit.Opcodes(bw.BinWriter, opcode.PUSH0)
    }

View file

@@ -53,17 +53,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
        f.historyPointer++
        // Stop iterating over history.
        if f.historyPointer == len(f.history) {
-           f.InputField.SetText(f.currentContent)
+           f.SetText(f.currentContent)
            return
        }
-       f.InputField.SetText(f.history[f.historyPointer])
+       f.SetText(f.history[f.historyPointer])
    case tcell.KeyUp:
        if len(f.history) == 0 {
            return
        }
        // Start iterating over history.
        if f.historyPointer == len(f.history) {
-           f.currentContent = f.InputField.GetText()
+           f.currentContent = f.GetText()
        }
        // End of history.
        if f.historyPointer == 0 {
@@ -71,7 +71,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
        }
        // Iterate to least recent prompts.
        f.historyPointer--
-       f.InputField.SetText(f.history[f.historyPointer])
+       f.SetText(f.history[f.historyPointer])
    default:
        f.InputField.InputHandler()(event, func(tview.Primitive) {})
    }

View file

@@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
        ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
    }
-   ui.Box.MouseHandler()
+   ui.MouseHandler()
}

func (ui *UI) WithPrompt(prompt string) error {

View file

@@ -14,7 +14,7 @@ import (
func initAPEManagerService(c *cfg) {
    contractStorage := ape_contract.NewProxyVerificationContractStorage(
        morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
-       c.shared.key,
+       c.key,
        c.cfgMorph.proxyScriptHash,
        c.cfgObject.cfgAccessPolicyEngine.policyContractHash)

View file

@@ -32,7 +32,7 @@ func initContainerService(_ context.Context, c *cfg) {
    wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
    fatalOnErr(err)
-   c.shared.cnrClient = wrap
+   c.cnrClient = wrap

    cnrSrc := cntClient.AsContainerSource(wrap)
@@ -47,7 +47,7 @@ func initContainerService(_ context.Context, c *cfg) {
        frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
    }
-   c.shared.frostfsidClient = frostfsIDSubjectProvider
+   c.frostfsidClient = frostfsIDSubjectProvider

    c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
    defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
@@ -57,7 +57,7 @@ func initContainerService(_ context.Context, c *cfg) {
    service := containerService.NewSignService(
        &c.key.PrivateKey,
        containerService.NewAPEServer(defaultChainRouter, cnrRdr,
-           newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
+           newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient,
            containerService.NewSplitterService(
                c.cfgContainer.containerBatchSize, c.respSvc,
                containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),

View file

@@ -8,38 +8,38 @@ import (
func metricsComponent(c *cfg) (*httpComponent, bool) {
    var updated bool
    // check if it has been inited before
-   if c.dynamicConfiguration.metrics == nil {
-       c.dynamicConfiguration.metrics = new(httpComponent)
-       c.dynamicConfiguration.metrics.cfg = c
-       c.dynamicConfiguration.metrics.name = "metrics"
-       c.dynamicConfiguration.metrics.handler = metrics.Handler()
+   if c.metrics == nil {
+       c.metrics = new(httpComponent)
+       c.metrics.cfg = c
+       c.metrics.name = "metrics"
+       c.metrics.handler = metrics.Handler()
        updated = true
    }

    // (re)init read configuration
    enabled := metricsconfig.Enabled(c.appCfg)
-   if enabled != c.dynamicConfiguration.metrics.enabled {
-       c.dynamicConfiguration.metrics.enabled = enabled
+   if enabled != c.metrics.enabled {
+       c.metrics.enabled = enabled
        updated = true
    }
    address := metricsconfig.Address(c.appCfg)
-   if address != c.dynamicConfiguration.metrics.address {
-       c.dynamicConfiguration.metrics.address = address
+   if address != c.metrics.address {
+       c.metrics.address = address
        updated = true
    }
    dur := metricsconfig.ShutdownTimeout(c.appCfg)
-   if dur != c.dynamicConfiguration.metrics.shutdownDur {
-       c.dynamicConfiguration.metrics.shutdownDur = dur
+   if dur != c.metrics.shutdownDur {
+       c.metrics.shutdownDur = dur
        updated = true
    }

-   return c.dynamicConfiguration.metrics, updated
+   return c.metrics, updated
}

func enableMetricsSvc(c *cfg) {
-   c.shared.metricsSvc.Enable()
+   c.metricsSvc.Enable()
}

func disableMetricsSvc(c *cfg) {
-   c.shared.metricsSvc.Disable()
+   c.metricsSvc.Disable()
}

View file

@@ -186,9 +186,9 @@ func initObjectService(c *cfg) {
        respSvc,
    )

-   c.shared.metricsSvc = objectService.NewMetricCollector(
+   c.metricsSvc = objectService.NewMetricCollector(
        signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
-   qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService)
+   qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService)
    auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
    server := objectTransportGRPC.New(auditSvc)
@@ -432,7 +432,7 @@ func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectServic
        c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
        c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
        objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc),
-       c.shared.frostfsidClient,
+       c.frostfsidClient,
        c.netMapSource,
        c.cfgNetmap.state,
        c.cfgObject.cnrSource,

View file

@@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) {
func pprofComponent(c *cfg) (*httpComponent, bool) {
    var updated bool
    // check if it has been inited before
-   if c.dynamicConfiguration.pprof == nil {
-       c.dynamicConfiguration.pprof = new(httpComponent)
-       c.dynamicConfiguration.pprof.cfg = c
-       c.dynamicConfiguration.pprof.name = "pprof"
-       c.dynamicConfiguration.pprof.handler = httputil.Handler()
-       c.dynamicConfiguration.pprof.preReload = tuneProfilers
+   if c.pprof == nil {
+       c.pprof = new(httpComponent)
+       c.pprof.cfg = c
+       c.pprof.name = "pprof"
+       c.pprof.handler = httputil.Handler()
+       c.pprof.preReload = tuneProfilers
        updated = true
    }

    // (re)init read configuration
    enabled := profilerconfig.Enabled(c.appCfg)
-   if enabled != c.dynamicConfiguration.pprof.enabled {
-       c.dynamicConfiguration.pprof.enabled = enabled
+   if enabled != c.pprof.enabled {
+       c.pprof.enabled = enabled
        updated = true
    }
    address := profilerconfig.Address(c.appCfg)
-   if address != c.dynamicConfiguration.pprof.address {
-       c.dynamicConfiguration.pprof.address = address
+   if address != c.pprof.address {
+       c.pprof.address = address
        updated = true
    }
    dur := profilerconfig.ShutdownTimeout(c.appCfg)
-   if dur != c.dynamicConfiguration.pprof.shutdownDur {
-       c.dynamicConfiguration.pprof.shutdownDur = dur
+   if dur != c.pprof.shutdownDur {
+       c.pprof.shutdownDur = dur
        updated = true
    }

-   return c.dynamicConfiguration.pprof, updated
+   return c.pprof, updated
}

func tuneProfilers(c *cfg) {

View file

@@ -51,9 +51,9 @@ func initTreeService(c *cfg) {
    c.treeService = tree.New(
        tree.WithContainerSource(cnrSource{
            src: c.cfgObject.cnrSource,
-           cli: c.shared.cnrClient,
+           cli: c.cnrClient,
        }),
-       tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
+       tree.WithFrostfsidSubjectProvider(c.frostfsidClient),
        tree.WithNetmapSource(c.netMapSource),
        tree.WithPrivateKey(&c.key.PrivateKey),
        tree.WithLogger(c.log),

View file

@@ -153,5 +153,5 @@ func WithMetrics(m Metrics) Option {
}

func (b *BlobStor) Compressor() *compression.Config {
-   return &b.cfg.compression
+   return &b.compression
}

View file

@@ -74,7 +74,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm)
        var csPrm shard.ContainerSizePrm
        csPrm.SetContainerID(prm.cnr)

-       csRes, err := sh.Shard.ContainerSize(ctx, csPrm)
+       csRes, err := sh.ContainerSize(ctx, csPrm)
        if err != nil {
            e.reportShardError(ctx, sh, "can't get container size", err,
                zap.Stringer("container_id", prm.cnr))
@@ -119,7 +119,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes {
    uniqueIDs := make(map[string]cid.ID)

    e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
-       res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{})
+       res, err := sh.ListContainers(ctx, shard.ListContainersPrm{})
        if err != nil {
            e.reportShardError(ctx, sh, "can't get list of containers", err)
            return false

View file

@@ -77,7 +77,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
    errCh := make(chan shardInitError, len(e.shards))
    var eg errgroup.Group
-   if e.cfg.lowMem && e.anyShardRequiresRefill() {
+   if e.lowMem && e.anyShardRequiresRefill() {
        eg.SetLimit(1)
    }

View file

@@ -227,7 +227,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
    var outErr error

    e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
-       locked, err = h.Shard.IsLocked(ctx, addr)
+       locked, err = h.IsLocked(ctx, addr)
        if err != nil {
            e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr))
            outErr = err
@@ -256,7 +256,7 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I
    var outErr error

    e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
-       locks, err := h.Shard.GetLocks(ctx, addr)
+       locks, err := h.GetLocks(ctx, addr)
        if err != nil {
            e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr))
            outErr = err

View file

@@ -118,7 +118,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh
        return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
    }

-   e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
+   e.metrics.SetMode(sh.ID().String(), sh.GetMode())

    return sh.ID(), nil
}

View file

@@ -1582,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
func (t *boltForest) logFromBytes(lm *Move, data []byte) error {
    lm.Child = binary.LittleEndian.Uint64(data)
    lm.Parent = binary.LittleEndian.Uint64(data[8:])
-   return lm.Meta.FromBytes(data[16:])
+   return lm.FromBytes(data[16:])
}

func (t *boltForest) logToBytes(lm *Move) []byte {
    w := io.NewBufBinWriter()
-   size := 8 + 8 + lm.Meta.Size() + 1
+   size := 8 + 8 + lm.Size() + 1
    // if lm.HasOld {
    //	size += 8 + lm.Old.Meta.Size()
    // }
@@ -1595,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte {
    w.Grow(size)
    w.WriteU64LE(lm.Child)
    w.WriteU64LE(lm.Parent)
-   lm.Meta.EncodeBinary(w.BinWriter)
+   lm.EncodeBinary(w.BinWriter)
    // w.WriteBool(lm.HasOld)
    // if lm.HasOld {
    //	w.WriteU64LE(lm.Old.Parent)

View file

@@ -177,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
    var res []NodeInfo
    for _, nodeID := range nodeIDs {
-       children := s.tree.getChildren(nodeID)
+       children := s.getChildren(nodeID)
        for _, childID := range children {
            var found bool
            for _, kv := range s.infoMap[childID].Meta.Items {
@@ -222,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str
        return nil, ErrTreeNotFound
    }

-   children := s.tree.getChildren(nodeID)
+   children := s.getChildren(nodeID)
    res := make([]NodeInfo, 0, len(children))
    for _, childID := range children {
        res = append(res, NodeInfo{

View file

@@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree {
// undo un-does op and changes s in-place.
func (s *memoryTree) undo(op *move) {
    if op.HasOld {
-       s.tree.infoMap[op.Child] = op.Old
+       s.infoMap[op.Child] = op.Old
    } else {
-       delete(s.tree.infoMap, op.Child)
+       delete(s.infoMap, op.Child)
    }
}
@@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move {
        },
    }

-   shouldPut := !s.tree.isAncestor(op.Child, op.Parent)
-   p, ok := s.tree.infoMap[op.Child]
+   shouldPut := !s.isAncestor(op.Child, op.Parent)
+   p, ok := s.infoMap[op.Child]
    if ok {
        lm.HasOld = true
        lm.Old = p
@@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move {
    p.Meta = m
    p.Parent = op.Parent
-   s.tree.infoMap[op.Child] = p
+   s.infoMap[op.Child] = p

    return lm
}

View file

@@ -214,8 +214,8 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
    }

    eg, egCtx := errgroup.WithContext(ctx)
-   if s.cfg.refillMetabaseWorkersCount > 0 {
-       eg.SetLimit(s.cfg.refillMetabaseWorkersCount)
+   if s.refillMetabaseWorkersCount > 0 {
+       eg.SetLimit(s.refillMetabaseWorkersCount)
    }

    var completedCount uint64

View file

@@ -320,8 +320,8 @@ func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
}

func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
-   workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
-   batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
+   workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount)
+   batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize)
    return
}

View file

@@ -45,7 +45,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
    }

    shardID := s.info.ID.String()
-   s.cfg.metricsWriter.SetShardID(shardID)
+   s.metricsWriter.SetShardID(shardID)
    if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
        s.writeCache.GetMetrics().SetShardID(shardID)
    }

View file

@@ -218,7 +218,7 @@ func WithWriteCache(use bool) Option {
// hasWriteCache returns bool if write cache exists on shards.
func (s *Shard) hasWriteCache() bool {
-   return s.cfg.useWriteCache
+   return s.useWriteCache
}

// NeedRefillMetabase returns true if metabase is needed to be refilled.
@@ -379,15 +379,15 @@ func WithLimiter(l qos.Limiter) Option {
}

func (s *Shard) fillInfo() {
-   s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
-   s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
-   s.cfg.info.Mode = s.GetMode()
+   s.info.MetaBaseInfo = s.metaBase.DumpInfo()
+   s.info.BlobStorInfo = s.blobStor.DumpInfo()
+   s.info.Mode = s.GetMode()

-   if s.cfg.useWriteCache {
-       s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo()
+   if s.useWriteCache {
+       s.info.WriteCacheInfo = s.writeCache.DumpInfo()
    }
    if s.pilorama != nil {
-       s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo()
+       s.info.PiloramaInfo = s.pilorama.DumpInfo()
    }
}
@@ -454,57 +454,57 @@ func (s *Shard) updateMetrics(ctx context.Context) {
            s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
            s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
        }
-   s.cfg.metricsWriter.SetMode(s.info.Mode)
+   s.metricsWriter.SetMode(s.info.Mode)
}

// incObjectCounter increment both physical and logical object
// counters.
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
-   s.cfg.metricsWriter.IncObjectCounter(physical)
-   s.cfg.metricsWriter.IncObjectCounter(logical)
-   s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
-   s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
+   s.metricsWriter.IncObjectCounter(physical)
+   s.metricsWriter.IncObjectCounter(logical)
+   s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
+   s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
    if isUser {
-       s.cfg.metricsWriter.IncObjectCounter(user)
-       s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
+       s.metricsWriter.IncObjectCounter(user)
+       s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
    }
}

func (s *Shard) decObjectCounterBy(typ string, v uint64) {
    if v > 0 {
-       s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
+       s.metricsWriter.AddToObjectCounter(typ, -int(v))
    }
}

func (s *Shard) setObjectCounterBy(typ string, v uint64) {
    if v > 0 {
-       s.cfg.metricsWriter.SetObjectCounter(typ, v)
+       s.metricsWriter.SetObjectCounter(typ, v)
    }
}

func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
    for cnrID, count := range byCnr {
        if count.Phy > 0 {
-           s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
+           s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
        }
        if count.Logic > 0 {
-           s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
+           s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
        }
        if count.User > 0 {
-           s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
+           s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
        }
    }
}

func (s *Shard) addToContainerSize(cnr string, size int64) {
    if size != 0 {
-       s.cfg.metricsWriter.AddToContainerSize(cnr, size)
+       s.metricsWriter.AddToContainerSize(cnr, size)
    }
}

func (s *Shard) addToPayloadSize(size int64) {
    if size != 0 {
-       s.cfg.metricsWriter.AddToPayloadSize(size)
+       s.metricsWriter.AddToPayloadSize(size)
    }
}

View file

@@ -220,7 +220,7 @@ func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest
            TreeId: treeID,
            Operation: &tree.LogMove{
                ParentId: op.Parent,
-               Meta: op.Meta.Bytes(),
+               Meta: op.Bytes(),
                ChildId: op.Child,
            },
        },

View file

@@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
}

func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
-   traverser, err := placement.NewTraverser(ctx, n.Traversal.Opts...)
+   traverser, err := placement.NewTraverser(ctx, n.Opts...)
    if err != nil {
        return fmt.Errorf("could not create object placement traverser: %w", err)
    }
@@ -56,7 +56,7 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
    }

    // perform additional container broadcast if needed
-   if n.Traversal.submitPrimaryPlacementFinish() {
+   if n.submitPrimaryPlacementFinish() {
        err := n.ForEachNode(ctx, f)
        if err != nil {
            n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
@@ -101,7 +101,7 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
            // in subsequent container broadcast. Note that we don't
            // process this node during broadcast if primary placement
            // on it failed.
-           n.Traversal.submitProcessed(addr, item)
+           n.submitProcessed(addr, item)
        }

    wg.Wait()

View file

@@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque
    detachedExecutor.execute(ctx)

-   return detachedExecutor.statusError.err
+   return detachedExecutor.err
}

View file

@@ -87,7 +87,7 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error {
    exec.execute(ctx)

-   return exec.statusError.err
+   return exec.err
}

func (exec *request) execute(ctx context.Context) {

View file

@@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec
    p.SetHeader(objV2.GetHeader())
    p.SetSignature(objV2.GetSignature())

-   return s.GetObjectStream.Send(newResponse(p))
+   return s.Send(newResponse(p))
}

func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
    p := new(objectV2.GetObjectPartChunk)
    p.SetChunk(chunk)

-   return s.GetObjectStream.Send(newResponse(p))
+   return s.Send(newResponse(p))
}

func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
@@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
}

func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error {
-   return s.GetObjectRangeStream.Send(newRangeResponse(chunk))
+   return s.Send(newRangeResponse(chunk))
}

func newRangeResponse(p []byte) *objectV2.GetRangeResponse {

View file

@@ -28,7 +28,7 @@ func NewService(cfg *objectwriter.Config,
// Patch calls internal service and returns v2 object streamer.
func (s *Service) Patch() (object.PatchObjectStream, error) {
-   nodeKey, err := s.Config.KeyStorage.GetKey(nil)
+   nodeKey, err := s.KeyStorage.GetKey(nil)
    if err != nil {
        return nil, err
    }

View file

@@ -102,7 +102,7 @@ func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Obje
        return target.ErrWrongPayloadSize
    }

-   maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize(ctx)
+   maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
    if obj.PayloadSize() > maxAllowedSize {
        return target.ErrExceedingMaxSize
    }
@@ -166,13 +166,13 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
}

func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
-   iter := s.Config.NewNodeIterator(placement.placementOptions)
+   iter := s.NewNodeIterator(placement.placementOptions)
    iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
    iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast

    signer := &putSingleRequestSigner{
        req: req,
-       keyStorage: s.Config.KeyStorage,
+       keyStorage: s.KeyStorage,
        signer: &sync.Once{},
    }
@@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
    if err != nil {
        return err
    }
-   key, err := s.Config.KeyStorage.GetKey(nil)
+   key, err := s.KeyStorage.GetKey(nil)
    if err != nil {
        return err
    }

    signer := &putSingleRequestSigner{
        req: req,
-       keyStorage: s.Config.KeyStorage,
+       keyStorage: s.KeyStorage,
        signer: &sync.Once{},
    }
@@ -225,7 +225,7 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS
    if !ok {
        return result, errors.New("missing container ID")
    }
-   cnrInfo, err := s.Config.ContainerSource.Get(ctx, cnrID)
+   cnrInfo, err := s.ContainerSource.Get(ctx, cnrID)
    if err != nil {
        return result, fmt.Errorf("could not get container by ID: %w", err)
    }
@@ -249,14 +249,14 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS
    }
    result.placementOptions = append(result.placementOptions, placement.ForObject(objID))

-   latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.Config.NetmapSource)
+   latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource)
    if err != nil {
        return result, fmt.Errorf("could not get latest network map: %w", err)
    }

    builder := placement.NewNetworkMapBuilder(latestNetmap)
    if localOnly {
        result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
-       builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys)
+       builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys)
    }
    result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
    return result, nil
@@ -273,7 +273,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
    client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)

-   c, err := s.Config.ClientConstructor.Get(info)
+   c, err := s.ClientConstructor.Get(info)
    if err != nil {
        return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
    }
@@ -283,7 +283,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
    localTarget := &objectwriter.LocalTarget{
-       Storage: s.Config.LocalStore,
+       Storage: s.LocalStore,
        Container: container,
    }
    return localTarget.WriteObject(ctx, obj, meta)
@@ -317,7 +317,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
    if err != nil {
        objID, _ := obj.ID()
        cnrID, _ := obj.ContainerID()
-       s.Config.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
+       s.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
            zap.Error(err),
            zap.Stringer("address", addr),
            zap.Stringer("object_id", objID),

View file

@@ -101,7 +101,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
    var removeLocalChunk bool
    requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
-   if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
+   if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
        // current node is required node, we are happy
        return ecChunkProcessResult{
            validPlacement: true,
@@ -185,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec
        if uint32(i) == objInfo.ECInfo.Total {
            break
        }
-       if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+       if p.netmapKeys.IsLocalKey(n.PublicKey()) {
            requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
        }
    }
@@ -210,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad
func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
    var eiErr *objectSDK.ECInfoError
    for _, n := range nodes {
-       if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+       if p.netmapKeys.IsLocalKey(n.PublicKey()) {
            continue
        }
        _, err := p.remoteHeader(ctx, n, parentAddress, true)
@@ -260,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
            return
        }
        var err error
-       if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+       if p.netmapKeys.IsLocalKey(n.PublicKey()) {
            _, err = p.localHeader(ctx, parentAddress)
        } else {
            _, err = p.remoteHeader(ctx, n, parentAddress, true)
@@ -283,7 +283,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
            }
        } else if client.IsErrObjectAlreadyRemoved(err) {
            restore = false
-       } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
+       } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
            p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
            p.replicator.HandleReplicationTask(ctx, replicator.Task{
                NumCopies: 1,
@@ -343,7 +343,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
        pID, _ := part.ID()
        addr.SetObject(pID)
        targetNode := nodes[idx%len(nodes)]
-       if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
+       if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
            p.replicator.HandleLocalPutTask(ctx, replicator.Task{
                Addr: addr,
                Obj: part,
@@ -371,7 +371,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
        var obj *objectSDK.Object
        var err error
        for _, node := range nodes {
-           if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
+           if p.netmapKeys.IsLocalKey(node.PublicKey()) {
                obj, err = p.localObject(egCtx, objID)
            } else {
                obj, err = p.remoteObject(egCtx, node, objID)

View file

@@ -64,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
    // enable encryption if it
    // was configured so
    if cfg.privateKey != nil {
-       rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8)
+       rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8)
        cfg.privateKey.D.FillBytes(rawKey)

        c, err := aes.NewCipher(rawKey)

View file

@@ -48,7 +48,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
    c.Lock()
-   ccInt, ok := c.LRU.Get(netmapAddr)
+   ccInt, ok := c.Get(netmapAddr)
    c.Unlock()

    if ok {
@@ -71,9 +71,9 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
    c.Lock()
    if err != nil {
-       c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
+       c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
    } else {
-       c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
+       c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
    }
    c.Unlock()

View file

@@ -202,7 +202,7 @@ func newApplyRequest(op *movePair) *ApplyRequest {
            TreeId: op.treeID,
            Operation: &LogMove{
                ParentId: op.op.Parent,
-               Meta: op.op.Meta.Bytes(),
+               Meta: op.op.Bytes(),
                ChildId: op.op.Child,
            },
        },

View file

@@ -687,7 +687,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
        Body: &GetOpLogResponse_Body{
            Operation: &LogMove{
                ParentId: lm.Parent,
-               Meta: lm.Meta.Bytes(),
+               Meta: lm.Bytes(),
                ChildId: lm.Child,
            },
        },

View file

@@ -245,7 +245,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
            Parent: lm.GetParentId(),
            Child: lm.GetChildId(),
        }
-       if err := m.Meta.FromBytes(lm.GetMeta()); err != nil {
+       if err := m.FromBytes(lm.GetMeta()); err != nil {
            return err
        }
        select {
@@ -415,7 +415,7 @@ func (s *Service) syncLoop(ctx context.Context) {
            start := time.Now()

-           cnrs, err := s.cfg.cnrSource.List(ctx)
+           cnrs, err := s.cnrSource.List(ctx)
            if err != nil {
                s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
                s.metrics.AddSyncDuration(time.Since(start), false)