[#1689] linter: Fix staticcheck warning: 'embedded field can be simplified'

Change-Id: I8f454f7d09973cdea096495c3949b88cdd01102e
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
Alexander Chuprov authored on 2025-04-07 16:54:58 +03:00; committed by Aleksandr Chuprov
parent 923f0acf8f
commit 6f7b6b65f3
34 changed files with 121 additions and 121 deletions
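
Every hunk below makes the same mechanical change: staticcheck flags selectors that spell out an embedded field when the promoted field or method is reachable directly, so the embedded field name is dropped from the selector (for example, e.cfg.lowMem becomes e.lowMem, and sh.Shard.ContainerSize(...) becomes sh.ContainerSize(...)). A minimal, self-contained sketch of the rule, with trimmed stand-in types rather than the real FrostFS definitions:

package main

import "fmt"

// cfg is embedded into StorageEngine below, so its fields are promoted.
type cfg struct {
	lowMem bool
}

// StorageEngine embeds cfg without a field name.
type StorageEngine struct {
	cfg
}

func main() {
	e := StorageEngine{cfg: cfg{lowMem: true}}

	// Before: the selector names the embedded field explicitly.
	fmt.Println(e.cfg.lowMem)

	// After: the promoted field is accessed directly; staticcheck
	// reports the longer form as a simplifiable selector.
	fmt.Println(e.lowMem)
}

The rewrite is purely syntactic, which is why the diff swaps lines one-for-one (121 additions, 121 deletions) without changing behavior.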

View file

@@ -242,7 +242,7 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu
 script := sub.Bytes()
 emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0})
-bw.BinWriter.WriteBytes(script)
+bw.WriteBytes(script)
 emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1})
 emit.Opcodes(bw.BinWriter, opcode.PUSH0)
 }

View file

@@ -53,17 +53,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
 f.historyPointer++
 // Stop iterating over history.
 if f.historyPointer == len(f.history) {
-f.InputField.SetText(f.currentContent)
+f.SetText(f.currentContent)
 return
 }
-f.InputField.SetText(f.history[f.historyPointer])
+f.SetText(f.history[f.historyPointer])
 case tcell.KeyUp:
 if len(f.history) == 0 {
 return
 }
 // Start iterating over history.
 if f.historyPointer == len(f.history) {
-f.currentContent = f.InputField.GetText()
+f.currentContent = f.GetText()
 }
 // End of history.
 if f.historyPointer == 0 {
@@ -71,7 +71,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo
 }
 // Iterate to least recent prompts.
 f.historyPointer--
-f.InputField.SetText(f.history[f.historyPointer])
+f.SetText(f.history[f.historyPointer])
 default:
 f.InputField.InputHandler()(event, func(tview.Primitive) {})
 }

View file

@@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
 ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
 }
-ui.Box.MouseHandler()
+ui.MouseHandler()
 }
 func (ui *UI) WithPrompt(prompt string) error {

View file

@@ -14,7 +14,7 @@ import (
 func initAPEManagerService(c *cfg) {
 contractStorage := ape_contract.NewProxyVerificationContractStorage(
 morph.NewSwitchRPCGuardedActor(c.cfgMorph.client),
-c.shared.key,
+c.key,
 c.cfgMorph.proxyScriptHash,
 c.cfgObject.cfgAccessPolicyEngine.policyContractHash)

View file

@@ -32,7 +32,7 @@ func initContainerService(_ context.Context, c *cfg) {
 wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0)
 fatalOnErr(err)
-c.shared.cnrClient = wrap
+c.cnrClient = wrap
 cnrSrc := cntClient.AsContainerSource(wrap)
@@ -47,7 +47,7 @@ func initContainerService(_ context.Context, c *cfg) {
 frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id"))
 }
-c.shared.frostfsidClient = frostfsIDSubjectProvider
+c.frostfsidClient = frostfsIDSubjectProvider
 c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
 defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
@@ -57,7 +57,7 @@ func initContainerService(_ context.Context, c *cfg) {
 service := containerService.NewSignService(
 &c.key.PrivateKey,
 containerService.NewAPEServer(defaultChainRouter, cnrRdr,
-newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
+newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient,
 containerService.NewSplitterService(
 c.cfgContainer.containerBatchSize, c.respSvc,
 containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),

View file

@@ -8,38 +8,38 @@ import (
 func metricsComponent(c *cfg) (*httpComponent, bool) {
 var updated bool
 // check if it has been inited before
-if c.dynamicConfiguration.metrics == nil {
-c.dynamicConfiguration.metrics = new(httpComponent)
-c.dynamicConfiguration.metrics.cfg = c
-c.dynamicConfiguration.metrics.name = "metrics"
-c.dynamicConfiguration.metrics.handler = metrics.Handler()
+if c.metrics == nil {
+c.metrics = new(httpComponent)
+c.metrics.cfg = c
+c.metrics.name = "metrics"
+c.metrics.handler = metrics.Handler()
 updated = true
 }
 // (re)init read configuration
 enabled := metricsconfig.Enabled(c.appCfg)
-if enabled != c.dynamicConfiguration.metrics.enabled {
-c.dynamicConfiguration.metrics.enabled = enabled
+if enabled != c.metrics.enabled {
+c.metrics.enabled = enabled
 updated = true
 }
 address := metricsconfig.Address(c.appCfg)
-if address != c.dynamicConfiguration.metrics.address {
-c.dynamicConfiguration.metrics.address = address
+if address != c.metrics.address {
+c.metrics.address = address
 updated = true
 }
 dur := metricsconfig.ShutdownTimeout(c.appCfg)
-if dur != c.dynamicConfiguration.metrics.shutdownDur {
-c.dynamicConfiguration.metrics.shutdownDur = dur
+if dur != c.metrics.shutdownDur {
+c.metrics.shutdownDur = dur
 updated = true
 }
-return c.dynamicConfiguration.metrics, updated
+return c.metrics, updated
 }
 func enableMetricsSvc(c *cfg) {
-c.shared.metricsSvc.Enable()
+c.metricsSvc.Enable()
 }
 func disableMetricsSvc(c *cfg) {
-c.shared.metricsSvc.Disable()
+c.metricsSvc.Disable()
 }

View file

@@ -186,9 +186,9 @@ func initObjectService(c *cfg) {
 respSvc,
 )
-c.shared.metricsSvc = objectService.NewMetricCollector(
+c.metricsSvc = objectService.NewMetricCollector(
 signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg))
-qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService)
+qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService)
 auditSvc := objectService.NewAuditService(qosService, c.log, c.audit)
 server := objectTransportGRPC.New(auditSvc)
@@ -432,7 +432,7 @@ func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectServic
 c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
 c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
 objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc),
-c.shared.frostfsidClient,
+c.frostfsidClient,
 c.netMapSource,
 c.cfgNetmap.state,
 c.cfgObject.cnrSource,

View file

@@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) {
 func pprofComponent(c *cfg) (*httpComponent, bool) {
 var updated bool
 // check if it has been inited before
-if c.dynamicConfiguration.pprof == nil {
-c.dynamicConfiguration.pprof = new(httpComponent)
-c.dynamicConfiguration.pprof.cfg = c
-c.dynamicConfiguration.pprof.name = "pprof"
-c.dynamicConfiguration.pprof.handler = httputil.Handler()
-c.dynamicConfiguration.pprof.preReload = tuneProfilers
+if c.pprof == nil {
+c.pprof = new(httpComponent)
+c.pprof.cfg = c
+c.pprof.name = "pprof"
+c.pprof.handler = httputil.Handler()
+c.pprof.preReload = tuneProfilers
 updated = true
 }
 // (re)init read configuration
 enabled := profilerconfig.Enabled(c.appCfg)
-if enabled != c.dynamicConfiguration.pprof.enabled {
-c.dynamicConfiguration.pprof.enabled = enabled
+if enabled != c.pprof.enabled {
+c.pprof.enabled = enabled
 updated = true
 }
 address := profilerconfig.Address(c.appCfg)
-if address != c.dynamicConfiguration.pprof.address {
-c.dynamicConfiguration.pprof.address = address
+if address != c.pprof.address {
+c.pprof.address = address
 updated = true
 }
 dur := profilerconfig.ShutdownTimeout(c.appCfg)
-if dur != c.dynamicConfiguration.pprof.shutdownDur {
-c.dynamicConfiguration.pprof.shutdownDur = dur
+if dur != c.pprof.shutdownDur {
+c.pprof.shutdownDur = dur
 updated = true
 }
-return c.dynamicConfiguration.pprof, updated
+return c.pprof, updated
 }
 func tuneProfilers(c *cfg) {

View file

@@ -51,9 +51,9 @@ func initTreeService(c *cfg) {
 c.treeService = tree.New(
 tree.WithContainerSource(cnrSource{
 src: c.cfgObject.cnrSource,
-cli: c.shared.cnrClient,
+cli: c.cnrClient,
 }),
-tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
+tree.WithFrostfsidSubjectProvider(c.frostfsidClient),
 tree.WithNetmapSource(c.netMapSource),
 tree.WithPrivateKey(&c.key.PrivateKey),
 tree.WithLogger(c.log),

View file

@@ -153,5 +153,5 @@ func WithMetrics(m Metrics) Option {
 }
 func (b *BlobStor) Compressor() *compression.Config {
-return &b.cfg.compression
+return &b.compression
 }

View file

@@ -74,7 +74,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm)
 var csPrm shard.ContainerSizePrm
 csPrm.SetContainerID(prm.cnr)
-csRes, err := sh.Shard.ContainerSize(ctx, csPrm)
+csRes, err := sh.ContainerSize(ctx, csPrm)
 if err != nil {
 e.reportShardError(ctx, sh, "can't get container size", err,
 zap.Stringer("container_id", prm.cnr))
@@ -119,7 +119,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes {
 uniqueIDs := make(map[string]cid.ID)
 e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
-res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{})
+res, err := sh.ListContainers(ctx, shard.ListContainersPrm{})
 if err != nil {
 e.reportShardError(ctx, sh, "can't get list of containers", err)
 return false

View file

@@ -77,7 +77,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 errCh := make(chan shardInitError, len(e.shards))
 var eg errgroup.Group
-if e.cfg.lowMem && e.anyShardRequiresRefill() {
+if e.lowMem && e.anyShardRequiresRefill() {
 eg.SetLimit(1)
 }

View file

@@ -227,7 +227,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e
 var outErr error
 e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
-locked, err = h.Shard.IsLocked(ctx, addr)
+locked, err = h.IsLocked(ctx, addr)
 if err != nil {
 e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr))
 outErr = err
@@ -256,7 +256,7 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I
 var outErr error
 e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
-locks, err := h.Shard.GetLocks(ctx, addr)
+locks, err := h.GetLocks(ctx, addr)
 if err != nil {
 e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr))
 outErr = err

View file

@@ -118,7 +118,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh
 return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
 }
-e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
+e.metrics.SetMode(sh.ID().String(), sh.GetMode())
 return sh.ID(), nil
 }

View file

@@ -1582,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error {
 func (t *boltForest) logFromBytes(lm *Move, data []byte) error {
 lm.Child = binary.LittleEndian.Uint64(data)
 lm.Parent = binary.LittleEndian.Uint64(data[8:])
-return lm.Meta.FromBytes(data[16:])
+return lm.FromBytes(data[16:])
 }
 func (t *boltForest) logToBytes(lm *Move) []byte {
 w := io.NewBufBinWriter()
-size := 8 + 8 + lm.Meta.Size() + 1
+size := 8 + 8 + lm.Size() + 1
 // if lm.HasOld {
 // size += 8 + lm.Old.Meta.Size()
 // }
@@ -1595,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte {
 w.Grow(size)
 w.WriteU64LE(lm.Child)
 w.WriteU64LE(lm.Parent)
-lm.Meta.EncodeBinary(w.BinWriter)
+lm.EncodeBinary(w.BinWriter)
 // w.WriteBool(lm.HasOld)
 // if lm.HasOld {
 // w.WriteU64LE(lm.Old.Parent)

View file

@@ -177,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
 var res []NodeInfo
 for _, nodeID := range nodeIDs {
-children := s.tree.getChildren(nodeID)
+children := s.getChildren(nodeID)
 for _, childID := range children {
 var found bool
 for _, kv := range s.infoMap[childID].Meta.Items {
@@ -222,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str
 return nil, ErrTreeNotFound
 }
-children := s.tree.getChildren(nodeID)
+children := s.getChildren(nodeID)
 res := make([]NodeInfo, 0, len(children))
 for _, childID := range children {
 res = append(res, NodeInfo{

View file

@@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree {
 // undo un-does op and changes s in-place.
 func (s *memoryTree) undo(op *move) {
 if op.HasOld {
-s.tree.infoMap[op.Child] = op.Old
+s.infoMap[op.Child] = op.Old
 } else {
-delete(s.tree.infoMap, op.Child)
+delete(s.infoMap, op.Child)
 }
 }
@@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move {
 },
 }
-shouldPut := !s.tree.isAncestor(op.Child, op.Parent)
-p, ok := s.tree.infoMap[op.Child]
+shouldPut := !s.isAncestor(op.Child, op.Parent)
+p, ok := s.infoMap[op.Child]
 if ok {
 lm.HasOld = true
 lm.Old = p
@@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move {
 p.Meta = m
 p.Parent = op.Parent
-s.tree.infoMap[op.Child] = p
+s.infoMap[op.Child] = p
 return lm
 }

View file

@@ -214,8 +214,8 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
 }
 eg, egCtx := errgroup.WithContext(ctx)
-if s.cfg.refillMetabaseWorkersCount > 0 {
-eg.SetLimit(s.cfg.refillMetabaseWorkersCount)
+if s.refillMetabaseWorkersCount > 0 {
+eg.SetLimit(s.refillMetabaseWorkersCount)
 }
 var completedCount uint64

View file

@@ -320,8 +320,8 @@ func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) {
 }
 func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
-workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
-batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
+workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount)
+batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize)
 return
 }

View file

@@ -45,7 +45,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
 }
 shardID := s.info.ID.String()
-s.cfg.metricsWriter.SetShardID(shardID)
+s.metricsWriter.SetShardID(shardID)
 if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
 s.writeCache.GetMetrics().SetShardID(shardID)
 }

View file

@@ -218,7 +218,7 @@ func WithWriteCache(use bool) Option {
 // hasWriteCache returns bool if write cache exists on shards.
 func (s *Shard) hasWriteCache() bool {
-return s.cfg.useWriteCache
+return s.useWriteCache
 }
 // NeedRefillMetabase returns true if metabase is needed to be refilled.
@@ -379,15 +379,15 @@ func WithLimiter(l qos.Limiter) Option {
 }
 func (s *Shard) fillInfo() {
-s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
-s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
-s.cfg.info.Mode = s.GetMode()
+s.info.MetaBaseInfo = s.metaBase.DumpInfo()
+s.info.BlobStorInfo = s.blobStor.DumpInfo()
+s.info.Mode = s.GetMode()
-if s.cfg.useWriteCache {
-s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo()
+if s.useWriteCache {
+s.info.WriteCacheInfo = s.writeCache.DumpInfo()
 }
 if s.pilorama != nil {
-s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo()
+s.info.PiloramaInfo = s.pilorama.DumpInfo()
 }
 }
@@ -454,57 +454,57 @@ func (s *Shard) updateMetrics(ctx context.Context) {
 s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic)
 s.setContainerObjectsCount(contID.EncodeToString(), user, count.User)
 }
-s.cfg.metricsWriter.SetMode(s.info.Mode)
+s.metricsWriter.SetMode(s.info.Mode)
 }
 // incObjectCounter increment both physical and logical object
 // counters.
 func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
-s.cfg.metricsWriter.IncObjectCounter(physical)
-s.cfg.metricsWriter.IncObjectCounter(logical)
-s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
-s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
+s.metricsWriter.IncObjectCounter(physical)
+s.metricsWriter.IncObjectCounter(logical)
+s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
+s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
 if isUser {
-s.cfg.metricsWriter.IncObjectCounter(user)
-s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
+s.metricsWriter.IncObjectCounter(user)
+s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
 }
 }
 func (s *Shard) decObjectCounterBy(typ string, v uint64) {
 if v > 0 {
-s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
+s.metricsWriter.AddToObjectCounter(typ, -int(v))
 }
 }
 func (s *Shard) setObjectCounterBy(typ string, v uint64) {
 if v > 0 {
-s.cfg.metricsWriter.SetObjectCounter(typ, v)
+s.metricsWriter.SetObjectCounter(typ, v)
 }
 }
 func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
 for cnrID, count := range byCnr {
 if count.Phy > 0 {
-s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
+s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
 }
 if count.Logic > 0 {
-s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
+s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic)
 }
 if count.User > 0 {
-s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
+s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User)
 }
 }
 }
 func (s *Shard) addToContainerSize(cnr string, size int64) {
 if size != 0 {
-s.cfg.metricsWriter.AddToContainerSize(cnr, size)
+s.metricsWriter.AddToContainerSize(cnr, size)
 }
 }
 func (s *Shard) addToPayloadSize(size int64) {
 if size != 0 {
-s.cfg.metricsWriter.AddToPayloadSize(size)
+s.metricsWriter.AddToPayloadSize(size)
 }
 }

View file

@@ -220,7 +220,7 @@ func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest
 TreeId: treeID,
 Operation: &tree.LogMove{
 ParentId: op.Parent,
-Meta: op.Meta.Bytes(),
+Meta: op.Bytes(),
 ChildId: op.Child,
 },
 },

View file

@@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
 }
 func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
-traverser, err := placement.NewTraverser(ctx, n.Traversal.Opts...)
+traverser, err := placement.NewTraverser(ctx, n.Opts...)
 if err != nil {
 return fmt.Errorf("could not create object placement traverser: %w", err)
 }
@@ -56,7 +56,7 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context,
 }
 // perform additional container broadcast if needed
-if n.Traversal.submitPrimaryPlacementFinish() {
+if n.submitPrimaryPlacementFinish() {
 err := n.ForEachNode(ctx, f)
 if err != nil {
 n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
@@ -101,7 +101,7 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.
 // in subsequent container broadcast. Note that we don't
 // process this node during broadcast if primary placement
 // on it failed.
-n.Traversal.submitProcessed(addr, item)
+n.submitProcessed(addr, item)
 }
 wg.Wait()

View file

@@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque
 detachedExecutor.execute(ctx)
-return detachedExecutor.statusError.err
+return detachedExecutor.err
 }

View file

@@ -87,7 +87,7 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error {
 exec.execute(ctx)
-return exec.statusError.err
+return exec.err
 }
 func (exec *request) execute(ctx context.Context) {

View file

@@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec
 p.SetHeader(objV2.GetHeader())
 p.SetSignature(objV2.GetSignature())
-return s.GetObjectStream.Send(newResponse(p))
+return s.Send(newResponse(p))
 }
 func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error {
 p := new(objectV2.GetObjectPartChunk)
 p.SetChunk(chunk)
-return s.GetObjectStream.Send(newResponse(p))
+return s.Send(newResponse(p))
 }
 func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
@@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse {
 }
 func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error {
-return s.GetObjectRangeStream.Send(newRangeResponse(chunk))
+return s.Send(newRangeResponse(chunk))
 }
 func newRangeResponse(p []byte) *objectV2.GetRangeResponse {

View file

@@ -28,7 +28,7 @@ func NewService(cfg *objectwriter.Config,
 // Patch calls internal service and returns v2 object streamer.
 func (s *Service) Patch() (object.PatchObjectStream, error) {
-nodeKey, err := s.Config.KeyStorage.GetKey(nil)
+nodeKey, err := s.KeyStorage.GetKey(nil)
 if err != nil {
 return nil, err
 }

View file

@@ -102,7 +102,7 @@ func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Obje
 return target.ErrWrongPayloadSize
 }
-maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize(ctx)
+maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx)
 if obj.PayloadSize() > maxAllowedSize {
 return target.ErrExceedingMaxSize
 }
@@ -166,13 +166,13 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
 }
 func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
-iter := s.Config.NewNodeIterator(placement.placementOptions)
+iter := s.NewNodeIterator(placement.placementOptions)
 iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
 iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
 signer := &putSingleRequestSigner{
 req: req,
-keyStorage: s.Config.KeyStorage,
+keyStorage: s.KeyStorage,
 signer: &sync.Once{},
 }
@@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
 if err != nil {
 return err
 }
-key, err := s.Config.KeyStorage.GetKey(nil)
+key, err := s.KeyStorage.GetKey(nil)
 if err != nil {
 return err
 }
 signer := &putSingleRequestSigner{
 req: req,
-keyStorage: s.Config.KeyStorage,
+keyStorage: s.KeyStorage,
 signer: &sync.Once{},
 }
@@ -225,7 +225,7 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS
 if !ok {
 return result, errors.New("missing container ID")
 }
-cnrInfo, err := s.Config.ContainerSource.Get(ctx, cnrID)
+cnrInfo, err := s.ContainerSource.Get(ctx, cnrID)
 if err != nil {
 return result, fmt.Errorf("could not get container by ID: %w", err)
 }
@@ -249,14 +249,14 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS
 }
 result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
-latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.Config.NetmapSource)
+latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource)
 if err != nil {
 return result, fmt.Errorf("could not get latest network map: %w", err)
 }
 builder := placement.NewNetworkMapBuilder(latestNetmap)
 if localOnly {
 result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
-builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys)
+builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys)
 }
 result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
 return result, nil
@@ -273,7 +273,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
 client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
-c, err := s.Config.ClientConstructor.Get(info)
+c, err := s.ClientConstructor.Get(info)
 if err != nil {
 return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
 }
@@ -283,7 +283,7 @@
 func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
 localTarget := &objectwriter.LocalTarget{
-Storage: s.Config.LocalStore,
+Storage: s.LocalStore,
 Container: container,
 }
 return localTarget.WriteObject(ctx, obj, meta)
@@ -317,7 +317,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
 if err != nil {
 objID, _ := obj.ID()
 cnrID, _ := obj.ContainerID()
-s.Config.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
+s.Logger.Warn(ctx, logs.PutSingleRedirectFailure,
 zap.Error(err),
 zap.Stringer("address", addr),
 zap.Stringer("object_id", objID),

View file

@@ -101,7 +101,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
 func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult {
 var removeLocalChunk bool
 requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))]
-if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
+if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) {
 // current node is required node, we are happy
 return ecChunkProcessResult{
 validPlacement: true,
@@ -185,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec
 if uint32(i) == objInfo.ECInfo.Total {
 break
 }
-if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+if p.netmapKeys.IsLocalKey(n.PublicKey()) {
 requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{}
 }
 }
@@ -210,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad
 func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool {
 var eiErr *objectSDK.ECInfoError
 for _, n := range nodes {
-if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+if p.netmapKeys.IsLocalKey(n.PublicKey()) {
 continue
 }
 _, err := p.remoteHeader(ctx, n, parentAddress, true)
@@ -260,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
 return
 }
 var err error
-if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) {
+if p.netmapKeys.IsLocalKey(n.PublicKey()) {
 _, err = p.localHeader(ctx, parentAddress)
 } else {
 _, err = p.remoteHeader(ctx, n, parentAddress, true)
@@ -283,7 +283,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
 }
 } else if client.IsErrObjectAlreadyRemoved(err) {
 restore = false
-} else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
+} else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total {
 p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err))
 p.replicator.HandleReplicationTask(ctx, replicator.Task{
 NumCopies: 1,
@@ -343,7 +343,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
 pID, _ := part.ID()
 addr.SetObject(pID)
 targetNode := nodes[idx%len(nodes)]
-if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
+if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
 p.replicator.HandleLocalPutTask(ctx, replicator.Task{
 Addr: addr,
 Obj: part,
@@ -371,7 +371,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
 var obj *objectSDK.Object
 var err error
 for _, node := range nodes {
-if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
+if p.netmapKeys.IsLocalKey(node.PublicKey()) {
 obj, err = p.localObject(egCtx, objID)
 } else {
 obj, err = p.remoteObject(egCtx, node, objID)

View file

@@ -64,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) {
 // enable encryption if it
 // was configured so
 if cfg.privateKey != nil {
-rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8)
+rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8)
 cfg.privateKey.D.FillBytes(rawKey)
 c, err := aes.NewCipher(rawKey)

View file

@@ -48,7 +48,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
 func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
 c.Lock()
-ccInt, ok := c.LRU.Get(netmapAddr)
+ccInt, ok := c.Get(netmapAddr)
 c.Unlock()
 if ok {
@@ -71,9 +71,9 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
 c.Lock()
 if err != nil {
-c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
+c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry})
 } else {
-c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
+c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry})
 }
 c.Unlock()

View file

@@ -202,7 +202,7 @@ func newApplyRequest(op *movePair) *ApplyRequest {
 TreeId: op.treeID,
 Operation: &LogMove{
 ParentId: op.op.Parent,
-Meta: op.op.Meta.Bytes(),
+Meta: op.op.Bytes(),
 ChildId: op.op.Child,
 },
 },

View file

@@ -687,7 +687,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer)
 Body: &GetOpLogResponse_Body{
 Operation: &LogMove{
 ParentId: lm.Parent,
-Meta: lm.Meta.Bytes(),
+Meta: lm.Bytes(),
 ChildId: lm.Child,
 },
 },

View file

@@ -245,7 +245,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
 Parent: lm.GetParentId(),
 Child: lm.GetChildId(),
 }
-if err := m.Meta.FromBytes(lm.GetMeta()); err != nil {
+if err := m.FromBytes(lm.GetMeta()); err != nil {
 return err
 }
 select {
@@ -415,7 +415,7 @@ func (s *Service) syncLoop(ctx context.Context) {
 start := time.Now()
-cnrs, err := s.cfg.cnrSource.List(ctx)
+cnrs, err := s.cnrSource.List(ctx)
 if err != nil {
 s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err))
 s.metrics.AddSyncDuration(time.Since(start), false)