refactoring: make unused linter stricter #1388

Merged
dstepanov-yadro merged 15 commits from dstepanov-yadro/frostfs-node:refactoring/drop_unused into master 2024-09-25 08:55:39 +00:00
28 changed files with 32 additions and 124 deletions

View file

@@ -38,6 +38,10 @@ linters-settings:
alias:
pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
alias: objectSDK
unused:
field-writes-are-uses: false
exported-fields-are-used: false
local-variables-are-used: false
custom:
truecloudlab-linters:
path: bin/linters/external_linters.so
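For context, the three new `unused` settings make golangci-lint's unused checker stop counting writes, exportedness, and mere declaration as uses. A rough, hypothetical Go sketch (not code from this repository) of the kind of declarations the stricter configuration starts to flag:

```go
package example

// Hypothetical types illustrating the stricter settings.
type record struct {
	// field-writes-are-uses: false — a field that is only ever assigned,
	// never read, is reported as unused.
	hits int

	// exported-fields-are-used: false — being exported no longer counts
	// as a use on its own; without a real read the field is reported.
	Payload []byte
}

func touch(r *record) {
	r.hits = 1 // write-only access no longer keeps the field "used"
}
```

That is also why the rest of this diff either deletes write-only fields outright or marks deliberately kept ones with `// nolint:unused`.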

View file

@@ -565,13 +565,6 @@ type HeadObjectPrm struct {
commonObjectPrm
objectAddressPrm
rawPrm
mainOnly bool
}
// SetMainOnlyFlag sets flag to get only main fields of an object header in terms of FrostFS API.
func (x *HeadObjectPrm) SetMainOnlyFlag(v bool) {
x.mainOnly = v
}
// HeadObjectRes groups the resulting values of HeadObject operation.

View file

@@ -38,7 +38,6 @@ func initObjectHeadCmd() {
_ = objectHeadCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String(fileFlag, "", "File to write header to. Default: stdout.")
flags.Bool("main-only", false, "Return only main fields")
flags.Bool(commonflags.JSON, false, "Marshal output in JSON")
flags.Bool("proto", false, "Marshal output in Protobuf")
flags.Bool(rawFlag, false, rawFlagDesc)
@@ -49,7 +48,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var obj oid.ID
objAddr := readObjectAddress(cmd, &cnr, &obj)
mainOnly, _ := cmd.Flags().GetBool("main-only")
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
@@ -62,7 +60,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
raw, _ := cmd.Flags().GetBool(rawFlag)
prm.SetRawFlag(raw)
prm.SetAddress(objAddr)
prm.SetMainOnlyFlag(mainOnly)
res, err := internalclient.HeadObject(cmd.Context(), prm)
if err != nil {

View file

@@ -7,6 +7,8 @@ import (
)
type RawEntry struct {
// key and value used for record dump.

@a-savchuk please review
// nolint:unused
key, value []byte
a-savchuk marked this conversation as resolved Outdated

For a raw entry, as for any entry, we wanna be able to see its dump (see `DetailedString` method). Both fields `key` and `value` are intended to be shown in a dump view. Could we ignore the lint warning here? or should we do something trickier like this

```go
func (r *RawEntry) DetailedString() string {
	_ = r.value // both value and key need to be used in dump
	return spew.Sdump(r)
}
```

and add an additional comment to describe that decision?
fixed
}
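The accepted resolution keeps both fields and silences the checker, since their only consumer is a reflection-based dump. A minimal sketch of that pattern, assuming `spew` (github.com/davecgh/go-spew/spew) as in the reviewer's snippet; the package name and field layout here are illustrative, not the project's actual code:

```go
package records

import "github.com/davecgh/go-spew/spew"

// RawEntry keeps raw key/value bytes solely so they appear in dumps.
type RawEntry struct {
	// Read only through reflection in DetailedString, which the stricter
	// unused checker cannot see — hence the nolint directives.
	//nolint:unused
	key []byte
	//nolint:unused
	value []byte
}

// DetailedString renders the entry, including unexported fields, via spew.
func (r RawEntry) DetailedString() string {
	return spew.Sdump(r)
}
```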

View file

@@ -16,6 +16,8 @@ type (
DefaultRecord struct {
addr oid.Address
// data used for record dump.
// nolint:unused
data []byte
a-savchuk marked this conversation as resolved Outdated

Same comment as the one for `RawEntry`. We should keep that field for `DetailedString` method
fixed
}
)

View file

@@ -602,7 +602,6 @@ type cfgNetmap struct {
needBootstrap bool
reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime
startEpoch uint64 // epoch number when application is started
}
type cfgNodeInfo struct {
@@ -1082,7 +1081,6 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
} else {
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
chainbase.WithLogger(c.log),
chainbase.WithPath(nodeconfig.PersistentPolicyRules(c.appCfg).Path()),
chainbase.WithPerm(nodeconfig.PersistentPolicyRules(c.appCfg).Perm()),
chainbase.WithNoSync(nodeconfig.PersistentPolicyRules(c.appCfg).NoSync()),

View file

@@ -128,9 +128,6 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
cnrRdr.lister = client
cnrRdr.eacl = c.cfgObject.eaclSource
cnrRdr.src = c.cfgObject.cnrSource
cnrWrt.cacheEnabled = true
cnrWrt.eacls = cachedEACLStorage
}
return cnrRdr, cnrWrt
@@ -247,9 +244,6 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
type morphContainerWriter struct {
neoClient *cntClient.Client
cacheEnabled bool
eacls ttlEACLStorage
}
func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {

View file

@@ -259,7 +259,6 @@ func initNetmapState(c *cfg) {
}
c.cfgNetmap.state.setCurrentEpoch(epoch)
c.cfgNetmap.startEpoch = epoch
c.setContractNodeInfo(ni)
}

View file

@@ -473,7 +473,6 @@ func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFe
func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
return objectAPE.NewService(
c.log,
objectAPE.NewChecker(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),

View file

@@ -21,7 +21,9 @@ type accessPolicyEngine struct {
var _ engine.MorphRuleChainStorageReader = (*morphAPEChainCache)(nil)
type morphAPEChainCacheKey struct {
// nolint:unused
name chain.Name
// nolint:unused
target engine.Target
}
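These fields are presumably flagged because they are only ever written in a composite literal: the reads happen implicitly when the struct is compared as a map key, which the unused checker does not track per field. A hypothetical sketch of that shape (names and types are illustrative, not the actual cache code):

```go
package cache

// cacheKey mirrors the shape of a struct used only as a map key.
type cacheKey struct {
	//nolint:unused // read implicitly via map-key comparison
	name string
	//nolint:unused // read implicitly via map-key comparison
	target string
}

type chainCache map[cacheKey][]byte

// get shows the only "use" of the fields: a composite-literal lookup.
func get(c chainCache, name, target string) ([]byte, bool) {
	v, ok := c[cacheKey{name: name, target: target}]
	return v, ok
}
```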

View file

@@ -17,6 +17,7 @@ type InnerRingServiceMetrics struct {
eventDuration *prometheus.HistogramVec
morphCacheMetrics *morphCacheMetrics
logMetrics logger.LogMetrics
// nolint: unused
appInfo *ApplicationInfo
}

View file

@@ -25,6 +25,7 @@ type NodeMetrics struct {
morphClient *morphClientMetrics
morphCache *morphCacheMetrics
log logger.LogMetrics
// nolint: unused
appInfo *ApplicationInfo
}

View file

@@ -5,9 +5,7 @@ import (
"os"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
type Option func(*cfg)
@@ -18,7 +16,6 @@ type cfg struct {
noSync bool
maxBatchDelay time.Duration
maxBatchSize int
log *logger.Logger
}
func defaultCfg() *cfg {
@@ -26,7 +23,6 @@ func defaultCfg() *cfg {
perm: os.ModePerm,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
maxBatchSize: bbolt.DefaultMaxBatchSize,
log: &logger.Logger{Logger: zap.L()},
}
}
@@ -59,9 +55,3 @@ func WithMaxBatchSize(maxBatchSize int) Option {
c.maxBatchSize = maxBatchSize
}
}
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = l
}
}

View file

@@ -163,7 +163,6 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
Log: s.log,
Metrics: s.irMetrics,
FrostFSClient: frostfsCli,
NetmapClient: s.netmapClient,
AlphabetState: s,
EpochState: s,
Voter: s,

View file

@@ -103,6 +103,8 @@ type (
// to the application.
runners []func(chan<- error) error
// cmode used for upgrade scenario.
// nolint:unused
cmode *atomic.Bool
}

View file

@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -38,7 +37,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -50,7 +48,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
NetmapClient: nm,
},
)
@@ -73,10 +70,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
var irUpdateExp []nmClient.UpdateIRPrm
require.EqualValues(t, irUpdateExp, nm.updates, "invalid IR updates")
var expAlphabetUpdate client.UpdateAlphabetListPrm
expAlphabetUpdate.SetHash(ev.txHash)
expAlphabetUpdate.SetList(testKeys.newInnerRingExp)
@@ -119,7 +112,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -131,7 +123,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
NetmapClient: nm,
},
)
@@ -155,9 +146,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
var irUpdatesExp []nmClient.UpdateIRPrm
require.EqualValues(t, irUpdatesExp, nm.updates, "invalid IR updates")
var alpabetUpdExp client.UpdateAlphabetListPrm
alpabetUpdExp.SetList(testKeys.newInnerRingExp)
alpabetUpdExp.SetHash(ev.TxHash)
@@ -293,12 +281,3 @@ func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm)
c.updates = append(c.updates, p)
return nil
}
type testNetmapClient struct {
updates []nmClient.UpdateIRPrm
}
func (c *testNetmapClient) UpdateInnerRing(p nmClient.UpdateIRPrm) error {
c.updates = append(c.updates, p)
return nil
}

View file

@@ -79,7 +79,6 @@ type (
metrics metrics.Register
pool *ants.Pool
frostfsClient FrostFSClient
netmapClient NetmapClient
alphabetState AlphabetState
epochState EpochState
@@ -105,7 +104,6 @@ type (
MorphClient MorphClient
MainnetClient MainnetClient
FrostFSClient FrostFSClient
NetmapClient NetmapClient
}
)
@@ -146,7 +144,6 @@ func New(p *Params) (*Processor, error) {
metrics: metricsRegister,
pool: pool,
frostfsClient: p.FrostFSClient,
netmapClient: p.NetmapClient,
alphabetState: p.AlphabetState,
epochState: p.EpochState,
voter: p.Voter,

View file

@@ -16,5 +16,5 @@ func (s *memstoreImpl) Type() string { return Type }
func (s *memstoreImpl) Path() string { return s.rootPath }
func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
func (s *memstoreImpl) SetReportErrorFunc(f func(string, error)) { s.reportError = f }
func (s *memstoreImpl) SetReportErrorFunc(func(string, error)) {}
func (s *memstoreImpl) SetParentID(string) {}

View file

@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
@@ -16,7 +15,6 @@ import (
func TestSimpleLifecycle(t *testing.T) {
s := New(
WithRootPath("memstore"),
WithLogger(test.NewLogger(t)),
)
defer func() { require.NoError(t, s.Close()) }()
require.NoError(t, s.Open(mode.ComponentReadWrite))

View file

@@ -2,33 +2,20 @@ package memstore
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
type cfg struct {
log *logger.Logger
rootPath string
readOnly bool
compression *compression.Config
reportError func(string, error)
}
func defaultConfig() *cfg {
return &cfg{
return &cfg{}
log: &logger.Logger{Logger: zap.L()},
reportError: func(string, error) {},
}
}
type Option func(*cfg)
func WithLogger(l *logger.Logger) Option {
return func(c *cfg) {
c.log = l
}
}
func WithRootPath(p string) Option {
return func(c *cfg) {
c.rootPath = p

View file

@@ -249,23 +249,9 @@ func (e *StorageEngine) ResumeExecution() error {
}
type ReConfiguration struct {
errorsThreshold uint32
shardPoolSize uint32
shards map[string][]shard.Option // meta path -> shard opts
}
// SetErrorsThreshold sets a size amount of errors after which
// shard is moved to read-only mode.
func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) {
rCfg.errorsThreshold = errorsThreshold
}
// SetShardPoolSize sets a size of worker pool for each shard.
func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) {
rCfg.shardPoolSize = shardPoolSize
}
// AddShard adds a shard for the reconfiguration.
// Shard identifier is calculated from paths used in blobstor.
func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {

View file

@@ -77,8 +77,6 @@ func (p *DeletePrm) SetAddresses(addrs ...oid.Address) {
type referenceNumber struct {
all, cur int
addr oid.Address
obj *objectSDK.Object
}
@@ -296,7 +294,6 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
if !ok {
nRef = &referenceNumber{
all: parentLength(tx, parAddr),
addr: parAddr,
obj: parent,
}

View file

@@ -12,7 +12,6 @@ import (
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -24,8 +23,6 @@ import (
var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext")
type Service struct {
log *logger.Logger
apeChecker Checker
next objectSvc.ServiceServer
@@ -67,9 +64,8 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service)
}
}
func NewService(log *logger.Logger, apeChecker Checker, next objectSvc.ServiceServer) *Service {
func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service {
return &Service{
log: log,
apeChecker: apeChecker,
next: next,
}

View file

@@ -43,7 +43,7 @@ func (r *request) assembleEC(ctx context.Context) {
}
r.prm.common = r.prm.common.WithLocalOnly(false)
assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.containerSource, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
r.log.Debug(logs.GetAssemblingECObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),

View file

@@ -34,7 +34,6 @@ type assemblerec struct {
rng *objectSDK.Range
remoteStorage ecRemoteStorage
localStorage localStorage
cs container.Source
log *logger.Logger
head bool
traverserGenerator traverserGenerator
@@ -47,7 +46,6 @@ func newAssemblerEC(
rng *objectSDK.Range,
remoteStorage ecRemoteStorage,
localStorage localStorage,
cs container.Source,
log *logger.Logger,
head bool,
tg traverserGenerator,
@@ -59,7 +57,6 @@ func newAssemblerEC(
ecInfo: ecInfo,
remoteStorage: remoteStorage,
localStorage: localStorage,
cs: cs,
log: log,
head: head,
traverserGenerator: tg,

View file

@@ -2,7 +2,6 @@ package putsvc
import (
"context"
"crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -21,8 +20,6 @@ type PutInitPrm struct {
traverseOpts []placement.Option
relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error

@aarifullin please review. Looks like this could be deleted after `Patch` related refactorings
aarifullin marked this conversation as resolved Outdated

Oh, no. My refactoring was incorrect. Please, do not apply this change until my fix is merged
privateKey *ecdsa.PrivateKey
}
type PutChunkPrm struct {
@@ -68,11 +65,3 @@ func (p *PutChunkPrm) WithChunk(v []byte) *PutChunkPrm {
return p
}
func (p *PutInitPrm) WithPrivateKey(v *ecdsa.PrivateKey) *PutInitPrm {
if p != nil {
p.privateKey = v
}
return p
}

View file

@@ -10,8 +10,6 @@ import (
)
type SignService struct {
key *ecdsa.PrivateKey
sigSvc *util.SignService
svc ServiceServer
@@ -48,7 +46,6 @@ type getRangeStreamSigner struct {
func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService {
return &SignService{
key: key,
sigSvc: util.NewUnarySignService(key),
svc: svc,
}

View file

@@ -9,7 +9,9 @@ import (
)
type key struct {
// nolint:unused
tokenID string
// nolint:unused
ownerID string
}