From d6534fd755c22fd23ed471e2440d95d640903f98 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 1 Mar 2024 11:58:17 +0300
Subject: [PATCH] [#1016] frostfs-node: Fix gopls issues

Signed-off-by: Dmitrii Stepanov
---
 .../morph/initialize/initialize_test.go       |  6 +++---
 cmd/frostfs-node/cache_test.go                |  2 +-
 cmd/frostfs-node/config/morph/config_test.go  | 10 ++++++++--
 internal/ape/converter_test.go                | 22 +++++++++++-----------
 pkg/innerring/initialization.go               |  5 -----
 .../blobstor/blobovniczatree/cache.go         |  2 +-
 .../engine/delete_test.go                     |  8 ++++----
 .../engine/engine_test.go                     | 12 ++++++------
 pkg/local_object_storage/engine/head_test.go  |  4 ++--
 .../engine/inhume_test.go                     |  4 ++--
 pkg/local_object_storage/metabase/put_test.go |  6 +++---
 pkg/services/object/ape/checker_test.go       |  4 ++--
 pkg/services/object/ape/request_test.go       |  4 ++--
 .../placement/traverser_test.go               | 12 ++++++------
 pkg/services/tree/cache.go                    |  2 +-
 pkg/services/tree/sync.go                     |  2 +-
 16 files changed, 53 insertions(+), 52 deletions(-)

diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
index e39c7356f..6c52aa2ab 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
@@ -53,7 +53,7 @@ func TestInitialize(t *testing.T) {
 		testInitialize(t, constants.MaxAlphabetNodes)
 	})
 	t.Run("too many nodes", func(t *testing.T) {
-		require.ErrorIs(t, generateTestData(t, t.TempDir(), constants.MaxAlphabetNodes+1), helper.ErrTooManyAlphabetNodes)
+		require.ErrorIs(t, generateTestData(t.TempDir(), constants.MaxAlphabetNodes+1), helper.ErrTooManyAlphabetNodes)
 	})
 }
 
@@ -61,7 +61,7 @@ func testInitialize(t *testing.T, committeeSize int) {
 	testdataDir := t.TempDir()
 	v := viper.GetViper()
 
-	require.NoError(t, generateTestData(t, testdataDir, committeeSize))
+	require.NoError(t, generateTestData(testdataDir, committeeSize))
 	v.Set(constants.ProtoConfigPath, filepath.Join(testdataDir, protoFileName))
 
 	// Set to the path or remove the next statement to download from the network.
@@ -98,7 +98,7 @@ func testInitialize(t *testing.T, committeeSize int) {
 	})
 }
 
-func generateTestData(t *testing.T, dir string, size int) error {
+func generateTestData(dir string, size int) error {
 	v := viper.GetViper()
 	v.Set(commonflags.AlphabetWalletsFlag, dir)
 
diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go
index 6e076abfc..6981ac721 100644
--- a/cmd/frostfs-node/cache_test.go
+++ b/cmd/frostfs-node/cache_test.go
@@ -10,7 +10,7 @@ import (
 func TestTTLNetCache(t *testing.T) {
 	ttlDuration := time.Millisecond * 50
 
-	cache := newNetworkTTLCache[string, time.Time](10, ttlDuration, testNetValueReader)
+	cache := newNetworkTTLCache(10, ttlDuration, testNetValueReader)
 
 	key := "key"
 
diff --git a/cmd/frostfs-node/config/morph/config_test.go b/cmd/frostfs-node/config/morph/config_test.go
index 192140446..a30733cd0 100644
--- a/cmd/frostfs-node/config/morph/config_test.go
+++ b/cmd/frostfs-node/config/morph/config_test.go
@@ -24,8 +24,14 @@ func TestMorphSection(t *testing.T) {
 	const path = "../../../../config/example/node"
 
 	rpcs := []client.Endpoint{
-		{"wss://rpc1.morph.frostfs.info:40341/ws", 1},
-		{"wss://rpc2.morph.frostfs.info:40341/ws", 2},
+		{
+			Address:  "wss://rpc1.morph.frostfs.info:40341/ws",
+			Priority: 1,
+		},
+		{
+			Address:  "wss://rpc2.morph.frostfs.info:40341/ws",
+			Priority: 2,
+		},
 	}
 
 	fileConfigTest := func(c *config.Config) {
diff --git a/internal/ape/converter_test.go b/internal/ape/converter_test.go
index de72408b1..b5f8939c4 100644
--- a/internal/ape/converter_test.go
+++ b/internal/ape/converter_test.go
@@ -27,7 +27,7 @@ func TestEACLTableWithoutRecords(t *testing.T) {
 		res: &testResource{name: nativeschema.ResourceFormatRootObjects},
 	}
 
-	compare(t, tb, vu, ch, req)
+	compare(t, vu, ch, req)
 
 	cnrID := cidtest.ID()
 	tb.SetCID(cnrID)
@@ -37,7 +37,7 @@ func TestEACLTableWithoutRecords(t *testing.T) {
 	ch, err = ConvertEACLToAPE(tb)
 	require.NoError(t, err)
 
-	compare(t, tb, vu, ch, req)
+	compare(t, vu, ch, req)
 }
 
 func TestNoTargets(t *testing.T) {
@@ -69,7 +69,7 @@ func TestNoTargets(t *testing.T) {
 			},
 			res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
 		}
-		compare(t, tb, vu, ch, req)
+		compare(t, vu, ch, req)
 	}
 }
 
@@ -109,7 +109,7 @@ func TestNoFilters(t *testing.T) {
 				},
 				res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
 			}
-			compare(t, tb, vu, ch, req)
+			compare(t, vu, ch, req)
 		}
 	})
 
@@ -155,7 +155,7 @@ func TestNoFilters(t *testing.T) {
 				},
 				res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
 			}
-			compare(t, tb, vu, ch, req)
+			compare(t, vu, ch, req)
 		}
 	})
 
@@ -199,7 +199,7 @@ func TestNoFilters(t *testing.T) {
 				},
 				res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
 			}
-			compare(t, tb, vu, ch, req)
+			compare(t, vu, ch, req)
 		}
 	})
 
@@ -236,7 +236,7 @@ func TestNoFilters(t *testing.T) {
 				},
 				res: &testResource{name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString())},
 			}
-			compare(t, tb, vu, ch, req)
+			compare(t, vu, ch, req)
 		}
 	})
 }
@@ -292,7 +292,7 @@ func TestWithFilters(t *testing.T) {
 					},
 				},
 			}
-			compare(t, tb, vu, ch, req)
+			compare(t, vu, ch, req)
 		}
 	})
 
@@ -342,7 +342,7 @@ func TestWithFilters(t *testing.T) {
 					name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()),
 				},
 			}
-			compare(t, tb, vu, ch, req)
+			compare(t, vu, ch, req)
 		}
 	})
 }
@@ -391,10 +391,10 @@ func TestNoHeader(t *testing.T) {
 			name: fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cnrID.EncodeToString()),
 		},
 	}
-	compare(t, tb, vu, ch, req)
+	compare(t, vu, ch, req)
 }
 
-func compare(t *testing.T, eaclTable *eacl.Table, vu *eacl.ValidationUnit, ch *apechain.Chain, req *testRequest) {
+func compare(t *testing.T, vu *eacl.ValidationUnit, ch *apechain.Chain, req *testRequest) {
 	validator := eacl.NewValidator()
 	for eaclOp, apeOp := range eaclOperationToEngineAction {
 		vu.WithOperation(eaclOp)
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index f4d9b4169..1a4174289 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -42,10 +42,6 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper,
 		return err
 	}
 
-	if err != nil {
-		return err
-	}
-
 	netSettings := (*networkSettings)(s.netmapClient)
 
 	var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator
@@ -76,7 +72,6 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper,
 
 		NodeStateSettings: netSettings,
 	})
-
 	if err != nil {
 		return err
 	}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
index d8fe6e8b0..ef1793edf 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go
@@ -24,7 +24,7 @@ type dbCache struct {
 }
 
 func newDBCache(size int, ttl time.Duration, dbManager *dbManager) *dbCache {
-	cache := expirable.NewLRU[string, *sharedDB](size, func(_ string, evictedDB *sharedDB) {
+	cache := expirable.NewLRU(size, func(_ string, evictedDB *sharedDB) {
 		evictedDB.Close()
 	}, ttl)
 	return &dbCache{
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index 32d07809a..4a6758012 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -49,9 +49,9 @@ func TestDeleteBigObject(t *testing.T) {
 	link.SetSplitID(splitID)
 	link.SetChildren(childIDs...)
 
-	s1 := testNewShard(t, 1)
-	s2 := testNewShard(t, 2)
-	s3 := testNewShard(t, 3)
+	s1 := testNewShard(t)
+	s2 := testNewShard(t)
+	s3 := testNewShard(t)
 
 	e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
 	e.log = test.NewLogger(t)
@@ -119,7 +119,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
 	link.SetSplitID(splitID)
 	link.SetChildren(childIDs...)
 
-	s1 := testNewShard(t, 1, shard.WithDisabledGC())
+	s1 := testNewShard(t, shard.WithDisabledGC())
 
 	e := testNewEngine(t).setInitializedShards(t, s1).engine
 	e.log = test.NewLogger(t)
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index b20f45be5..b6858df49 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -50,7 +50,7 @@ func BenchmarkExists(b *testing.B) {
 func benchmarkExists(b *testing.B, shardNum int) {
 	shards := make([]*shard.Shard, shardNum)
 	for i := 0; i < shardNum; i++ {
-		shards[i] = testNewShard(b, i)
+		shards[i] = testNewShard(b)
 	}
 
 	e := testNewEngine(b).setInitializedShards(b, shards...).engine
@@ -112,7 +112,7 @@ func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrap
 	shards := make([]*shard.Shard, 0, num)
 
 	for i := 0; i < num; i++ {
-		shards = append(shards, testNewShard(t, i))
+		shards = append(shards, testNewShard(t))
 	}
 
 	return te.setInitializedShards(t, shards...)
@@ -130,7 +130,7 @@ func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts f
 
 func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
 	for i := 0; i < num; i++ {
-		defaultOpts := testDefaultShardOptions(t, i)
+		defaultOpts := testDefaultShardOptions(t)
 		opts := append(defaultOpts, shardOpts(i)...)
 		id, err := te.engine.AddShard(context.Background(), opts...)
 		require.NoError(t, err)
@@ -187,11 +187,11 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
 	}, smallFileStorage, largeFileStorage
 }
 
-func testNewShard(t testing.TB, id int, opts ...shard.Option) *shard.Shard {
+func testNewShard(t testing.TB, opts ...shard.Option) *shard.Shard {
 	sid, err := generateShardID()
 	require.NoError(t, err)
 
-	shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t, id)...)
+	shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t)...)
 
 	s := shard.New(append(shardOpts, opts...)...)
 	require.NoError(t, s.Open(context.Background()))
@@ -200,7 +200,7 @@ func testNewShard(t testing.TB, id int, opts ...shard.Option) *shard.Shard {
 	return s
 }
 
-func testDefaultShardOptions(t testing.TB, id int) []shard.Option {
+func testDefaultShardOptions(t testing.TB) []shard.Option {
 	return []shard.Option{
 		shard.WithLogger(test.NewLogger(t)),
 		shard.WithBlobStorOptions(
diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go
index 5c123d617..5afc50f07 100644
--- a/pkg/local_object_storage/engine/head_test.go
+++ b/pkg/local_object_storage/engine/head_test.go
@@ -39,8 +39,8 @@ func TestHeadRaw(t *testing.T) {
 	link.SetSplitID(splitID)
 
 	t.Run("virtual object split in different shards", func(t *testing.T) {
-		s1 := testNewShard(t, 1)
-		s2 := testNewShard(t, 2)
+		s1 := testNewShard(t)
+		s2 := testNewShard(t)
 
 		e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
 		defer e.Close(context.Background())
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index 4bb128bd7..9daa113f8 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -56,8 +56,8 @@ func TestStorageEngine_Inhume(t *testing.T) {
 	t.Run("delete big object", func(t *testing.T) {
 		t.Parallel()
 
-		s1 := testNewShard(t, 1)
-		s2 := testNewShard(t, 2)
+		s1 := testNewShard(t)
+		s2 := testNewShard(t)
 
 		e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
 		defer e.Close(context.Background())
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index 28467199d..84e4029f2 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -18,7 +18,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func prepareObjects(t testing.TB, n int) []*objectSDK.Object {
+func prepareObjects(n int) []*objectSDK.Object {
 	cnr := cidtest.ID()
 	parentID := objecttest.ID()
 	objs := make([]*objectSDK.Object, n)
@@ -53,7 +53,7 @@ func BenchmarkPut(b *testing.B) {
 
 		var index atomic.Int64
 		index.Store(-1)
-		objs := prepareObjects(b, b.N)
+		objs := prepareObjects(b.N)
 		b.ResetTimer()
 		b.ReportAllocs()
 		b.RunParallel(func(pb *testing.PB) {
@@ -71,7 +71,7 @@ func BenchmarkPut(b *testing.B) {
 		defer func() { require.NoError(b, db.Close()) }()
 		var index atomic.Int64
 		index.Store(-1)
-		objs := prepareObjects(b, b.N)
+		objs := prepareObjects(b.N)
 		b.ResetTimer()
 		b.ReportAllocs()
 		for i := 0; i < b.N; i++ {
diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go
index d7e97064b..443414959 100644
--- a/pkg/services/object/ape/checker_test.go
+++ b/pkg/services/object/ape/checker_test.go
@@ -82,7 +82,7 @@ func stringPtr(s string) *string {
 	return &s
 }
 
-func newHeaderObjectSDK(t *testing.T, cnr cid.ID, oid *oid.ID, headerObjSDK *headerObjectSDKParams) *objectSDK.Object {
+func newHeaderObjectSDK(cnr cid.ID, oid *oid.ID, headerObjSDK *headerObjectSDKParams) *objectSDK.Object {
 	objSDK := objectSDK.New()
 	objSDK.SetContainerID(cnr)
 	if oid != nil {
@@ -344,7 +344,7 @@ func TestAPECheck(t *testing.T) {
 
 			var headerObjSDK *objectSDK.Object
 			if test.header.headerObjSDK != nil {
-				headerObjSDK = newHeaderObjectSDK(t, cnr, obj, test.header.headerObjSDK)
+				headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK)
 				if test.header.fromHeaderProvider {
 					require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider")
 					headerProvider.addHeader(cnr, *obj, headerObjSDK)
diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go
index 55aa691b4..0dcc0b84b 100644
--- a/pkg/services/object/ape/request_test.go
+++ b/pkg/services/object/ape/request_test.go
@@ -80,7 +80,7 @@ func TestObjectProperties(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			cnr := newContainerIDSDK(t, test.container)
 			obj := newObjectIDSDK(t, test.object)
-			header := newHeaderObjectSDK(t, cnr, obj, test.header)
+			header := newHeaderObjectSDK(cnr, obj, test.header)
 
 			props := objectProperties(cnr, obj, header.ToV2().GetHeader())
 			require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID])
@@ -223,7 +223,7 @@ func TestNewAPERequest(t *testing.T) {
 
 			var headerObjSDK *objectSDK.Object
			if test.header.headerObjSDK != nil {
-				headerObjSDK = newHeaderObjectSDK(t, cnr, obj, test.header.headerObjSDK)
+				headerObjSDK = newHeaderObjectSDK(cnr, obj, test.header.headerObjSDK)
 				if test.header.fromHeaderProvider {
 					require.NotNil(t, obj, "oid is required if a header is expected to be found in header provider")
 					headerSource.addHeader(cnr, *obj, headerObjSDK)
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index 9b70efc73..f5731c81e 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -40,7 +40,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
 	return vc
 }
 
-func testPlacement(t *testing.T, ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
+func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
 	nodes := make([][]netmap.NodeInfo, 0, len(rs))
 	replicas := make([]netmap.ReplicaDescriptor, 0, len(rs))
 	num := uint32(0)
@@ -83,7 +83,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
 		selectors := []int{2, 3}
 		replicas := []int{1, 2}
 
-		nodes, cnr := testPlacement(t, selectors, replicas)
+		nodes, cnr := testPlacement(selectors, replicas)
 
 		nodesCopy := copyVectors(nodes)
 
@@ -112,7 +112,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
 		selectors := []int{5, 3}
 		replicas := []int{2, 2}
 
-		nodes, cnr := testPlacement(t, selectors, replicas)
+		nodes, cnr := testPlacement(selectors, replicas)
 
 		nodesCopy := copyVectors(nodes)
 
@@ -141,7 +141,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
 		selectors := []int{5, 3}
 		replicas := []int{2, 2}
 
-		nodes, cnr := testPlacement(t, selectors, replicas)
+		nodes, cnr := testPlacement(selectors, replicas)
 
 		nodesCopy := copyVectors(nodes)
 
@@ -184,7 +184,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
 		selectors := []int{2, 3}
 		replicas := []int{1, 2}
 
-		nodes, cnr := testPlacement(t, selectors, replicas)
+		nodes, cnr := testPlacement(selectors, replicas)
 
 		tr, err := NewTraverser(
 			ForContainer(cnr),
@@ -213,7 +213,7 @@ func TestTraverserRemValues(t *testing.T) {
 	selectors := []int{3, 4, 5}
 	replicas := []int{2, 3, 4}
 
-	nodes, cnr := testPlacement(t, selectors, replicas)
+	nodes, cnr := testPlacement(selectors, replicas)
 	nodesCopy := copyVectors(nodes)
 
 	testCases := [...]struct {
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index f50aa0b0d..1be1c2f83 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -35,7 +35,7 @@ const (
 var errRecentlyFailed = errors.New("client has recently failed")
 
 func (c *clientCache) init() {
-	l, _ := simplelru.NewLRU[string, cacheItem](defaultClientCacheSize, func(_ string, value cacheItem) {
+	l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) {
 		if conn := value.cc; conn != nil {
 			_ = conn.Close()
 		}
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 2a19ae18a..064ee5900 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -251,7 +251,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
 		}
 		opsCh <- m
 	}
-	if err != nil && !errors.Is(err, io.EOF) {
+	if !errors.Is(err, io.EOF) {
 		return err
 	}
 	return nil