Linter update #857

Merged
dstepanov-yadro merged 4 commits from dstepanov-yadro/frostfs-node:feat/linter_update into master 2024-09-04 19:51:05 +00:00
15 changed files with 34 additions and 30 deletions

View file

@@ -79,5 +79,8 @@ linters:
     - contextcheck
     - importas
     - truecloudlab-linters
+    - perfsprint
+    - testifylint
+    - protogetter
   disable-all: true
   fast: false
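Note for reviewers: perfsprint reports fmt.Sprint*/fmt.Sprintf calls that have cheaper standard-library replacements, testifylint enforces the most specific testify assertion available, and protogetter reports direct reads of protobuf message fields where the generated Get*() accessors should be used. Hedged sketches of each pattern follow the corresponding hunks below.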

View file

@@ -8,8 +8,8 @@ HUB_IMAGE ?= truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 GO_VERSION ?= 1.21
-LINT_VERSION ?= 1.54.0
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
+LINT_VERSION ?= 1.55.2
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.3
 PROTOC_VERSION ?= 25.0
 PROTOC_GEN_GO_VERSION ?= $(shell go list -f '{{.Version}}' -m google.golang.org/protobuf)
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)

View file

@@ -220,7 +220,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu
     if resp.GetBody().GetStatus() != control.GetShardEvacuationStatusResponse_Body_RUNNING ||
         resp.GetBody().GetDuration() == nil ||
         resp.GetBody().GetTotal() == 0 ||
-        resp.GetBody().GetEvacuated()+resp.GetBody().GetFailed()+resp.Body.GetSkipped() == 0 {
+        resp.GetBody().GetEvacuated()+resp.GetBody().GetFailed()+resp.GetBody().GetSkipped() == 0 {
         return
     }
@@ -252,8 +252,8 @@ func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatus
 }

 func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
-    if len(resp.Body.GetErrorMessage()) > 0 {
-        sb.WriteString(fmt.Sprintf(" Error: %s.", resp.Body.GetErrorMessage()))
+    if len(resp.GetBody().GetErrorMessage()) > 0 {
+        sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage()))
     }
 }
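Most hunks in this PR have this shape: a direct field read such as resp.Body is replaced by the generated getter resp.GetBody(). The sketch below uses hand-written stand-ins (Body, Response and their methods are illustrative, not the real control API types) to show the property protogetter relies on: generated getters are nil-receiver safe, so a whole getter chain can be called on a nil message without panicking.

package example

import "fmt"

// Body and Response mimic protobuf-generated messages; the generated Get*()
// accessors check the receiver for nil before dereferencing it.
type Body struct {
    ErrorMessage string
}

func (b *Body) GetErrorMessage() string {
    if b == nil {
        return ""
    }
    return b.ErrorMessage
}

type Response struct {
    Body *Body
}

func (r *Response) GetBody() *Body {
    if r == nil {
        return nil
    }
    return r.Body
}

func main() {
    var resp *Response // e.g. an RPC that returned nothing

    fmt.Println(resp.GetBody().GetErrorMessage()) // prints an empty line: the getter chain is nil-safe
    // fmt.Println(resp.Body.GetErrorMessage())   // would panic: nil pointer dereference on resp
}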

View file

@@ -63,7 +63,7 @@ func removeRule(cmd *cobra.Command, _ []string) {
     verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
-    if resp.GetBody().Removed {
+    if resp.GetBody().GetRemoved() {
         cmd.Println("Rule has been removed.")
     } else {
         cmd.Println("Rule has not been removed.")

View file

@@ -65,7 +65,7 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) {
     out := make([]map[string]any, 0, len(ii))
     for _, i := range ii {
         out = append(out, map[string]any{
-            "shard_id": base58.Encode(i.Shard_ID),
+            "shard_id": base58.Encode(i.GetShard_ID()),
             "mode":     shardModeToString(i.GetMode()),
             "metabase": i.GetMetabasePath(),
             "blobstor": i.GetBlobstor(),
@@ -105,7 +105,7 @@ func prettyPrintShards(cmd *cobra.Command, ii []*control.ShardInfo) {
             pathPrinter("Write-cache", i.GetWritecachePath())+
             pathPrinter("Pilorama", i.GetPiloramaPath())+
             fmt.Sprintf("Error count: %d\n", i.GetErrorCount()),
-            base58.Encode(i.Shard_ID),
+            base58.Encode(i.GetShard_ID()),
             shardModeToString(i.GetMode()),
         )
     }
@@ -122,6 +122,6 @@ func shardModeToString(m control.ShardMode) string {
 func sortShardsByID(ii []*control.ShardInfo) {
     sort.Slice(ii, func(i, j int) bool {
-        return bytes.Compare(ii[i].Shard_ID, ii[j].Shard_ID) < 0
+        return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0
     })
 }

View file

@@ -74,7 +74,7 @@ func add(cmd *cobra.Command, _ []string) {
     resp, err := cli.Add(ctx, req)
     commonCmd.ExitOnErr(cmd, "failed to cal add: %w", err)

-    cmd.Println("Node ID: ", resp.Body.NodeId)
+    cmd.Println("Node ID: ", resp.GetBody().GetNodeId())
 }

 func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {

View file

@@ -167,7 +167,6 @@ func connectNats(ctx context.Context, c *cfg) {
     err := c.cfgNotifications.nw.w.Connect(ctx, endpoint)
     if err != nil {
         panic(fmt.Sprintf("could not connect to a nats endpoint %s: %v", endpoint, err))
-    } else {
-        c.log.Info(logs.NatsConnectedToEndpoint, zap.String("endpoint", endpoint))
-    }
+    }
+    c.log.Info(logs.NatsConnectedToEndpoint, zap.String("endpoint", endpoint))
 }
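This hunk drops an else branch rather than a field access; presumably it was picked up by the updated linter set. The reasoning, sketched below with illustrative names (mustConnect and connect are not repository identifiers): panic never returns, so the logging call can follow the if block directly and the else only added a level of nesting.

package example

import (
    "fmt"
    "log"
)

// Generic sketch, not repository code: because panic aborts the function,
// the success path after the if block runs only when connect succeeded.
func mustConnect(connect func(string) error, endpoint string) {
    if err := connect(endpoint); err != nil {
        panic(fmt.Sprintf("could not connect to %s: %v", endpoint, err))
    }
    log.Printf("connected to %s", endpoint) // reached only on success
}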

View file

@@ -82,7 +82,7 @@ func runTestIgnoreLogicalErrors(t *testing.T, s common.Storage, objects []object
     _, err := s.Iterate(context.Background(), iterPrm)
     require.Equal(t, err, logicErr)
-    require.Equal(t, len(objects)/2, len(seen))
+    require.Len(t, seen, len(objects)/2)
     for i := range objects {
         d, ok := seen[objects[i].addr.String()]
         if ok {

View file

@@ -91,7 +91,7 @@ func TestFlush[Option any](
     require.Equal(t, uint32(0), errCount.Load())
     require.Error(t, wc.Flush(context.Background(), false))
-    require.True(t, errCount.Load() > 0)
+    require.Greater(t, errCount.Load(), uint32(0))
     require.NoError(t, wc.Flush(context.Background(), true))
     check(t, mb, bs, objects)
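The two test hunks above follow testifylint's suggestions: require.Len and require.Greater instead of the generic require.Equal/require.True forms, which produce clearer failure messages. A minimal sketch (TestAssertions and its variables are hypothetical, not taken from these tests):

package example

import (
    "testing"

    "github.com/stretchr/testify/require"
)

// Hypothetical test showing the specialised assertions testifylint prefers:
// on failure they print the container or the compared values instead of a
// bare "false is not true".
func TestAssertions(t *testing.T) {
    seen := map[string]int{"a": 1, "b": 2}
    var errCount uint32 = 3

    require.Len(t, seen, 2)                 // instead of require.Equal(t, 2, len(seen))
    require.Greater(t, errCount, uint32(0)) // instead of require.True(t, errCount > 0)
}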

View file

@@ -1,7 +1,7 @@
 package metrics

 import (
-    "fmt"
+    "strconv"
     "time"

     "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
@@ -52,7 +52,7 @@ func (m *writeCacheMetrics) AddMethodDuration(shardID string, method string, suc
     m.methodDuration.With(
         prometheus.Labels{
             shardIDLabel: shardID,
-            successLabel: fmt.Sprintf("%v", success),
+            successLabel: strconv.FormatBool(success),
             storageLabel: storageType,
             methodLabel:  method,
         },
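Here the fmt import is swapped for strconv and the boolean label is produced with strconv.FormatBool, which is the kind of rewrite perfsprint asks for. A small illustrative sketch (metricLabels and its parameters are made up for the example):

package example

import (
    "fmt"
    "strconv"
)

// Illustrative conversions of the kind perfsprint suggests: the strconv forms
// avoid the reflection and extra allocations behind fmt.Sprintf.
func metricLabels(success bool, count int) (string, string) {
    _ = fmt.Sprintf("%v", success) // flagged: use strconv.FormatBool
    _ = fmt.Sprintf("%d", count)   // flagged: use strconv.Itoa

    return strconv.FormatBool(success), strconv.Itoa(count)
}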

View file

@@ -114,13 +114,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer
         return nil, status.Error(codes.PermissionDenied, err.Error())
     }

-    if len(req.Body.GetContainerId()) > 0 && len(req.Body.GetOwner()) > 0 {
+    if len(req.GetBody().GetContainerId()) > 0 && len(req.GetBody().GetOwner()) > 0 {
         return nil, status.Error(codes.InvalidArgument, "specify the owner and container at the same time is not allowed")
     }
     var vub uint32
-    if len(req.Body.GetContainerId()) > 0 {
+    if len(req.GetBody().GetContainerId()) > 0 {
         var containerID cid.ID
-        if err := containerID.Decode(req.Body.GetContainerId()); err != nil {
+        if err := containerID.Decode(req.GetBody().GetContainerId()); err != nil {
             return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("failed to parse container ID: %s", err.Error()))
         }
         var err error

View file

@@ -15,12 +15,12 @@ func (s *Server) Doctor(ctx context.Context, req *control.DoctorRequest) (*contr
         return nil, status.Error(codes.PermissionDenied, err.Error())
     }

-    if !req.Body.RemoveDuplicates {
+    if !req.GetBody().GetRemoveDuplicates() {
         return nil, status.Error(codes.InvalidArgument, "operation not specified")
     }

     var prm engine.RemoveDuplicatesPrm
-    prm.Concurrency = int(req.Body.Concurrency)
+    prm.Concurrency = int(req.GetBody().GetConcurrency())
     err = s.s.RemoveDuplicates(ctx, prm)
     if err != nil {

View file

@@ -36,8 +36,8 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeReques
         return nil, status.Error(codes.Internal, fmt.Sprintf("unknown shard mode: %s", requestedMode))
     }

-    for _, shardID := range s.getShardIDList(req.Body.GetShard_ID()) {
-        err = s.s.SetShardMode(shardID, m, req.Body.GetResetErrorCounter())
+    for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
+        err = s.s.SetShardMode(shardID, m, req.GetBody().GetResetErrorCounter())
         if err != nil {
             return nil, status.Error(codes.Internal, err.Error())
         }

View file

@@ -369,13 +369,14 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
         x.ParentId = parent
         x.NodeId = node
         x.Timestamp = m.Time
-        if b.AllAttributes {
+        if b.GetAllAttributes() {
             x.Meta = metaToProto(m.Items)
         } else {
+            var metaValue []*KeyValue
             for _, kv := range m.Items {
                 for _, attr := range b.GetAttributes() {
                     if kv.Key == attr {
-                        x.Meta = append(x.Meta, &KeyValue{
+                        metaValue = append(metaValue, &KeyValue{
                             Key:   kv.Key,
                             Value: kv.Value,
                         })
@@ -383,6 +384,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
                     }
                 }
             }
+            x.Meta = metaValue
         }
         info = append(info, &x)
     }
@@ -670,8 +672,8 @@ func protoToMeta(arr []*KeyValue) []pilorama.KeyValue {
     meta := make([]pilorama.KeyValue, len(arr))
     for i, kv := range arr {
         if kv != nil {
-            meta[i].Key = kv.Key
-            meta[i].Value = kv.Value
+            meta[i].Key = kv.GetKey()
+            meta[i].Value = kv.GetValue()
         }
     }
     return meta
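In GetNodeByPath the filtered attributes are now collected into a local metaValue slice and assigned to x.Meta once, presumably because appending to x.Meta in the loop reads the field directly, which protogetter flags; building locally also avoids re-reading the field on every iteration. A generic sketch of the pattern (KeyValue, Node and filterMeta are illustrative names, not the real tree API types):

package example

// KeyValue and Node stand in for generated message types.
type KeyValue struct {
    Key   string
    Value []byte
}

type Node struct {
    Meta []*KeyValue
}

// filterMeta accumulates matches in a local slice and assigns the message
// field once, instead of appending to n.Meta on every iteration.
func filterMeta(n *Node, items []*KeyValue, wanted map[string]bool) {
    var meta []*KeyValue
    for _, kv := range items {
        if wanted[kv.Key] {
            meta = append(meta, kv)
        }
    }
    n.Meta = meta
}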

View file

@@ -243,10 +243,10 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
     for ; err == nil; res, err = c.Recv() {
         lm := res.GetBody().GetOperation()
         m := &pilorama.Move{
-            Parent: lm.ParentId,
-            Child:  lm.ChildId,
+            Parent: lm.GetParentId(),
+            Child:  lm.GetChildId(),
         }
-        if err := m.Meta.FromBytes(lm.Meta); err != nil {
+        if err := m.Meta.FromBytes(lm.GetMeta()); err != nil {
             return err
         }
         opsCh <- m