Linter update #857
15 changed files with 34 additions and 30 deletions
@@ -79,5 +79,8 @@ linters:
     - contextcheck
     - importas
     - truecloudlab-linters
+    - perfsprint
+    - testifylint
+    - protogetter
   disable-all: true
   fast: false
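For context, a summary of mine rather than part of the diff: all three newly enabled linters first shipped with golangci-lint v1.55, which is why the version bump in the Makefile below accompanies them. perfsprint flags fmt calls that have cheaper strconv equivalents, testifylint asks for the most specific testify assertion available, and protogetter requires generated getters instead of direct field access on protobuf messages. Hypothetical one-liners each of them would report (all names are placeholders):

```go
s := fmt.Sprintf("%v", ok)  // perfsprint: strconv.FormatBool(ok) is cheaper
require.True(t, n > 0)      // testifylint: prefer require.Positive(t, n)
id := resp.Body.NodeId      // protogetter: prefer resp.GetBody().GetNodeId()
```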
Makefile (+4 -4)
@@ -8,8 +8,8 @@ HUB_IMAGE ?= truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 
 GO_VERSION ?= 1.21
-LINT_VERSION ?= 1.54.0
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
+LINT_VERSION ?= 1.55.2
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.3
 PROTOC_VERSION ?= 25.0
 PROTOC_GEN_GO_VERSION ?= $(shell go list -f '{{.Version}}' -m google.golang.org/protobuf)
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
@@ -220,7 +220,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
 	if resp.GetBody().GetStatus() != control.GetShardEvacuationStatusResponse_Body_RUNNING ||
 		resp.GetBody().GetDuration() == nil ||
 		resp.GetBody().GetTotal() == 0 ||
-		resp.GetBody().GetEvacuated()+resp.GetBody().GetFailed()+resp.Body.GetSkipped() == 0 {
+		resp.GetBody().GetEvacuated()+resp.GetBody().GetFailed()+resp.GetBody().GetSkipped() == 0 {
 		return
 	}
 
@@ -252,8 +252,8 @@ func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
 }
 
 func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) {
-	if len(resp.Body.GetErrorMessage()) > 0 {
-		sb.WriteString(fmt.Sprintf(" Error: %s.", resp.Body.GetErrorMessage()))
+	if len(resp.GetBody().GetErrorMessage()) > 0 {
+		sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage()))
 	}
 }
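Why protogetter wants GetBody() in these hunks: protoc-gen-go emits getters that tolerate a nil receiver, so a chain of getters degrades to zero values, while direct field access on a nil message panics. A self-contained sketch with mock types standing in for the generated control API (not the literal generated code):

```go
package main

import "fmt"

type body struct{ skipped uint64 }

type response struct{ body *body }

// Generated getters follow this shape: safe to call on a nil receiver.
func (x *response) GetBody() *body {
	if x != nil {
		return x.body
	}
	return nil
}

func (b *body) GetSkipped() uint64 {
	if b != nil {
		return b.skipped
	}
	return 0
}

func main() {
	var resp *response // a nil message, e.g. an RPC that returned nothing
	fmt.Println(resp.GetBody().GetSkipped()) // prints 0, no panic
	// fmt.Println(resp.body.skipped)        // would panic: nil dereference
}
```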
@@ -63,7 +63,7 @@ func removeRule(cmd *cobra.Command, _ []string) {
 
 	verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
 
-	if resp.GetBody().Removed {
+	if resp.GetBody().GetRemoved() {
 		cmd.Println("Rule has been removed.")
 	} else {
 		cmd.Println("Rule has not been removed.")
@@ -65,7 +65,7 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) {
 	out := make([]map[string]any, 0, len(ii))
 	for _, i := range ii {
 		out = append(out, map[string]any{
-			"shard_id": base58.Encode(i.Shard_ID),
+			"shard_id": base58.Encode(i.GetShard_ID()),
 			"mode":     shardModeToString(i.GetMode()),
 			"metabase": i.GetMetabasePath(),
 			"blobstor": i.GetBlobstor(),
@@ -105,7 +105,7 @@ func prettyPrintShards(cmd *cobra.Command, ii []*control.ShardInfo) {
 			pathPrinter("Write-cache", i.GetWritecachePath())+
 				pathPrinter("Pilorama", i.GetPiloramaPath())+
 				fmt.Sprintf("Error count: %d\n", i.GetErrorCount()),
-			base58.Encode(i.Shard_ID),
+			base58.Encode(i.GetShard_ID()),
 			shardModeToString(i.GetMode()),
 		)
 	}
@@ -122,6 +122,6 @@ func shardModeToString(m control.ShardMode) string {
 
 func sortShardsByID(ii []*control.ShardInfo) {
 	sort.Slice(ii, func(i, j int) bool {
-		return bytes.Compare(ii[i].Shard_ID, ii[j].Shard_ID) < 0
+		return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0
 	})
 }
@@ -74,7 +74,7 @@ func add(cmd *cobra.Command, _ []string) {
 	resp, err := cli.Add(ctx, req)
 	commonCmd.ExitOnErr(cmd, "failed to cal add: %w", err)
 
-	cmd.Println("Node ID: ", resp.Body.NodeId)
+	cmd.Println("Node ID: ", resp.GetBody().GetNodeId())
 }
 
 func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {
@@ -167,7 +167,6 @@ func connectNats(ctx context.Context, c *cfg) {
 	err := c.cfgNotifications.nw.w.Connect(ctx, endpoint)
 	if err != nil {
 		panic(fmt.Sprintf("could not connect to a nats endpoint %s: %v", endpoint, err))
-	} else {
-		c.log.Info(logs.NatsConnectedToEndpoint, zap.String("endpoint", endpoint))
 	}
+	c.log.Info(logs.NatsConnectedToEndpoint, zap.String("endpoint", endpoint))
 }
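The connectNats hunk is pure control-flow flattening: panic never returns, so the else branch adds nothing and the log call can follow the if unconditionally. A minimal sketch of the before/after (I am not certain which of the enabled linters reported this particular one):

```go
package main

import "log"

func mustConnect(err error) {
	// Before: the else branch is redundant, panic(...) never falls through.
	//
	//	if err != nil {
	//		panic(err)
	//	} else {
	//		log.Println("connected")
	//	}

	// After: same behavior, one level less nesting.
	if err != nil {
		panic(err)
	}
	log.Println("connected")
}

func main() { mustConnect(nil) }
```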
@@ -82,7 +82,7 @@ func runTestIgnoreLogicalErrors(t *testing.T, s common.Storage, objects []object
 
 	_, err := s.Iterate(context.Background(), iterPrm)
 	require.Equal(t, err, logicErr)
-	require.Equal(t, len(objects)/2, len(seen))
+	require.Len(t, seen, len(objects)/2)
 	for i := range objects {
 		d, ok := seen[objects[i].addr.String()]
 		if ok {
@@ -91,7 +91,7 @@ func TestFlush[Option any](
 
 	require.Equal(t, uint32(0), errCount.Load())
 	require.Error(t, wc.Flush(context.Background(), false))
-	require.True(t, errCount.Load() > 0)
+	require.Greater(t, errCount.Load(), uint32(0))
 	require.NoError(t, wc.Flush(context.Background(), true))
 
 	check(t, mb, bs, objects)
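Both test hunks show testifylint steering toward the most specific assertion, which pays off mainly in failure output: the specific helpers print the operands separately instead of a bare boolean. A short sketch, assuming a testify-based test (names are placeholders):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func checkCounts(t *testing.T, seen map[string]struct{}, n uint32) {
	// Generic: a failure reads like "should be true", with no values shown.
	require.True(t, n > 0)

	// Specific: a failure shows both the actual value and the threshold.
	require.Greater(t, n, uint32(0))

	// Same idea for lengths: the actual length is printed on mismatch.
	require.Len(t, seen, 2)
}
```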
@@ -1,7 +1,7 @@
 package metrics
 
 import (
-	"fmt"
+	"strconv"
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/metrics"
@@ -52,7 +52,7 @@ func (m *writeCacheMetrics) AddMethodDuration(shardID string, method string, suc
 	m.methodDuration.With(
 		prometheus.Labels{
 			shardIDLabel: shardID,
-			successLabel: fmt.Sprintf("%v", success),
+			successLabel: strconv.FormatBool(success),
 			storageLabel: storageType,
 			methodLabel:  method,
 		},
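perfsprint's case against fmt.Sprintf("%v", success): the fmt path parses the format string and passes the bool through an interface, allocating a fresh string on every call, while strconv.FormatBool just returns one of two constant strings. A runnable sketch of the equivalence:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	ok := true
	a := fmt.Sprintf("%v", ok)  // reflection-based formatting, allocates
	b := strconv.FormatBool(ok) // returns the constant "true", no allocation
	fmt.Println(a == b)         // true: the two labels are identical
}
```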
@@ -114,13 +114,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainerRequest)
 		return nil, status.Error(codes.PermissionDenied, err.Error())
 	}
 
-	if len(req.Body.GetContainerId()) > 0 && len(req.Body.GetOwner()) > 0 {
+	if len(req.GetBody().GetContainerId()) > 0 && len(req.GetBody().GetOwner()) > 0 {
 		return nil, status.Error(codes.InvalidArgument, "specify the owner and container at the same time is not allowed")
 	}
 	var vub uint32
-	if len(req.Body.GetContainerId()) > 0 {
+	if len(req.GetBody().GetContainerId()) > 0 {
 		var containerID cid.ID
-		if err := containerID.Decode(req.Body.GetContainerId()); err != nil {
+		if err := containerID.Decode(req.GetBody().GetContainerId()); err != nil {
 			return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("failed to parse container ID: %s", err.Error()))
 		}
 		var err error
@@ -15,12 +15,12 @@ func (s *Server) Doctor(ctx context.Context, req *control.DoctorRequest) (*control.DoctorResponse, error) {
 		return nil, status.Error(codes.PermissionDenied, err.Error())
 	}
 
-	if !req.Body.RemoveDuplicates {
+	if !req.GetBody().GetRemoveDuplicates() {
 		return nil, status.Error(codes.InvalidArgument, "operation not specified")
 	}
 
 	var prm engine.RemoveDuplicatesPrm
-	prm.Concurrency = int(req.Body.Concurrency)
+	prm.Concurrency = int(req.GetBody().GetConcurrency())
 
 	err = s.s.RemoveDuplicates(ctx, prm)
 	if err != nil {
@@ -36,8 +36,8 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeRequest)
 		return nil, status.Error(codes.Internal, fmt.Sprintf("unknown shard mode: %s", requestedMode))
 	}
 
-	for _, shardID := range s.getShardIDList(req.Body.GetShard_ID()) {
-		err = s.s.SetShardMode(shardID, m, req.Body.GetResetErrorCounter())
+	for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) {
+		err = s.s.SetShardMode(shardID, m, req.GetBody().GetResetErrorCounter())
 		if err != nil {
 			return nil, status.Error(codes.Internal, err.Error())
 		}
@@ -369,13 +369,14 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
 		x.ParentId = parent
 		x.NodeId = node
 		x.Timestamp = m.Time
-		if b.AllAttributes {
+		if b.GetAllAttributes() {
 			x.Meta = metaToProto(m.Items)
 		} else {
+			var metaValue []*KeyValue
 			for _, kv := range m.Items {
 				for _, attr := range b.GetAttributes() {
 					if kv.Key == attr {
-						x.Meta = append(x.Meta, &KeyValue{
+						metaValue = append(metaValue, &KeyValue{
 							Key:   kv.Key,
 							Value: kv.Value,
 						})
@@ -383,6 +384,7 @@
 					}
 				}
 			}
+			x.Meta = metaValue
 		}
 		info = append(info, &x)
 	}
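My reading of the GetNodeByPath change: protogetter flags the direct field read hidden in x.Meta = append(x.Meta, ...), and calling x.GetMeta() on every iteration would be noisy, so the loop now accumulates into a local slice and writes the field once at the end. A sketch of the pattern with placeholder types (not the actual tree service code):

```go
// Placeholder types standing in for the generated tree service messages.
type KeyValue struct {
	Key   string
	Value []byte
}

type nodeInfo struct{ Meta []*KeyValue }

// filterMeta keeps only the wanted attributes; the message field is
// written exactly once, so the loop never reads x.Meta directly.
func filterMeta(x *nodeInfo, items []KeyValue, wanted map[string]bool) {
	var metaValue []*KeyValue
	for _, kv := range items {
		if wanted[kv.Key] {
			metaValue = append(metaValue, &KeyValue{Key: kv.Key, Value: kv.Value})
		}
	}
	x.Meta = metaValue
}
```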
@@ -670,8 +672,8 @@ func protoToMeta(arr []*KeyValue) []pilorama.KeyValue {
 	meta := make([]pilorama.KeyValue, len(arr))
 	for i, kv := range arr {
 		if kv != nil {
-			meta[i].Key = kv.Key
-			meta[i].Value = kv.Value
+			meta[i].Key = kv.GetKey()
+			meta[i].Value = kv.GetValue()
 		}
 	}
 	return meta
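A side note on the protoToMeta hunk: because generated getters are nil-safe, the surrounding kv != nil guard becomes technically redundant after this change; kv.GetKey() and kv.GetValue() on a nil kv already return zero values, which is exactly what the guard was protecting against. Keeping it is harmless, just no longer load-bearing. A fragment assuming the getter shape sketched earlier:

```go
// With nil-safe getters, both forms produce the same meta[i]:
if kv != nil {
	meta[i].Key = kv.GetKey()
	meta[i].Value = kv.GetValue()
}
// ...is equivalent to the guard-free form, since a nil kv yields "" and nil:
// meta[i].Key = kv.GetKey()
// meta[i].Value = kv.GetValue()
```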
@@ -243,10 +243,10 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string,
 	for ; err == nil; res, err = c.Recv() {
 		lm := res.GetBody().GetOperation()
 		m := &pilorama.Move{
-			Parent: lm.ParentId,
-			Child:  lm.ChildId,
+			Parent: lm.GetParentId(),
+			Child:  lm.GetChildId(),
 		}
-		if err := m.Meta.FromBytes(lm.Meta); err != nil {
+		if err := m.Meta.FromBytes(lm.GetMeta()); err != nil {
 			return err
 		}
 		opsCh <- m
Is it related to the commit (update linter versions)?

Yes: this fix was found by the new version of the linter.