[#1598] golangci: Enable unconvert linter

To drop unnecessary type conversions.
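
For reference, unconvert reports type conversions whose operand already has
the target type, so the conversion is a no-op. A minimal sketch of the
pattern it flags (hypothetical names, not taken from this repository):

    package main

    import "fmt"

    func main() {
        var size uint64 = 42

        // unconvert flags this line: size is already a uint64,
        // so the uint64(...) conversion is redundant.
        total := uint64(size) * 2

        // After the fix, the conversion is simply dropped:
        // total := size * 2

        fmt.Println(total)
    }

Every hunk below is the same mechanical change: a redundant conversion is
removed while behavior stays identical.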

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Dmitrii Stepanov 2025-01-13 16:55:43 +03:00
parent a9f27e074b
commit 4d5ae59a52
Signed by: dstepanov-yadro
GPG key ID: 237AF1A763293BC0
13 changed files with 17 additions and 16 deletions

@@ -89,5 +89,6 @@ linters:
     - protogetter
     - intrange
     - tenv
+    - unconvert
   disable-all: true
   fast: false

@@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) {
 	outputPath, _ := cmd.Flags().GetString(outputFlag)
 	if outputPath != "" {
-		err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644)
+		err := os.WriteFile(outputPath, overrideMarshalled, 0o644)
 		commonCmd.ExitOnErr(cmd, "dump error: %w", err)
 	} else {
 		fmt.Print("\n")

@@ -853,8 +853,8 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID {
 }
 
 func initCfgGRPC() cfgGRPC {
-	maxChunkSize := uint64(maxMsgSize) * 3 / 4          // 25% to meta, 75% to payload
-	maxAddrAmount := uint64(maxChunkSize) / addressSize // each address is about 72 bytes
+	maxChunkSize := uint64(maxMsgSize) * 3 / 4  // 25% to meta, 75% to payload
+	maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes
 
 	return cfgGRPC{
 		maxChunkSize: maxChunkSize,

@@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string {
 //
 // Returns PermDefault if the value is not a positive number.
 func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
-	p := config.UintSafe((*config.Config)(l.cfg), "perm")
+	p := config.UintSafe(l.cfg, "perm")
 	if p == 0 {
 		p = PermDefault
 	}
@@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
 //
 // Returns false if the value is not a boolean.
 func (l PersistentPolicyRulesConfig) NoSync() bool {
-	return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
+	return config.BoolSafe(l.cfg, "no_sync")
 }
 
 // CompatibilityMode returns true if need to run node in compatibility with previous versions mode.

@@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
 		}
 	}
 
-	s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt))
+	s.setControlNetmapStatus(ctrlNetSt)
 }
 
 // sets the current node state to the given value. Subsequent cfg.bootstrap

@@ -136,6 +136,6 @@ func (w *genericWriter) removeWithCounter(p string, size uint64) error {
 	if err := os.Remove(p); err != nil {
 		return err
 	}
-	w.fileCounter.Dec(uint64(size))
+	w.fileCounter.Dec(size)
 	return nil
 }

@@ -114,7 +114,7 @@ func (w *linuxWriter) removeFile(p string, size uint64) error {
 		return logicerr.Wrap(new(apistatus.ObjectNotFound))
 	}
 	if err == nil {
-		w.fileCounter.Dec(uint64(size))
+		w.fileCounter.Dec(size)
 	}
 	return err
 }

@@ -133,11 +133,11 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common.IterateRes, error) {
 		elem := common.IterationElement{
 			ObjectData: v,
 		}
-		if err := elem.Address.DecodeString(string(k)); err != nil {
+		if err := elem.Address.DecodeString(k); err != nil {
 			if req.IgnoreErrors {
 				continue
 			}
-			return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err))
+			return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err))
 		}
 		var err error
 		if elem.ObjectData, err = s.compression.Decompress(elem.ObjectData); err != nil {

@@ -724,7 +724,7 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
 	shards := make([]pooledShard, 0, len(e.shards))
 	for id := range e.shards {
 		shards = append(shards, pooledShard{
-			hashedShard: hashedShard(e.shards[id]),
+			hashedShard: e.shards[id],
 			pool:        e.shardPools[id],
 		})
 	}

@@ -272,7 +272,7 @@ func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string })
 	h := hrw.StringHash(objAddr.EncodeToString())
 	shards := make([]hashedShard, 0, len(e.shards))
 	for _, sh := range e.shards {
-		shards = append(shards, hashedShard(sh))
+		shards = append(shards, sh)
 	}
 	hrw.SortHasherSliceByValue(shards, h)
 	return shards
@@ -285,7 +285,7 @@ func (e *StorageEngine) unsortedShards() []hashedShard {
 	shards := make([]hashedShard, 0, len(e.shards))
 	for _, sh := range e.shards {
-		shards = append(shards, hashedShard(sh))
+		shards = append(shards, sh)
 	}
 	return shards

@@ -38,7 +38,7 @@ func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error {
 	err = s.SetMode(ctx, mode.DegradedReadOnly)
 	if err != nil {
-		return fmt.Errorf("switch to mode %s", mode.Mode(mode.DegradedReadOnly))
+		return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly)
 	}
 
 	return nil
 }

@@ -41,7 +41,7 @@ func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) {
 	}
 
 	return NewEpoch{
-		Num:  uint64(nee.Epoch.Uint64()),
+		Num:  nee.Epoch.Uint64(),
 		Hash: e.Container,
 	}, nil
 }

@@ -59,7 +59,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
 	maxSz := s.stream.MaxSizeSrc.MaxObjectSize()
 
 	s.sizes = &sizes{
-		payloadSz: uint64(v.GetHeader().GetPayloadLength()),
+		payloadSz: v.GetHeader().GetPayloadLength(),
 	}
 
 	// check payload size limit overflow