Fix future gopls issues #1672
23 changed files with 55 additions and 84 deletions
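Judging from the hunks below, the change mechanically applies the rewrites suggested by gopls' modernize checks: `[]byte(fmt.Sprintf(...))` becomes `fmt.Appendf(nil, ...)`, hand-written membership and deletion loops move to `slices.Contains`, `slices.ContainsFunc`, and `slices.Delete`, clamp-style `if` blocks collapse to the `min`/`max` builtins, and `interface{}` is spelled `any`.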
@@ -65,14 +65,14 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error {
 			nbuf := make([]byte, 8)
 			copy(nbuf[:], v)
 			n := binary.LittleEndian.Uint64(nbuf)
-			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n)))
+			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n))
 		case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig:
 			if len(v) == 0 || len(v) > 1 {
 				return helper.InvalidConfigValueErr(k)
 			}
-			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1)))
+			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1))
 		default:
-			_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v))))
+			_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v)))
 		}
 	}

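For context, `fmt.Appendf` (standard library since Go 1.19) formats directly into a byte slice, so passing `nil` as the destination yields the same bytes as `[]byte(fmt.Sprintf(...))` without the intermediate string allocation. A minimal sketch of the before/after; the `tabwriter` setup here is illustrative, not taken from this PR:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	tw := tabwriter.NewWriter(os.Stdout, 0, 2, 2, ' ', 0)

	// Old pattern: format to a string, then convert to []byte.
	_, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", "epoch", 42)))

	// New pattern: append formatted bytes straight onto a nil slice.
	_, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", "epoch", 42))

	_ = tw.Flush()
}
```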
@@ -219,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) {
 		if info.version == "" {
 			info.version = "unknown"
 		}
-		_, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n",
-			info.name, info.version, info.hash.StringLE())))
+		_, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n",
+			info.name, info.version, info.hash.StringLE()))
 	}
 	_ = tw.Flush()

@@ -3,6 +3,7 @@ package helper
 import (
 	"errors"
 	"fmt"
+	"slices"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
@@ -118,11 +119,8 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error {
 		return err
 	}
 	for k, v := range m {
-		for _, key := range NetmapConfigKeys {
-			if k == key {
-				md[k] = v
-				break
-			}
-		}
+		if slices.Contains(NetmapConfigKeys, k) {
+			md[k] = v
+		}
 	}
 	return nil

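`slices.Contains` (standard library since Go 1.21) collapses the hand-written membership loop into a single call. A rough sketch of the equivalence, using a made-up key list for illustration:

```go
package main

import (
	"fmt"
	"slices"
)

// NetmapConfigKeys stands in for the real key list; the values here are illustrative.
var NetmapConfigKeys = []string{"EpochDuration", "MaxObjectSize"}

func main() {
	k := "MaxObjectSize"

	// Old: explicit loop with a break.
	found := false
	for _, key := range NetmapConfigKeys {
		if k == key {
			found = true
			break
		}
	}

	// New: the whole loop becomes a single predicate call.
	fmt.Println(found == slices.Contains(NetmapConfigKeys, k)) // true
}
```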
@@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error {
 	buf := bytes.NewBuffer(nil)
 	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)

-	_, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee)))
-	_, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte)))
-	_, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice)))
+	_, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee))
+	_, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte))
+	_, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice))

 	_ = tw.Flush()
 	cmd.Print(buf.String())

@@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) {
 	tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0)
 	_, _ = tw.Write([]byte("#\tName\tType\n"))
 	for i, t := range targets {
-		_, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())))
+		_, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))
 	}
 	_ = tw.Flush()
 	cmd.Print(buf.String())

@@ -1,6 +1,8 @@
 package tui

 import (
+	"slices"
+
 	"github.com/gdamore/tcell/v2"
 	"github.com/rivo/tview"
 )
@@ -26,7 +28,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) {

 	// Used history data for search prompt, so just make that data recent.
 	if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
-		f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
+		f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1)
 		f.history = append(f.history, s)
 	}

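`slices.Delete(s, i, j)` removes `s[i:j]` and returns the shortened slice, replacing the classic `append(s[:i], s[j:]...)` splice; since Go 1.22 it also zeroes the vacated tail elements, which helps the garbage collector when the slice holds pointers. A small standalone sketch of the one-element case used throughout this PR:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	history := []string{"get", "put", "delete", "search"}
	i := 2 // index to drop

	// Old idiom: splice the element out with append (done on a copy here).
	old := append(append([]string{}, history[:i]...), history[i+1:]...)

	// New idiom: slices.Delete removes history[i:i+1] in place.
	history = slices.Delete(history, i, i+1)

	fmt.Println(old)     // [get put search]
	fmt.Println(history) // [get put search]
}
```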
@@ -37,10 +37,7 @@ func (x *Config) Perm() fs.FileMode {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchDelay() time.Duration {
 	d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
-	if d < 0 {
-		d = 0
-	}
-	return d
+	return max(d, 0)
 }

 // MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -48,10 +45,7 @@ func (x *Config) MaxBatchDelay() time.Duration {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchSize() int {
 	s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
-	if s < 0 {
-		s = 0
-	}
-	return s
+	return max(s, 0)
 }

 // NoSync returns the value of "no_sync" config parameter.
@@ -66,8 +60,5 @@ func (x *Config) NoSync() bool {
 // Returns 0 if the value is not a positive number.
 func (x *Config) PageSize() int {
 	s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
-	if s < 0 {
-		s = 0
-	}
-	return s
+	return max(s, 0)
 }

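Go 1.21's built-in `max` (and its counterpart `min`) make the clamp-to-zero pattern a one-liner. A trivial sketch, assuming the same "negative means unset" semantics described in the surrounding comments:

```go
package main

import (
	"fmt"
	"time"
)

// clampDelay mirrors the rewritten accessors: any negative configured
// value is treated as "not a positive number" and reported as zero.
func clampDelay(d time.Duration) time.Duration {
	return max(d, 0)
}

func main() {
	fmt.Println(clampDelay(-5 * time.Second)) // 0s
	fmt.Println(clampDelay(3 * time.Second))  // 3s
}
```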
@@ -52,10 +52,7 @@ func (x *Config) NoSync() bool {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchDelay() time.Duration {
 	d := config.DurationSafe((*config.Config)(x), "max_batch_delay")
-	if d <= 0 {
-		d = 0
-	}
-	return d
+	return max(d, 0)
 }

 // MaxBatchSize returns the value of "max_batch_size" config parameter.
@@ -63,8 +60,5 @@ func (x *Config) MaxBatchDelay() time.Duration {
 // Returns 0 if the value is not a positive number.
 func (x *Config) MaxBatchSize() int {
 	s := int(config.IntSafe((*config.Config)(x), "max_batch_size"))
-	if s <= 0 {
-		s = 0
-	}
-	return s
+	return max(s, 0)
 }

@@ -3,6 +3,7 @@ package blobstortest
 import (
 	"context"
 	"errors"
+	"slices"
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -26,7 +27,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) {
 	_, err := s.Delete(context.Background(), delPrm)
 	require.NoError(t, err)

-	objects = append(objects[:delID], objects[delID+1:]...)
+	objects = slices.Delete(objects, delID, delID+1)

 	runTestNormalHandler(t, s, objects)

@@ -405,8 +405,8 @@ func TestEvacuateSingleProcess(t *testing.T) {
 	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
 	require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

-	blocker := make(chan interface{})
-	running := make(chan interface{})
+	blocker := make(chan any)
+	running := make(chan any)

 	var prm EvacuateShardPrm
 	prm.ShardID = ids[1:2]
@@ -447,8 +447,8 @@ func TestEvacuateObjectsAsync(t *testing.T) {
 	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
 	require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

-	blocker := make(chan interface{})
-	running := make(chan interface{})
+	blocker := make(chan any)
+	running := make(chan any)

 	var prm EvacuateShardPrm
 	prm.ShardID = ids[1:2]

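`any` has been a predeclared alias for `interface{}` since Go 1.18, so the two spellings are interchangeable and the rewrite is purely cosmetic. A one-line illustration:

```go
package main

import "fmt"

func main() {
	// Identical types: any is an alias for interface{}, not a new type.
	var a any = 1
	var b interface{} = 1
	fmt.Println(a == b) // true
}
```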
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"slices"
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
@@ -250,7 +251,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres
 			unlockedObjects = append(unlockedObjects, addr)
 		} else {
 			// exclude locker
-			keyLockers = append(keyLockers[:i], keyLockers[i+1:]...)
+			keyLockers = slices.Delete(keyLockers, i, i+1)

 			v, err = encodeList(keyLockers)
 			if err != nil {

@@ -37,7 +37,7 @@ func TestResetDropsContainerBuckets(t *testing.T) {
 	for idx := range 100 {
 		var putPrm PutPrm
 		putPrm.SetObject(testutil.GenerateObject())
-		putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
+		putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx))
 		_, err := db.Put(context.Background(), putPrm)
 		require.NoError(t, err)
 	}

@@ -205,10 +205,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
 	r := mergeNodeInfos(res)
 	for i := range r {
 		if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start {
-			finish := i + count
-			if len(res) < finish {
-				finish = len(res)
-			}
+			finish := min(len(res), i+count)
 			last := string(findAttr(r[finish-1].Meta, AttributeFilename))
 			return r[i:finish], &last, nil
 		}

@@ -213,7 +213,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F
 // If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
 // batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
 // The default batchSize is 100, the default limit from neo-go.
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error {
+func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error {
 	start := time.Now()
 	success := false
 	defer func() {
@@ -262,10 +262,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int
 	}()

 	// Batch size for TraverseIterator() can restricted on the server-side.
-	traverseBatchSize := batchSize
-	if invoker.DefaultIteratorResultItems < traverseBatchSize {
-		traverseBatchSize = invoker.DefaultIteratorResultItems
-	}
+	traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems)
 	for {
 		items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize)
 		if err != nil {

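The `min` builtin expresses the "cap at the server limit" logic in one line. A hedged sketch of the same clamp, with a local constant standing in for neo-go's `invoker.DefaultIteratorResultItems` (100, per the comment above):

```go
package main

import "fmt"

// defaultIteratorResultItems stands in for invoker.DefaultIteratorResultItems.
const defaultIteratorResultItems = 100

func main() {
	for _, batchSize := range []int{10, 100, 1000} {
		// Never ask the server for more items per traversal than it allows.
		traverseBatchSize := min(batchSize, defaultIteratorResultItems)
		fmt.Println(batchSize, "->", traverseBatchSize)
	}
}
```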
@@ -3,6 +3,7 @@ package network
 import (
 	"errors"
 	"fmt"
+	"slices"
 	"sort"

 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -164,10 +165,8 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) {
 // at least one common address.
 func (x AddressGroup) Intersects(x2 AddressGroup) bool {
 	for i := range x {
-		for j := range x2 {
-			if x[i].equal(x2[j]) {
-				return true
-			}
-		}
+		if slices.ContainsFunc(x2, x[i].equal) {
+			return true
+		}
 	}

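`slices.ContainsFunc` takes a predicate, so a method value such as `x[i].equal` can drive the search directly. A self-contained sketch with a toy address type; the type and its `equal` method are illustrative, not the frostfs ones:

```go
package main

import (
	"fmt"
	"slices"
)

// addr is a stand-in for the real network.Address type.
type addr struct{ uri string }

func (a addr) equal(b addr) bool { return a.uri == b.uri }

// intersects reports whether the two groups share at least one address.
func intersects(x, x2 []addr) bool {
	for i := range x {
		if slices.ContainsFunc(x2, x[i].equal) {
			return true
		}
	}
	return false
}

func main() {
	g1 := []addr{{"tcp/10.0.0.1:8080"}, {"tcp/10.0.0.2:8080"}}
	g2 := []addr{{"tcp/10.0.0.2:8080"}}
	fmt.Println(intersects(g1, g2)) // true
}
```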
@@ -4,6 +4,7 @@ import (
 	"crypto/ecdsa"
 	"crypto/elliptic"
 	"crypto/rand"
+	"slices"
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl"
@@ -91,13 +92,7 @@ func TestIsVerbCompatible(t *testing.T) {

 	for op, list := range table {
 		for _, verb := range verbs {
-			var contains bool
-			for _, v := range list {
-				if v == verb {
-					contains = true
-					break
-				}
-			}
+			contains := slices.Contains(list, verb)

 			tok.ForVerb(verb)

@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"slices"
 	"strconv"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -182,7 +183,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) {
 	for i := range members {
 		for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body
 			if members[i].Equals(incoming[j]) {
-				incoming = append(incoming[:j], incoming[j+1:]...)
+				incoming = slices.Delete(incoming, j, j+1)
 				j--
 			}
 		}

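When the removal happens inside an index-based loop, the index still has to step back after each `slices.Delete`, exactly as it did with the `append` splice; only the deletion call changes. A standalone sketch of that pattern, with plain strings standing in for the object IDs:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	members := []string{"a", "b"}
	incoming := []string{"a", "x", "b", "a"}

	for i := range members {
		for j := 0; j < len(incoming); j++ { // don't use range: the slice shrinks in the body
			if members[i] == incoming[j] {
				incoming = slices.Delete(incoming, j, j+1)
				j-- // re-check the element that slid into position j
			}
		}
	}
	fmt.Println(incoming) // [x]
}
```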
@@ -2,6 +2,7 @@ package searchsvc

 import (
 	"context"
+	"slices"
 	"sync"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -53,7 +54,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error {
 		}

 		// exclude processed address
-		list = append(list[:i], list[i+1:]...)
+		list = slices.Delete(list, i, i+1)
 		i--
 	}

@@ -3,6 +3,7 @@ package util
 import (
 	"context"
 	"fmt"
+	"slices"

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -93,7 +94,7 @@ func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *o
 		}

 		if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) {
-			vs[i] = append(vs[i][:j], vs[i][j+1:]...)
+			vs[i] = slices.Delete(vs[i], j, j+1)
 			j--
 		}
 	}

@@ -288,8 +288,8 @@ func (t *Traverser) Next() []Node {
 func (t *Traverser) skipEmptyVectors() {
 	for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body
 		if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 {
-			t.vectors = append(t.vectors[:i], t.vectors[i+1:]...)
-			t.rem = append(t.rem[:i], t.rem[i+1:]...)
+			t.vectors = slices.Delete(t.vectors, i, i+1)
+			t.rem = slices.Delete(t.rem, i, i+1)
 			i--
 		} else {
 			break

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"errors"
+	"slices"
 	"sort"
 	"testing"
 	"time"
@@ -226,10 +227,8 @@ func TestProcessObject(t *testing.T) {
 				return nil, err
 			}
 		}
-		for _, i := range ti.objHolders {
-			if index == i {
-				return nil, nil
-			}
-		}
+		if slices.Contains(ti.objHolders, index) {
+			return nil, nil
+		}
 		return nil, new(apistatus.ObjectNotFound)
 	}

@@ -340,14 +340,11 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
 		} else {
 			var metaValue []KeyValue
 			for _, kv := range m.Items {
-				for _, attr := range b.GetAttributes() {
-					if kv.Key == attr {
-						metaValue = append(metaValue, KeyValue{
-							Key:   kv.Key,
-							Value: kv.Value,
-						})
-						break
-					}
-				}
+				if slices.Contains(b.GetAttributes(), kv.Key) {
+					metaValue = append(metaValue, KeyValue{
+						Key:   kv.Key,
+						Value: kv.Value,
+					})
+				}
 			}
 			x.Meta = metaValue

@@ -31,13 +31,10 @@ func PopulateWithObjects(

 	for range count {
 		obj := factory()

-		id := []byte(fmt.Sprintf(
-			"%c/%c/%c",
+		id := fmt.Appendf(nil, "%c/%c/%c",
 			digits[rand.Int()%len(digits)],
 			digits[rand.Int()%len(digits)],
-			digits[rand.Int()%len(digits)],
-		))
+			digits[rand.Int()%len(digits)])

 		prm := meta.PutPrm{}
 		prm.SetObject(obj)
