[#1606] Use slices.Clone()/bytes.Clone() where possible

gopatch:
```
@@
var from, to expression
@@
+import "bytes"
-to := make([]byte, len(from))
-copy(to, from)
+to := bytes.Clone(from)

@@
var from, to expression
@@
+import "bytes"
-to = make([]byte, len(from))
-copy(to, from)
+to = bytes.Clone(from)

@@
var from, to, typ expression
@@
+import "slices"
-to := make([]typ, len(from))
-copy(to, from)
+to := slices.Clone(from)

@@
var from, to, typ expression
@@
+import "slices"
-to = make([]typ, len(from))
-copy(to, from)
+to = slices.Clone(from)
```
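
To make the mechanical change concrete, here is a small, self-contained sketch of the before/after pattern. It is illustrative only (the variable names and values are made up, not taken from the repository) and assumes Go 1.21+, where both `bytes.Clone` and the standard-library `slices` package are available:

```go
package main

import (
	"bytes"
	"fmt"
	"slices"
)

func main() {
	src := []byte("payload")

	// Old pattern: allocate a slice of the same length, then copy.
	dst := make([]byte, len(src))
	copy(dst, src)

	// New pattern: bytes.Clone for []byte, slices.Clone for other slice types.
	dstClone := bytes.Clone(src)
	ids := slices.Clone([]string{"shard-1", "shard-2"})

	fmt.Println(bytes.Equal(dst, dstClone)) // true
	fmt.Println(ids)                        // [shard-1 shard-2]

	// One behavioural nuance: the Clone helpers return nil for a nil input,
	// while make(..., len(nil)) + copy produces an empty, non-nil slice.
	var nilSrc []byte
	old := make([]byte, len(nilSrc))
	copy(old, nilSrc)
	fmt.Println(old == nil, bytes.Clone(nilSrc) == nil) // false true
}
```

Assuming the gopatch above is saved to a file (say, `clone.patch`, a made-up name), it could presumably be applied across the module with uber-go/gopatch, roughly `gopatch -p clone.patch ./...`.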

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
author Evgenii Stratonikov 2025-01-17 13:51:18 +03:00
parent 80de5d70bf
commit c98357606b
Signed by: fyrchik
SSH key fingerprint: SHA256:m/TTwCzjnRkXgnzEx9X92ccxy1CcVeinOgDb3NPWWmg
11 changed files with 21 additions and 24 deletions

@@ -11,6 +11,7 @@ import (
 	"net/url"
 	"os"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"text/template"
@@ -410,8 +411,7 @@ func initClient(rpc []string) *rpcclient.Client {
 	var c *rpcclient.Client
 	var err error
-	shuffled := make([]string, len(rpc))
-	copy(shuffled, rpc)
+	shuffled := slices.Clone(rpc)
 	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
 	for _, endpoint := range shuffled {

@@ -1,6 +1,7 @@
 package config
 import (
+	"slices"
 	"strings"
 	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@@ -52,6 +53,5 @@ func (x *Config) Value(name string) any {
 // It supports only one level of nesting and is intended to be used
 // to provide default values.
 func (x *Config) SetDefault(from *Config) {
-	x.defaultPath = make([]string, len(from.path))
-	copy(x.defaultPath, from.path)
+	x.defaultPath = slices.Clone(from.path)
 }

@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"slices"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -255,8 +256,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
 	copyShards := func() []pooledShard {
 		mtx.RLock()
 		defer mtx.RUnlock()
-		t := make([]pooledShard, len(shards))
-		copy(t, shards)
+		t := slices.Clone(shards)
 		return t
 	}
 	eg.Go(func() error {

@@ -3,6 +3,7 @@ package engine
 import (
 	"context"
 	"fmt"
+	"slices"
 	"sync"
 	"time"
@@ -123,8 +124,7 @@ func (s *EvacuationState) DeepCopy() *EvacuationState {
 	if s == nil {
 		return nil
 	}
-	shardIDs := make([]string, len(s.shardIDs))
-	copy(shardIDs, s.shardIDs)
+	shardIDs := slices.Clone(s.shardIDs)
 	return &EvacuationState{
 		shardIDs: shardIDs,

@@ -188,8 +188,7 @@ loop:
 		if offset != nil {
 			// new slice is much faster but less memory efficient
 			// we need to copy, because offset exists during bbolt tx
-			cursor.inBucketOffset = make([]byte, len(offset))
-			copy(cursor.inBucketOffset, offset)
+			cursor.inBucketOffset = bytes.Clone(offset)
 		}
 		if len(result) == 0 {
@@ -198,8 +197,7 @@ loop:
 		// new slice is much faster but less memory efficient
 		// we need to copy, because bucketName exists during bbolt tx
-		cursor.bucketName = make([]byte, len(bucketName))
-		copy(cursor.bucketName, bucketName)
+		cursor.bucketName = bytes.Clone(bucketName)
 		return result, cursor, nil
 	}

@@ -1506,8 +1506,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
 		})
 		if len(res.Items) == batchSize {
-			res.NextPageToken = make([]byte, len(k))
-			copy(res.NextPageToken, k)
+			res.NextPageToken = bytes.Clone(k)
 			break
 		}
 	}

@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"slices"
 	"sort"
 	"strings"
@@ -84,8 +85,7 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID
 		s.operations = append(s.operations, op)
 	}
-	mCopy := make([]KeyValue, len(m))
-	copy(mCopy, m)
+	mCopy := slices.Clone(m)
 	op := s.do(&Move{
 		Parent: node,
 		Meta: Meta{

@@ -2,6 +2,7 @@ package client
 import (
 	"context"
+	"slices"
 	"sort"
 	"time"
@@ -99,8 +100,7 @@ mainLoop:
 		case <-t.C:
 			c.switchLock.RLock()
-			endpointsCopy := make([]Endpoint, len(c.endpoints.list))
-			copy(endpointsCopy, c.endpoints.list)
+			endpointsCopy := slices.Clone(c.endpoints.list)
 			currPriority := c.endpoints.list[c.endpoints.curr].Priority
 			highestPriority := c.endpoints.list[0].Priority

@@ -7,6 +7,7 @@ import (
 	"crypto/sha256"
 	"errors"
 	"fmt"
+	"slices"
 	"strconv"
 	"testing"
@@ -41,8 +42,7 @@ type testPlacementBuilder struct {
 func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
 	[][]netmap.NodeInfo, error,
 ) {
-	arr := make([]netmap.NodeInfo, len(p.vectors[0]))
-	copy(arr, p.vectors[0])
+	arr := slices.Clone(p.vectors[0])
 	return [][]netmap.NodeInfo{arr}, nil
 }

@@ -6,6 +6,7 @@ import (
 	"crypto/sha256"
 	"errors"
 	"fmt"
+	"slices"
 	"strconv"
 	"testing"
@@ -103,8 +104,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.
 		return nil, errors.New("vectors for address not found")
 	}
-	res := make([][]netmap.NodeInfo, len(vs))
-	copy(res, vs)
+	res := slices.Clone(vs)
 	return res, nil
 }

@@ -1,6 +1,7 @@
 package placement
 import (
+	"slices"
 	"strconv"
 	"testing"
@@ -33,8 +34,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
 	vc := make([][]netmap.NodeInfo, 0, len(v))
 	for i := range v {
-		ns := make([]netmap.NodeInfo, len(v[i]))
-		copy(ns, v[i])
+		ns := slices.Clone(v[i])
 		vc = append(vc, ns)
 	}