Compare commits


9 commits

Author SHA1 Message Date
c98357606b
[#1606] Use slices.Clone()/bytes.Clone() where possible
gopatch:
```
@@
var from, to expression
@@
+import "bytes"
-to := make([]byte, len(from))
-copy(to, from)
+to := bytes.Clone(from)

@@
var from, to expression
@@
+import "bytes"
-to = make([]byte, len(from))
-copy(to, from)
+to = bytes.Clone(from)

@@
var from, to, typ expression
@@
+import "slices"
-to := make([]typ, len(from))
-copy(to, from)
+to := slices.Clone(from)

@@
var from, to, typ expression
@@
+import "slices"
-to = make([]typ, len(from))
-copy(to, from)
+to = slices.Clone(from)
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-01-17 14:50:14 +03:00
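
As an illustration of what this gopatch rewrites (a minimal, self-contained sketch, not code from the repository), the make-plus-copy pair and the standard-library Clone helpers produce the same shallow copy:

```go
package main

import (
	"bytes"
	"fmt"
	"slices"
)

func main() {
	from := []int{1, 2, 3}

	// Before: allocate a slice of the same length, then copy the elements.
	dst := make([]int, len(from))
	copy(dst, from)

	// After: slices.Clone performs the same shallow copy in one call.
	cloned := slices.Clone(from)

	// bytes.Clone is the []byte counterpart used by the patch.
	raw := []byte("frostfs")
	rawCopy := bytes.Clone(raw)

	fmt.Println(dst, cloned, string(rawCopy))
}
```

One behavioural nuance worth remembering when reviewing such mechanical rewrites: bytes.Clone(nil) returns nil, while the old make-plus-copy pattern produced an empty non-nil slice for a nil source.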
80de5d70bf [#1593] node: Fix initialization of ape_chain cache
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2025-01-17 08:58:47 +00:00
57efa0bc8e
[#1604] policer: Properly handle maintenance nodes
Consider `REP 1 REP 1` placement (selects/filters are omitted).
The placement is `[1, 2], [1, 0]`. We are the 0-th node.
Node 1 is under maintenance, so we do not replicate the object
to node 2. In the second replication group node 1 is again under
maintenance, but the current caching logic treats it as a "replica holder"
and removes the local copy. Voilà, we have data loss if the object is
missing from node 1.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-01-16 16:37:52 +03:00
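
To see why a boolean cache loses the information that matters here, below is a minimal, self-contained sketch. It mirrors the `nodeProcessStatus` values and `nodeCache` shape introduced by this change, but keys by a plain `uint64` hash instead of `netmap.NodeInfo` and trims all other policer logic for brevity — it is an illustration, not code from the repository.

```go
package main

import "fmt"

// nodeProcessStatus mirrors the statuses introduced by this change.
type nodeProcessStatus int

const (
	nodeNotProcessed nodeProcessStatus = iota
	nodeDoesNotHoldObject
	nodeHoldsObject
	nodeStatusUnknown
	nodeIsUnderMaintenance
)

// nodeCache maps a node hash to its processing status. The previous
// map[uint64]bool could only record "holds object" / "does not hold object",
// so a maintenance node had to be cached as if it were a replica holder.
type nodeCache map[uint64]nodeProcessStatus

func (c nodeCache) set(hash uint64, st nodeProcessStatus)       { c[hash] = st }
func (c nodeCache) processStatus(hash uint64) nodeProcessStatus { return c[hash] }

func main() {
	cache := nodeCache{}

	// First replication group [1, 2]: node 1 reports MAINTENANCE, so the
	// policer counts it as an unchecked copy instead of a confirmed one.
	cache.set(1, nodeIsUnderMaintenance)

	// Second replication group [1, 0]: node 1 shows up again. With a status
	// value the policer can tell this is not a confirmed replica and keeps
	// the local copy; with the old bool it looked like nodeHoldsObject.
	if cache.processStatus(1) == nodeHoldsObject {
		fmt.Println("remove local copy (old, buggy behaviour)")
	} else {
		fmt.Println("keep local copy until a replica is confirmed")
	}
}
```

With only `true`/`false` available, the maintenance case in the first group had to be stored as `true`, which is exactly what made the second group treat the local copy as redundant.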
26e0c82fb8
[#1604] policer/test: Add test for MAINTENANCE runtime status
The node can have MAINTENANCE status in the network map, but it can also be
ONLINE while responding with MAINTENANCE. These are two different code
paths, so let's test them separately.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-01-16 16:37:16 +03:00
4538ccb12a
[#1604] policer: Do not process the same node twice
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-01-16 16:37:16 +03:00
84e1599997
[#1604] policer: Remove one-line helpers
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-01-16 16:37:16 +03:00
5a270e2e61
[#1604] policer: Use status instead of bool value in node cache
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-01-16 16:37:16 +03:00
436d65d784 [#1591] Build and host OCI images on our own infra
Similar to TrueCloudLab/frostfs-s3-gw#587,
this PR introduces a CI pipeline that builds Docker images and pushes them
to our self-hosted registry.

Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2025-01-16 07:46:53 +00:00
c3c034ecca [#1601] util: Correctly parse 'root' name for container resources
* Convert `root/*` to `//`;
* Add a unit-test case for the parser to check parsing correctness.

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2025-01-15 12:13:02 +00:00
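
A rough sketch of the container branch after this change, for illustration only: it is self-contained rather than taken from the repository, the constant values are placeholders (only the `root/*` → `//` mapping is stated by the commit), and the real constants live in the policy-engine nativeschema package.

```go
package main

import "fmt"

// Placeholder values for illustration; the commit states that `root/*`
// must resolve to `//` (all containers in the root namespace).
const (
	resourceFormatAllContainers  = "*"
	resourceFormatRootContainers = "//"
)

// parseContainerResource sketches the container branch of parseResource
// after this change: both "/*" and "root/*" now mean root-namespace containers.
func parseContainerResource(lexeme string) (string, error) {
	switch lexeme {
	case "*":
		return resourceFormatAllContainers, nil
	case "/*", "root/*":
		return resourceFormatRootContainers, nil
	default:
		return "", fmt.Errorf("unsupported container resource %q", lexeme)
	}
}

func main() {
	for _, lexeme := range []string{"*", "/*", "root/*"} {
		res, _ := parseContainerResource(lexeme)
		fmt.Printf("%-7s -> %s\n", lexeme, res)
	}
}
```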
21 changed files with 115 additions and 66 deletions

View file

@@ -0,0 +1,28 @@
+name: OCI image
+
+on:
+  push:
+  workflow_dispatch:
+
+jobs:
+  image:
+    name: Build container images
+    runs-on: docker
+    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
+    steps:
+      - name: Clone git repo
+        uses: actions/checkout@v3
+
+      - name: Build OCI image
+        run: make images
+
+      - name: Push image to OCI registry
+        run: |
+          echo "$REGISTRY_PASSWORD" \
+            | docker login --username truecloudlab --password-stdin git.frostfs.info
+          make push-images
+        if: >-
+          startsWith(github.ref, 'refs/tags/v') &&
+          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
+        env:
+          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}

View file

@@ -139,6 +139,15 @@ images: image-storage image-ir image-cli image-adm
 # Build dirty local Docker images
 dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm
+
+# Push FrostFS components' docker image to the registry
+push-image-%:
+	@echo "⇒ Publish FrostFS $* docker image "
+	@docker push $(HUB_IMAGE)-$*:$(HUB_TAG)
+
+# Push all Docker images to the registry
+.PHONY: push-images
+push-images: push-image-storage push-image-ir push-image-cli push-image-adm
 
 # Run `make %` in Golang container
 docker/%:
 	docker run --rm -t \

View file

@@ -11,6 +11,7 @@ import (
 	"net/url"
 	"os"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"text/template"
@@ -410,8 +411,7 @@ func initClient(rpc []string) *rpcclient.Client {
 	var c *rpcclient.Client
 	var err error
 
-	shuffled := make([]string, len(rpc))
-	copy(shuffled, rpc)
+	shuffled := slices.Clone(rpc)
 	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
 
 	for _, endpoint := range shuffled {

View file

@@ -1146,7 +1146,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) {
 		c.cfgObject.cfgAccessPolicyEngine.policyContractHash)
 
 	cacheSize := morphconfig.APEChainCacheSize(c.appCfg)
-	if cacheSize > 0 {
+	if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 {
 		morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL)
 	}

View file

@@ -1,6 +1,7 @@
 package config
 
 import (
+	"slices"
 	"strings"
 
 	configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
@@ -52,6 +53,5 @@ func (x *Config) Value(name string) any {
 // It supports only one level of nesting and is intended to be used
 // to provide default values.
 func (x *Config) SetDefault(from *Config) {
-	x.defaultPath = make([]string, len(from.path))
-	copy(x.defaultPath, from.path)
+	x.defaultPath = slices.Clone(from.path)
 }

View file

@@ -95,19 +95,15 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
 ## Post-release
 
-### Prepare and push images to a Docker Hub (if not automated)
+### Prepare and push images to a Docker registry (automated)
 
-Create Docker images for all applications and push them into Docker Hub
-(requires [organization](https://hub.docker.com/u/truecloudlab) privileges)
+Create Docker images for all applications and push them into container registry
+(executed automatically in Forgejo Actions upon pushing a release tag):
 
 ```shell
 $ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION}
 $ make images
-$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION}
-$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION}
+$ make push-images
 ```
 
 ### Make a proper release (if not automated)

View file

@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"slices"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -255,8 +256,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro
 	copyShards := func() []pooledShard {
 		mtx.RLock()
 		defer mtx.RUnlock()
-		t := make([]pooledShard, len(shards))
-		copy(t, shards)
+		t := slices.Clone(shards)
 		return t
 	}
 	eg.Go(func() error {

View file

@@ -3,6 +3,7 @@ package engine
 import (
 	"context"
 	"fmt"
+	"slices"
 	"sync"
 	"time"
@@ -123,8 +124,7 @@ func (s *EvacuationState) DeepCopy() *EvacuationState {
 	if s == nil {
 		return nil
 	}
 
-	shardIDs := make([]string, len(s.shardIDs))
-	copy(shardIDs, s.shardIDs)
+	shardIDs := slices.Clone(s.shardIDs)
 	return &EvacuationState{
 		shardIDs: shardIDs,

View file

@@ -188,8 +188,7 @@ loop:
 		if offset != nil {
 			// new slice is much faster but less memory efficient
 			// we need to copy, because offset exists during bbolt tx
-			cursor.inBucketOffset = make([]byte, len(offset))
-			copy(cursor.inBucketOffset, offset)
+			cursor.inBucketOffset = bytes.Clone(offset)
 		}
 
 		if len(result) == 0 {
@@ -198,8 +197,7 @@ loop:
 			// new slice is much faster but less memory efficient
 			// we need to copy, because bucketName exists during bbolt tx
-			cursor.bucketName = make([]byte, len(bucketName))
-			copy(cursor.bucketName, bucketName)
+			cursor.bucketName = bytes.Clone(bucketName)
 
 			return result, cursor, nil
 		}

View file

@@ -1506,8 +1506,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
 		})
 
 		if len(res.Items) == batchSize {
-			res.NextPageToken = make([]byte, len(k))
-			copy(res.NextPageToken, k)
+			res.NextPageToken = bytes.Clone(k)
 			break
 		}
 	}

View file

@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"slices"
 	"sort"
 	"strings"
@@ -84,8 +85,7 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID
 		s.operations = append(s.operations, op)
 	}
 
-	mCopy := make([]KeyValue, len(m))
-	copy(mCopy, m)
+	mCopy := slices.Clone(m)
 	op := s.do(&Move{
 		Parent: node,
 		Meta: Meta{

View file

@@ -2,6 +2,7 @@ package client
 
 import (
 	"context"
+	"slices"
 	"sort"
 	"time"
@@ -99,8 +100,7 @@ mainLoop:
 		case <-t.C:
 			c.switchLock.RLock()
 
-			endpointsCopy := make([]Endpoint, len(c.endpoints.list))
-			copy(endpointsCopy, c.endpoints.list)
+			endpointsCopy := slices.Clone(c.endpoints.list)
 
 			currPriority := c.endpoints.list[c.endpoints.curr].Priority
 			highestPriority := c.endpoints.list[0].Priority

View file

@@ -7,6 +7,7 @@ import (
 	"crypto/sha256"
 	"errors"
 	"fmt"
+	"slices"
 	"strconv"
 	"testing"
@@ -41,8 +42,7 @@ type testPlacementBuilder struct {
 func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
 	[][]netmap.NodeInfo, error,
 ) {
-	arr := make([]netmap.NodeInfo, len(p.vectors[0]))
-	copy(arr, p.vectors[0])
+	arr := slices.Clone(p.vectors[0])
 	return [][]netmap.NodeInfo{arr}, nil
 }

View file

@@ -6,6 +6,7 @@ import (
 	"crypto/sha256"
 	"errors"
 	"fmt"
+	"slices"
 	"strconv"
 	"testing"
@@ -103,8 +104,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.
 		return nil, errors.New("vectors for address not found")
 	}
 
-	res := make([][]netmap.NodeInfo, len(vs))
-	copy(res, vs)
+	res := slices.Clone(vs)
 	return res, nil
 }

View file

@@ -1,6 +1,7 @@
 package placement
 
 import (
+	"slices"
 	"strconv"
 	"testing"
@@ -33,8 +34,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
 	vc := make([][]netmap.NodeInfo, 0, len(v))
 
 	for i := range v {
-		ns := make([]netmap.NodeInfo, len(v[i]))
-		copy(ns, v[i])
+		ns := slices.Clone(v[i])
 
 		vc = append(vc, ns)
 	}

View file

@@ -126,12 +126,15 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
 		} else {
 			if status := checkedNodes.processStatus(nodes[i]); status.Processed() {
 				if status == nodeHoldsObject {
-					// node already contains replica, no need to replicate
-					nodes = append(nodes[:i], nodes[i+1:]...)
-					i--
 					shortage--
 				}
 
+				if status == nodeIsUnderMaintenance {
+					shortage--
+					uncheckedCopies++
+				}
+
+				nodes = append(nodes[:i], nodes[i+1:]...)
+				i--
 				continue
 			}
@@ -143,10 +146,10 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
 			if err == nil {
 				shortage--
-				checkedNodes.submitReplicaHolder(nodes[i])
+				checkedNodes.set(nodes[i], nodeHoldsObject)
 			} else {
 				if client.IsErrObjectNotFound(err) {
-					checkedNodes.submitReplicaCandidate(nodes[i])
+					checkedNodes.set(nodes[i], nodeDoesNotHoldObject)
 					continue
 				} else if client.IsErrNodeUnderMaintenance(err) {
 					shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies)
@@ -155,6 +158,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
 						zap.Stringer("object", addr),
 						zap.Error(err),
 					)
+					checkedNodes.set(nodes[i], nodeStatusUnknown)
 				}
 			}
 		}
@@ -174,7 +178,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
 // However, additional copies should not be removed in this case,
 // because we can remove the only copy this way.
 func (p *Policer) handleMaintenance(ctx context.Context, node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
-	checkedNodes.submitReplicaHolder(node)
+	checkedNodes.set(node, nodeIsUnderMaintenance)
 	shortage--
 	uncheckedCopies++

View file

@@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) {
 	cache.SubmitSuccessfulReplication(node)
 	require.Equal(t, cache.processStatus(node), nodeHoldsObject)
 
-	cache.submitReplicaCandidate(node)
+	cache.set(node, nodeDoesNotHoldObject)
 	require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject)
 
-	cache.submitReplicaHolder(node)
+	cache.set(node, nodeHoldsObject)
 	require.Equal(t, cache.processStatus(node), nodeHoldsObject)
 }

View file

@@ -8,6 +8,8 @@ const (
 	nodeNotProcessed nodeProcessStatus = iota
 	nodeDoesNotHoldObject
 	nodeHoldsObject
+	nodeStatusUnknown
+	nodeIsUnderMaintenance
 )
 
 func (st nodeProcessStatus) Processed() bool {
@@ -15,37 +17,19 @@ func (st nodeProcessStatus) Processed() bool {
 }
 
 // nodeCache tracks Policer's check progress.
-type nodeCache map[uint64]bool
+type nodeCache map[uint64]nodeProcessStatus
 
 func newNodeCache() nodeCache {
-	return make(map[uint64]bool)
+	return make(map[uint64]nodeProcessStatus)
 }
 
-func (n nodeCache) set(node netmap.NodeInfo, val bool) {
+func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) {
 	n[node.Hash()] = val
 }
 
-// submits storage node as a candidate to store the object replica in case of
-// shortage.
-func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) {
-	n.set(node, false)
-}
-
-// submits storage node as a current object replica holder.
-func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) {
-	n.set(node, true)
-}
-
 // processStatus returns current processing status of the storage node.
 func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
-	switch val, ok := n[node.Hash()]; {
-	case !ok:
-		return nodeNotProcessed
-	case val:
-		return nodeHoldsObject
-	default:
-		return nodeDoesNotHoldObject
-	}
+	return n[node.Hash()]
 }
 
 // SubmitSuccessfulReplication marks given storage node as a current object
@@ -53,5 +37,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
 //
 // SubmitSuccessfulReplication implements replicator.TaskResult.
 func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
-	n.submitReplicaHolder(node)
+	n.set(node, nodeHoldsObject)
 }

View file

@@ -78,6 +78,7 @@ func TestProcessObject(t *testing.T) {
 		maintenanceNodes    []int
 		wantRemoveRedundant bool
 		wantReplicateTo     []int
+		headResult          map[int]error
 		ecInfo              *objectcore.ECInfo
 	}{
 		{
@@ -127,7 +128,7 @@ func TestProcessObject(t *testing.T) {
 			nodeCount:       2,
 			policy:          `REP 2 REP 2`,
 			placement:       [][]int{{0, 1}, {0, 1}},
-			wantReplicateTo: []int{1, 1}, // is this actually good?
+			wantReplicateTo: []int{1},
 		},
 		{
 			desc: "lock object must be replicated to all nodes",
@@ -145,6 +146,14 @@ func TestProcessObject(t *testing.T) {
 			objHolders:       []int{1},
 			maintenanceNodes: []int{2},
 		},
+		{
+			desc:       "preserve local copy when node response with MAINTENANCE",
+			nodeCount:  3,
+			policy:     `REP 2`,
+			placement:  [][]int{{1, 2}},
+			objHolders: []int{1},
+			headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)},
+		},
 		{
 			desc:    "lock object must be replicated to all EC nodes",
 			objType: objectSDK.TypeLock,
@@ -161,6 +170,14 @@ func TestProcessObject(t *testing.T) {
 			placement:       [][]int{{0, 1, 2}},
 			wantReplicateTo: []int{1, 2},
 		},
+		{
+			desc:       "do not remove local copy when MAINTENANCE status is cached",
+			objType:    objectSDK.TypeRegular,
+			nodeCount:  3,
+			policy:     `REP 1 REP 1`,
+			placement:  [][]int{{1, 2}, {1, 0}},
+			headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)},
+		},
 	}
 
 	for i := range tests {
@@ -204,6 +221,11 @@ func TestProcessObject(t *testing.T) {
 				t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a)
 				return nil, errors.New("unexpected object head")
 			}
+			if ti.headResult != nil {
+				if err, ok := ti.headResult[index]; ok {
+					return nil, err
+				}
+			}
 			for _, i := range ti.objHolders {
 				if index == i {
 					return nil, nil

View file

@@ -261,7 +261,7 @@ func parseResource(lexeme string, isObj bool) (string, error) {
 	} else {
 		if lexeme == "*" {
 			return nativeschema.ResourceFormatAllContainers, nil
-		} else if lexeme == "/*" {
+		} else if lexeme == "/*" || lexeme == "root/*" {
 			return nativeschema.ResourceFormatRootContainers, nil
 		} else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 {
 			lexeme = lexeme[1:]

View file

@@ -43,6 +43,15 @@ func TestParseAPERule(t *testing.T) {
 				Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}},
 			},
 		},
+		{
+			name: "Valid rule for all containers in explicit root namespace",
+			rule: "allow Container.Put root/*",
+			expectRule: policyengine.Rule{
+				Status:    policyengine.Allow,
+				Actions:   policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}},
+				Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}},
+			},
+		},
 		{
 			name: "Valid rule for all objects in root namespace and container",
 			rule: "allow Object.Put /cid/*",